├── .gitignore
├── images
│   ├── background_workflow.png
│   └── Bria Logo.svg
├── pyproject.toml
├── .github
│   └── workflows
│       └── publish.yml
├── nodes
│   ├── eraser_node.py
│   ├── shot_by_image_node.py
│   ├── shot_by_text_node.py
│   ├── shot_by_image_manual_padding_node.py
│   ├── shot_by_text_manual_padding_node.py
│   ├── tailored_model_info_node.py
│   ├── shot_by_image_automatic_aspect_ratio_node.py
│   ├── shot_by_text_automatic_aspect_ratio_node.py
│   ├── shot_by_image_automatic_node.py
│   ├── video_nodes
│   │   ├── load_video.py
│   │   ├── video_utils.py
│   │   ├── remove_video_background_node.py
│   │   ├── preview_video_node_from_url.py
│   │   ├── video_mask_by_prompt_node.py
│   │   ├── video_increase_resolution_node.py
│   │   ├── video_erase_elements_node.py
│   │   ├── video_mask_by_key_points_node.py
│   │   └── video_solid_color_background_node.py
│   ├── shot_by_text_automatic_node.py
│   ├── shot_by_image_custom_coordinates_node.py
│   ├── shot_by_text_custom_coordinates_node.py
│   ├── shot_by_image_manual_placement_node.py
│   ├── shot_by_text_manual_placement_node.py
│   ├── __init__.py
│   ├── text_2_image_hd_node.py
│   ├── attribution_by_image_node.py
│   ├── tailored_portrait_node.py
│   ├── reimagine_node.py
│   ├── generate_structured_prompt_node_v2.py
│   ├── generate_structured_prompt_lite_node_v2.py
│   ├── rmbg_node.py
│   ├── remove_foreground_node.py
│   ├── generative_fill_node.py
│   ├── tailored_gen_node.py
│   ├── text_2_image_fast_node.py
│   ├── text_2_image_base_node.py
│   ├── generate_image_lite_node_v2.py
│   ├── generate_image_node_v2.py
│   ├── replace_bg_node.py
│   ├── refine_image_lite_node_v2.py
│   ├── image_expansion_node.py
│   ├── refine_image_node_v2.py
│   ├── common.py
│   └── utils
│       └── shot_utils.py
├── workflows
│   ├── eraser_genfill_workflow.json
│   ├── tailored_workflow.json
│   ├── text_to_image_workflow.json
│   ├── background_generation_workflow.json
│   ├── product_original_ shot_generation_workflow.json
│   └── Video_Editig_Workflow.json
├── __init__.py
└── Readme.md

/.gitignore:
--------------------------------------------------------------------------------
1 | *.pyc
2 | .idea
--------------------------------------------------------------------------------
/images/background_workflow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Bria-AI/ComfyUI-BRIA-API/HEAD/images/background_workflow.png
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "comfyui-bria-api"
3 | description = "Custom nodes for ComfyUI using BRIA's API."
4 | version = "2.1.11" 5 | license = {file = "LICENSE"} 6 | 7 | [project.urls] 8 | Repository = "https://github.com/Bria-AI/ComfyUI-BRIA-API" 9 | # Used by Comfy Registry https://comfyregistry.org 10 | 11 | [tool.comfy] 12 | PublisherId = "briaai" 13 | DisplayName = "ComfyUI-BRIA-API" 14 | Icon = "" 15 | -------------------------------------------------------------------------------- /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | name: Publish to Comfy registry 2 | on: 3 | workflow_dispatch: 4 | push: 5 | branches: 6 | - main 7 | paths: 8 | - "pyproject.toml" 9 | 10 | permissions: 11 | issues: write 12 | 13 | jobs: 14 | publish-node: 15 | name: Publish Custom Node to registry 16 | runs-on: ubuntu-latest 17 | if: ${{ github.repository_owner == 'Bria-AI' }} 18 | steps: 19 | - name: Check out code 20 | uses: actions/checkout@v4 21 | - name: Publish Custom Node 22 | uses: Comfy-Org/publish-node-action@v1 23 | with: 24 | ## Add your own personal access token to your Github Repository secrets and reference it here. 25 | personal_access_token: ${{ secrets.REGISTRY_ACCESS_TOKEN }} 26 | -------------------------------------------------------------------------------- /nodes/eraser_node.py: -------------------------------------------------------------------------------- 1 | from .common import process_request 2 | 3 | class EraserNode(): 4 | @classmethod 5 | def INPUT_TYPES(self): 6 | return { 7 | "required": { 8 | "image": ("IMAGE",), # Input image from another node 9 | "mask": ("MASK",), # Binary mask input 10 | "api_key": ("STRING", {"default": "BRIA_API_TOKEN"}) # API Key input with a default value 11 | }, 12 | "optional": { 13 | "visual_input_content_moderation": ("BOOLEAN", {"default": False}), 14 | "visual_output_content_moderation": ("BOOLEAN", {"default": False}), 15 | } 16 | } 17 | 18 | RETURN_TYPES = ("IMAGE",) 19 | RETURN_NAMES = ("output_image",) 20 | CATEGORY = "API Nodes" 21 | FUNCTION = "execute" # This is the method that will be executed 22 | 23 | def __init__(self): 24 | self.api_url = "https://engine.prod.bria-api.com/v2/image/edit/erase" # Eraser API URL 25 | 26 | # Define the execute method as expected by ComfyUI 27 | def execute(self, image, mask, api_key, visual_input_content_moderation, visual_output_content_moderation): 28 | return process_request(self.api_url, image, mask, api_key, visual_input_content_moderation, visual_output_content_moderation) 29 | -------------------------------------------------------------------------------- /nodes/shot_by_image_node.py: -------------------------------------------------------------------------------- 1 | from .utils.shot_utils import get_image_input_types, create_image_payload, make_api_request, shot_by_image_api_url, PlacementType 2 | 3 | 4 | class ShotByImageOriginalNode: 5 | @classmethod 6 | def INPUT_TYPES(self): 7 | input_types = get_image_input_types() 8 | return input_types 9 | 10 | RETURN_TYPES = ("IMAGE",) 11 | RETURN_NAMES = ("output_image",) 12 | CATEGORY = "API Nodes" 13 | FUNCTION = "execute" 14 | 15 | def __init__(self): 16 | self.api_url = shot_by_image_api_url 17 | 18 | def execute( 19 | self, 20 | image, 21 | ref_image, 22 | api_key, 23 | sync=True, 24 | enhance_ref_image=True, 25 | ref_image_influence=1.0, 26 | force_rmbg=False, 27 | content_moderation=False, 28 | ): 29 | payload = create_image_payload( 30 | image, 31 | ref_image, 32 | api_key, 33 | PlacementType.ORIGINAL.value, 34 | original_quality=True, 35 | sync=sync, 36 | 
enhance_ref_image=enhance_ref_image, 37 | ref_image_influence=ref_image_influence, 38 | force_rmbg=force_rmbg, 39 | content_moderation=content_moderation, 40 | ) 41 | return make_api_request(self.api_url, payload, api_key) 42 | -------------------------------------------------------------------------------- /nodes/shot_by_text_node.py: -------------------------------------------------------------------------------- 1 | from .utils.shot_utils import get_text_input_types, create_text_payload, make_api_request, shot_by_text_api_url, PlacementType 2 | 3 | 4 | class ShotByTextOriginalNode: 5 | @classmethod 6 | def INPUT_TYPES(self): 7 | input_types = get_text_input_types() 8 | return input_types 9 | 10 | RETURN_TYPES = ("IMAGE",) 11 | RETURN_NAMES = ("output_image",) 12 | CATEGORY = "API Nodes" 13 | FUNCTION = "execute" 14 | 15 | def __init__(self): 16 | self.api_url = shot_by_text_api_url 17 | def execute( 18 | self, 19 | image, 20 | scene_description, 21 | mode, 22 | api_key, 23 | sync=True, 24 | optimize_description=True, 25 | exclude_elements="", 26 | force_rmbg=False, 27 | content_moderation=False, 28 | ): 29 | payload = create_text_payload( 30 | image, 31 | api_key, 32 | scene_description, 33 | mode, 34 | PlacementType.ORIGINAL.value, 35 | original_quality=True, 36 | sync=sync, 37 | optimize_description=optimize_description, 38 | exclude_elements=exclude_elements, 39 | force_rmbg=force_rmbg, 40 | content_moderation=content_moderation, 41 | ) 42 | return make_api_request(self.api_url, payload, api_key) 43 | -------------------------------------------------------------------------------- /nodes/shot_by_image_manual_padding_node.py: -------------------------------------------------------------------------------- 1 | from .utils.shot_utils import get_image_input_types, create_image_payload, make_api_request, shot_by_image_api_url, PlacementType 2 | 3 | 4 | class ShotByImageManualPaddingNode: 5 | @classmethod 6 | def INPUT_TYPES(self): 7 | input_types = get_image_input_types() 8 | input_types["required"]["padding_values"] = ("STRING", {"default": "0,0,0,0"}) 9 | 10 | return input_types 11 | 12 | RETURN_TYPES = ("IMAGE",) 13 | RETURN_NAMES = ("output_image",) 14 | CATEGORY = "API Nodes" 15 | FUNCTION = "execute" 16 | 17 | def __init__(self): 18 | self.api_url = shot_by_image_api_url 19 | 20 | def execute( 21 | self, 22 | image, 23 | ref_image, 24 | padding_values, 25 | api_key, 26 | sync=False, 27 | enhance_ref_image=True, 28 | ref_image_influence=1.0, 29 | force_rmbg=False, 30 | content_moderation=False, 31 | ): 32 | payload = create_image_payload( 33 | image, 34 | ref_image, 35 | api_key, 36 | PlacementType.MANUAL_PADDING.value, 37 | padding_values=padding_values, 38 | sync=sync, 39 | enhance_ref_image=enhance_ref_image, 40 | ref_image_influence=ref_image_influence, 41 | force_rmbg=force_rmbg, 42 | content_moderation=content_moderation, 43 | ) 44 | return make_api_request(self.api_url, payload, api_key) 45 | -------------------------------------------------------------------------------- /nodes/shot_by_text_manual_padding_node.py: -------------------------------------------------------------------------------- 1 | from .utils.shot_utils import get_text_input_types, create_text_payload, make_api_request, shot_by_text_api_url, PlacementType 2 | 3 | 4 | class ShotByTextManualPaddingNode: 5 | @classmethod 6 | def INPUT_TYPES(self): 7 | input_types = get_text_input_types() 8 | input_types["required"]["padding_values"] = ("STRING", {"default": "0,0,0,0"}) 9 | return input_types 10 | 11 | 
RETURN_TYPES = ("IMAGE",) 12 | RETURN_NAMES = ("output_image",) 13 | CATEGORY = "API Nodes" 14 | FUNCTION = "execute" 15 | 16 | def __init__(self): 17 | self.api_url = shot_by_text_api_url 18 | 19 | def execute( 20 | self, 21 | image, 22 | scene_description, 23 | mode, 24 | padding_values, 25 | api_key, 26 | sync=False, 27 | optimize_description=True, 28 | exclude_elements="", 29 | force_rmbg=False, 30 | content_moderation=False, 31 | ): 32 | payload = create_text_payload( 33 | image, 34 | api_key, 35 | scene_description, 36 | mode, 37 | PlacementType.MANUAL_PADDING.value, 38 | padding_values=padding_values, 39 | sync=sync, 40 | optimize_description=optimize_description, 41 | exclude_elements=exclude_elements, 42 | force_rmbg=force_rmbg, 43 | content_moderation=content_moderation, 44 | ) 45 | return make_api_request(self.api_url, payload, api_key) 46 | -------------------------------------------------------------------------------- /nodes/tailored_model_info_node.py: -------------------------------------------------------------------------------- 1 | import requests 2 | from .common import deserialize_and_get_comfy_key 3 | 4 | class TailoredModelInfoNode(): 5 | @classmethod 6 | def INPUT_TYPES(self): 7 | return { 8 | "required": { 9 | "model_id": ("STRING",), 10 | "api_key": ("STRING", ) 11 | } 12 | } 13 | 14 | RETURN_TYPES = ("STRING", "STRING","INT", "INT", ) 15 | RETURN_NAMES = ("generation_prefix", "model_id", "default_fast", "default_steps_num", ) 16 | CATEGORY = "API Nodes" 17 | FUNCTION = "execute" # This is the method that will be executed 18 | 19 | def __init__(self): 20 | self.api_url = "https://engine.prod.bria-api.com/v1/tailored-gen/models/" 21 | 22 | # Define the execute method as expected by ComfyUI 23 | def execute(self, model_id, api_key): 24 | api_key = deserialize_and_get_comfy_key(api_key) 25 | response = requests.get( 26 | self.api_url + model_id, 27 | headers={"api_token": api_key} 28 | ) 29 | if response.status_code == 200: 30 | generation_prefix = response.json()["generation_prefix"] 31 | training_version = response.json()["training_version"] 32 | default_fast = 1 if training_version == "light" else 0 33 | default_steps_num = 8 if training_version == "light" else 30 34 | return (generation_prefix, model_id, default_fast, default_steps_num,) 35 | else: 36 | raise Exception(f"Error: API request failed with status code {response.status_code} and text {response.text}") 37 | -------------------------------------------------------------------------------- /nodes/shot_by_image_automatic_aspect_ratio_node.py: -------------------------------------------------------------------------------- 1 | from .utils.shot_utils import get_image_input_types, create_image_payload, make_api_request, shot_by_image_api_url, PlacementType 2 | 3 | 4 | class ShotByImageAutomaticAspectRatioNode: 5 | @classmethod 6 | def INPUT_TYPES(self): 7 | input_types = get_image_input_types() 8 | input_types["required"]["aspect_ratio"] = ( 9 | ["1:1", "2:3", "3:2", "3:4", "4:3", "4:5", "5:4", "9:16", "16:9"], 10 | {"default": "1:1"}, 11 | ) 12 | return input_types 13 | 14 | RETURN_TYPES = ("IMAGE",) 15 | RETURN_NAMES = ("output_image",) 16 | CATEGORY = "API Nodes" 17 | FUNCTION = "execute" 18 | 19 | def __init__(self): 20 | self.api_url = shot_by_image_api_url 21 | 22 | def execute( 23 | self, 24 | image, 25 | ref_image, 26 | aspect_ratio, 27 | api_key, 28 | sync=False, 29 | enhance_ref_image=True, 30 | ref_image_influence=1.0, 31 | force_rmbg=False, 32 | content_moderation=False, 33 | ): 34 | payload = 
create_image_payload( 35 | image, 36 | ref_image, 37 | api_key, 38 | PlacementType.AUTOMATIC_ASPECT_RATIO.value, 39 | aspect_ratio=aspect_ratio, 40 | sync=sync, 41 | enhance_ref_image=enhance_ref_image, 42 | ref_image_influence=ref_image_influence, 43 | force_rmbg=force_rmbg, 44 | content_moderation=content_moderation, 45 | ) 46 | return make_api_request(self.api_url, payload, api_key) 47 | -------------------------------------------------------------------------------- /nodes/shot_by_text_automatic_aspect_ratio_node.py: -------------------------------------------------------------------------------- 1 | from .utils.shot_utils import get_text_input_types, create_text_payload, make_api_request, shot_by_text_api_url, PlacementType 2 | 3 | 4 | class ShotByTextAutomaticAspectRatioNode: 5 | @classmethod 6 | def INPUT_TYPES(self): 7 | input_types = get_text_input_types() 8 | input_types["required"]["aspect_ratio"] = ( 9 | ["1:1", "2:3", "3:2", "3:4", "4:3", "4:5", "5:4", "9:16", "16:9"], 10 | {"default": "1:1"}, 11 | ) 12 | return input_types 13 | 14 | RETURN_TYPES = ("IMAGE",) 15 | RETURN_NAMES = ("output_image",) 16 | CATEGORY = "API Nodes" 17 | FUNCTION = "execute" 18 | 19 | def __init__(self): 20 | self.api_url = shot_by_text_api_url 21 | 22 | def execute( 23 | self, 24 | image, 25 | scene_description, 26 | mode, 27 | aspect_ratio, 28 | api_key, 29 | sync=False, 30 | optimize_description=True, 31 | exclude_elements="", 32 | force_rmbg=False, 33 | content_moderation=False, 34 | ): 35 | payload = create_text_payload( 36 | image, 37 | api_key, 38 | scene_description, 39 | mode, 40 | PlacementType.AUTOMATIC_ASPECT_RATIO.value, 41 | aspect_ratio=aspect_ratio, 42 | sync=sync, 43 | optimize_description=optimize_description, 44 | exclude_elements=exclude_elements, 45 | force_rmbg=force_rmbg, 46 | content_moderation=content_moderation, 47 | ) 48 | return make_api_request(self.api_url, payload, api_key) 49 | -------------------------------------------------------------------------------- /nodes/shot_by_image_automatic_node.py: -------------------------------------------------------------------------------- 1 | from .utils.shot_utils import get_image_input_types, create_image_payload, make_api_request, shot_by_image_api_url, PlacementType 2 | 3 | 4 | class ShotByImageAutomaticNode: 5 | @classmethod 6 | def INPUT_TYPES(self): 7 | input_types = get_image_input_types() 8 | input_types["required"]["shot_size"] = ("STRING", {"default": "1000, 1000"}) 9 | return input_types 10 | 11 | RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE", "IMAGE", "IMAGE", "IMAGE", "IMAGE") 12 | RETURN_NAMES = ( 13 | "output_image_1", 14 | "output_image_2", 15 | "output_image_3", 16 | "output_image_4", 17 | "output_image_5", 18 | "output_image_6", 19 | "output_image_7", 20 | ) 21 | CATEGORY = "API Nodes" 22 | FUNCTION = "execute" 23 | 24 | def __init__(self): 25 | self.api_url = shot_by_image_api_url 26 | 27 | def execute( 28 | self, 29 | image, 30 | ref_image, 31 | shot_size, 32 | api_key, 33 | sync=False, 34 | enhance_ref_image=True, 35 | ref_image_influence=1.0, 36 | force_rmbg=False, 37 | content_moderation=False, 38 | ): 39 | payload = create_image_payload( 40 | image, 41 | ref_image, 42 | api_key, 43 | PlacementType.AUTOMATIC.value, 44 | shot_size=shot_size, 45 | sync=sync, 46 | enhance_ref_image=enhance_ref_image, 47 | ref_image_influence=ref_image_influence, 48 | force_rmbg=force_rmbg, 49 | content_moderation=content_moderation, 50 | ) 51 | return make_api_request(self.api_url, payload, api_key, Placement_type = 
PlacementType.AUTOMATIC.value) 52 | -------------------------------------------------------------------------------- /nodes/video_nodes/load_video.py: -------------------------------------------------------------------------------- 1 | import os 2 | import folder_paths 3 | 4 | class LoadVideoFramesNode: 5 | """ 6 | Load a video file from the input folder or upload. 7 | 8 | Parameters: 9 | video (str): Selected or uploaded video filename. 10 | 11 | Returns: 12 | video_path (STRING): Absolute path to the video file. 13 | """ 14 | 15 | @classmethod 16 | def INPUT_TYPES(cls): 17 | input_dir = folder_paths.get_input_directory() 18 | files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))] 19 | files = folder_paths.filter_files_content_types(files, ["video"]) 20 | 21 | return { 22 | "required": { 23 | "video": (sorted(files), {"video_upload": True}), 24 | } 25 | } 26 | 27 | RETURN_TYPES = ("STRING",) 28 | RETURN_NAMES = ("video_path",) 29 | FUNCTION = "load_video" 30 | CATEGORY = "API Nodes" 31 | 32 | def load_video(self, video): 33 | video_path = folder_paths.get_annotated_filepath(video) 34 | if not os.path.exists(video_path): 35 | raise FileNotFoundError(f"Video file not found: {video_path}") 36 | 37 | return (video_path,) 38 | 39 | @classmethod 40 | def IS_CHANGED(cls, video, **kwargs): 41 | """Force re-execution when video file changes""" 42 | video_path = folder_paths.get_annotated_filepath(video) 43 | if os.path.exists(video_path): 44 | return os.path.getmtime(video_path) 45 | return float("nan") 46 | 47 | @classmethod 48 | def VALIDATE_INPUTS(cls, video, **kwargs): 49 | """Validate that the video file exists""" 50 | if not folder_paths.exists_annotated_filepath(video): 51 | return f"Invalid video file: {video}" 52 | return True -------------------------------------------------------------------------------- /nodes/shot_by_text_automatic_node.py: -------------------------------------------------------------------------------- 1 | from .utils.shot_utils import get_text_input_types, create_text_payload, make_api_request, shot_by_text_api_url, PlacementType 2 | 3 | 4 | class ShotByTextAutomaticNode: 5 | @classmethod 6 | def INPUT_TYPES(self): 7 | input_types = get_text_input_types() 8 | input_types["required"]["shot_size"] = ("STRING", {"default": "1000, 1000"}) 9 | return input_types 10 | 11 | RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE", "IMAGE", "IMAGE", "IMAGE", "IMAGE") 12 | RETURN_NAMES = ( 13 | "output_image_1", 14 | "output_image_2", 15 | "output_image_3", 16 | "output_image_4", 17 | "output_image_5", 18 | "output_image_6", 19 | "output_image_7", 20 | ) 21 | CATEGORY = "API Nodes" 22 | FUNCTION = "execute" 23 | 24 | def __init__(self): 25 | self.api_url = shot_by_text_api_url 26 | 27 | def execute( 28 | self, 29 | image, 30 | scene_description, 31 | mode, 32 | shot_size, 33 | api_key, 34 | sync=False, 35 | optimize_description=True, 36 | exclude_elements="", 37 | force_rmbg=False, 38 | content_moderation=False, 39 | ): 40 | payload = create_text_payload( 41 | image, 42 | api_key, 43 | scene_description, 44 | mode, 45 | PlacementType.AUTOMATIC.value, 46 | shot_size=shot_size, 47 | sync=sync, 48 | optimize_description=optimize_description, 49 | exclude_elements=exclude_elements, 50 | force_rmbg=force_rmbg, 51 | content_moderation=content_moderation, 52 | ) 53 | return make_api_request(self.api_url, payload, api_key, Placement_type= PlacementType.AUTOMATIC.value) 54 | -------------------------------------------------------------------------------- 
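The shot-by-image / shot-by-text placement nodes above all defer payload construction and the HTTP round trip to helpers in nodes/utils/shot_utils.py, which is not included in this section. The sketch below shows how the automatic-placement text node could be driven directly from Python; it is illustrative only and rests on a few assumptions: it runs inside a ComfyUI environment (nodes/__init__.py pulls in ComfyUI-only modules such as folder_paths), the "mode" value is a placeholder (the valid options are defined in get_text_input_types(), not shown here), and the IMAGE tensor convention (float32 in [0, 1], shape [1, H, W, C]) is taken from the other nodes in this repository.

    import numpy as np
    import torch
    from PIL import Image
    from nodes.shot_by_text_automatic_node import ShotByTextAutomaticNode

    # Load a product image and convert it to the ComfyUI IMAGE layout.
    pil = Image.open("product.png").convert("RGB")
    image = torch.from_numpy(np.array(pil).astype(np.float32) / 255.0)[None,]

    node = ShotByTextAutomaticNode()
    outputs = node.execute(
        image=image,
        scene_description="on a marble kitchen counter, soft morning light",
        mode="base",                 # assumed value; real options come from shot_utils.get_text_input_types()
        shot_size="1000, 1000",
        api_key="YOUR_BRIA_API_TOKEN",
    )
    # PlacementType.AUTOMATIC returns up to seven candidate images (see RETURN_NAMES above).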
/nodes/shot_by_image_custom_coordinates_node.py: -------------------------------------------------------------------------------- 1 | from .utils.shot_utils import get_image_input_types, create_image_payload, make_api_request, shot_by_image_api_url, PlacementType 2 | 3 | 4 | class ShotByImageCustomCoordinatesNode: 5 | @classmethod 6 | def INPUT_TYPES(self): 7 | input_types = get_image_input_types() 8 | input_types["required"]["shot_size"] = ("STRING", {"default": "1000, 1000"}) 9 | input_types["required"]["foreground_image_size"] = ( 10 | "STRING", 11 | {"default": "500,500"}, 12 | ) 13 | input_types["required"]["foreground_image_location"] = ( 14 | "STRING", 15 | {"default": "0, 0"}, 16 | ) 17 | return input_types 18 | 19 | RETURN_TYPES = ("IMAGE",) 20 | RETURN_NAMES = ("output_image",) 21 | CATEGORY = "API Nodes" 22 | FUNCTION = "execute" 23 | 24 | def __init__(self): 25 | self.api_url = shot_by_image_api_url 26 | 27 | def execute( 28 | self, 29 | image, 30 | ref_image, 31 | shot_size, 32 | foreground_image_size, 33 | foreground_image_location, 34 | api_key, 35 | sync=False, 36 | enhance_ref_image=True, 37 | ref_image_influence=1.0, 38 | force_rmbg=False, 39 | content_moderation=False, 40 | ): 41 | payload = create_image_payload( 42 | image, 43 | ref_image, 44 | api_key, 45 | PlacementType.CUSTOM_COORDINATES.value, 46 | shot_size=shot_size, 47 | foreground_image_size=foreground_image_size, 48 | foreground_image_location=foreground_image_location, 49 | sync=sync, 50 | enhance_ref_image=enhance_ref_image, 51 | ref_image_influence=ref_image_influence, 52 | force_rmbg=force_rmbg, 53 | content_moderation=content_moderation, 54 | ) 55 | return make_api_request(self.api_url, payload, api_key) 56 | -------------------------------------------------------------------------------- /nodes/shot_by_text_custom_coordinates_node.py: -------------------------------------------------------------------------------- 1 | from .utils.shot_utils import get_text_input_types, create_text_payload, make_api_request, shot_by_text_api_url, PlacementType 2 | 3 | 4 | class ShotByTextCustomCoordinatesNode: 5 | @classmethod 6 | def INPUT_TYPES(self): 7 | input_types = get_text_input_types() 8 | input_types["required"]["shot_size"] = ("STRING", {"default": "1000, 1000"}) 9 | input_types["required"]["foreground_image_size"] = ( 10 | "STRING", 11 | {"default": "500,500"}, 12 | ) 13 | input_types["required"]["foreground_image_location"] = ( 14 | "STRING", 15 | {"default": "0, 0"}, 16 | ) 17 | return input_types 18 | 19 | RETURN_TYPES = ("IMAGE",) 20 | RETURN_NAMES = ("output_image",) 21 | CATEGORY = "API Nodes" 22 | FUNCTION = "execute" 23 | 24 | def __init__(self): 25 | self.api_url = shot_by_text_api_url 26 | 27 | def execute( 28 | self, 29 | image, 30 | scene_description, 31 | mode, 32 | shot_size, 33 | foreground_image_size, 34 | foreground_image_location, 35 | api_key, 36 | sync=False, 37 | optimize_description=True, 38 | exclude_elements="", 39 | force_rmbg=False, 40 | content_moderation=False, 41 | ): 42 | payload = create_text_payload( 43 | image, 44 | api_key, 45 | scene_description, 46 | mode, 47 | PlacementType.CUSTOM_COORDINATES.value, 48 | shot_size=shot_size, 49 | foreground_image_size=foreground_image_size, 50 | foreground_image_location=foreground_image_location, 51 | sync=sync, 52 | optimize_description=optimize_description, 53 | exclude_elements=exclude_elements, 54 | force_rmbg=force_rmbg, 55 | content_moderation=content_moderation, 56 | ) 57 | return make_api_request(self.api_url, payload, api_key) 58 | 
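The two custom-coordinates nodes above take their geometry as comma-separated strings: shot_size and foreground_image_size are "width, height" pairs and foreground_image_location is an "x, y" pair. The helper below is illustrative only (it is not part of the repository, and it assumes the location string refers to the top-left corner of the foreground, which the node code itself does not state); it shows how a centered placement could be derived:

    def centered_placement(canvas_w: int, canvas_h: int, fg_w: int, fg_h: int) -> dict:
        # Center the foreground on the canvas and format the strings the nodes expect.
        x = (canvas_w - fg_w) // 2
        y = (canvas_h - fg_h) // 2
        return {
            "shot_size": f"{canvas_w}, {canvas_h}",
            "foreground_image_size": f"{fg_w}, {fg_h}",
            "foreground_image_location": f"{x}, {y}",
        }

    # centered_placement(1000, 1000, 500, 500) ->
    # {"shot_size": "1000, 1000", "foreground_image_size": "500, 500",
    #  "foreground_image_location": "250, 250"}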
-------------------------------------------------------------------------------- /nodes/shot_by_image_manual_placement_node.py: -------------------------------------------------------------------------------- 1 | from .utils.shot_utils import get_image_input_types, create_image_payload, make_api_request, shot_by_image_api_url, PlacementType 2 | 3 | 4 | class ShotByImageManualPlacementNode: 5 | @classmethod 6 | def INPUT_TYPES(self): 7 | input_types = get_image_input_types() 8 | input_types["required"]["shot_size"] = ("STRING", {"default": "1000, 1000"}) 9 | input_types["required"]["manual_placement_selection"] = ( 10 | [ 11 | "upper_left", 12 | "upper_right", 13 | "bottom_left", 14 | "bottom_right", 15 | "right_center", 16 | "left_center", 17 | "upper_center", 18 | "bottom_center", 19 | "center_vertical", 20 | "center_horizontal", 21 | ], 22 | {"default": "upper_left"}, 23 | ) 24 | return input_types 25 | 26 | RETURN_TYPES = ("IMAGE",) 27 | RETURN_NAMES = ("output_image",) 28 | CATEGORY = "API Nodes" 29 | FUNCTION = "execute" 30 | 31 | def __init__(self): 32 | self.api_url = shot_by_image_api_url 33 | def execute( 34 | self, 35 | image, 36 | ref_image, 37 | shot_size, 38 | manual_placement_selection, 39 | api_key, 40 | sync=False, 41 | enhance_ref_image=True, 42 | ref_image_influence=1.0, 43 | force_rmbg=False, 44 | content_moderation=False, 45 | ): 46 | payload = create_image_payload( 47 | image, 48 | ref_image, 49 | api_key, 50 | PlacementType.MANUAL_PLACEMENT.value, 51 | shot_size=shot_size, 52 | manual_placement_selection=manual_placement_selection, 53 | sync=sync, 54 | enhance_ref_image=enhance_ref_image, 55 | ref_image_influence=ref_image_influence, 56 | force_rmbg=force_rmbg, 57 | content_moderation=content_moderation, 58 | ) 59 | return make_api_request(self.api_url, payload, api_key) 60 | -------------------------------------------------------------------------------- /nodes/shot_by_text_manual_placement_node.py: -------------------------------------------------------------------------------- 1 | from .utils.shot_utils import get_text_input_types, create_text_payload, make_api_request, shot_by_text_api_url, PlacementType 2 | 3 | 4 | class ShotByTextManualPlacementNode: 5 | @classmethod 6 | def INPUT_TYPES(self): 7 | input_types = get_text_input_types() 8 | input_types["required"]["shot_size"] = ("STRING", {"default": "1000, 1000"}) 9 | input_types["required"]["manual_placement_selection"] = ( 10 | [ 11 | "upper_left", 12 | "upper_right", 13 | "bottom_left", 14 | "bottom_right", 15 | "right_center", 16 | "left_center", 17 | "upper_center", 18 | "bottom_center", 19 | "center_vertical", 20 | "center_horizontal", 21 | ], 22 | {"default": "upper_left"}, 23 | ) 24 | return input_types 25 | 26 | RETURN_TYPES = ("IMAGE",) 27 | RETURN_NAMES = ("output_image",) 28 | CATEGORY = "API Nodes" 29 | FUNCTION = "execute" 30 | 31 | def __init__(self): 32 | self.api_url = shot_by_text_api_url 33 | 34 | def execute( 35 | self, 36 | image, 37 | scene_description, 38 | mode, 39 | shot_size, 40 | manual_placement_selection, 41 | api_key, 42 | sync=False, 43 | optimize_description=True, 44 | exclude_elements="", 45 | force_rmbg=False, 46 | content_moderation=False, 47 | ): 48 | payload = create_text_payload( 49 | image, 50 | api_key, 51 | scene_description, 52 | mode, 53 | PlacementType.MANUAL_PLACEMENT.value, 54 | shot_size=shot_size, 55 | manual_placement_selection=manual_placement_selection, 56 | sync=sync, 57 | optimize_description=optimize_description, 58 | exclude_elements=exclude_elements, 59 | 
force_rmbg=force_rmbg, 60 | content_moderation=content_moderation, 61 | ) 62 | return make_api_request(self.api_url, payload, api_key) 63 | -------------------------------------------------------------------------------- /nodes/video_nodes/video_utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import requests 3 | 4 | 5 | def upload_video_to_s3(video_path, filename, api_token): 6 | api_url = "https://platform.prod.bria-api.com/upload-video/anonymous/presigned-url" 7 | headers = { 8 | "Content-Type": "application/json" 9 | } 10 | extension = os.path.splitext(filename)[1].lower() 11 | content_type_map = { 12 | '.mp4': 'video/mp4', 13 | '.webm': 'video/webm', 14 | '.mov': 'video/quicktime', 15 | '.mkv': 'video/x-matroska', 16 | '.avi': 'video/x-msvideo', 17 | '.gif': 'image/gif', 18 | '.webp': 'image/webp' 19 | } 20 | content_type = content_type_map.get(extension, 'video/mp4') 21 | if api_token: 22 | headers["api_token"] = api_token 23 | 24 | payload = { 25 | "file_name": filename, 26 | "content_type":content_type 27 | } 28 | 29 | print(f"Requesting presigned URL for: {filename}") 30 | 31 | try: 32 | response = requests.post(api_url, json=payload, headers=headers) 33 | 34 | if response.status_code != 200: 35 | raise Exception(f"Failed to get presigned URL: {response.status_code} {response.text}") 36 | 37 | response_data = response.json() 38 | video_url = response_data.get("video_url") 39 | upload_url = response_data.get("upload_url") 40 | 41 | if not video_url or not upload_url: 42 | raise Exception(f"Invalid response from presigned URL API: {response_data}") 43 | 44 | print(f"Received presigned URL") 45 | print(f"Video URL: {video_url}") 46 | 47 | # Step 2: Upload video to presigned URL 48 | print(f"Uploading video to S3...") 49 | 50 | with open(video_path, 'rb') as f: 51 | video_data = f.read() 52 | 53 | # Determine content type based on file extension 54 | upload_headers = { 55 | "Content-Type": content_type 56 | } 57 | 58 | upload_response = requests.put(upload_url, data=video_data, headers=upload_headers) 59 | 60 | if upload_response.status_code not in [200, 204]: 61 | raise Exception(f"Failed to upload video to S3: {upload_response.status_code}") 62 | 63 | print(f"Video uploaded successfully to S3") 64 | 65 | return video_url 66 | 67 | except Exception as e: 68 | raise Exception(f"Error uploading video to S3: {str(e)}") 69 | 70 | -------------------------------------------------------------------------------- /nodes/__init__.py: -------------------------------------------------------------------------------- 1 | from .eraser_node import EraserNode 2 | from .generative_fill_node import GenFillNode 3 | from .image_expansion_node import ImageExpansionNode 4 | from .replace_bg_node import ReplaceBgNode 5 | from .rmbg_node import RmbgNode 6 | from .remove_foreground_node import RemoveForegroundNode 7 | from .tailored_gen_node import TailoredGenNode 8 | from .tailored_model_info_node import TailoredModelInfoNode 9 | from .tailored_portrait_node import TailoredPortraitNode 10 | from .text_2_image_base_node import Text2ImageBaseNode 11 | from .text_2_image_fast_node import Text2ImageFastNode 12 | from .text_2_image_hd_node import Text2ImageHDNode 13 | from .reimagine_node import ReimagineNode 14 | from .generate_image_node_v2 import GenerateImageNodeV2 15 | from .generate_image_lite_node_v2 import GenerateImageLiteNodeV2 16 | from .refine_image_node_v2 import RefineImageNodeV2 17 | from .refine_image_lite_node_v2 import 
RefineImageLiteNodeV2 18 | from .generate_structured_prompt_node_v2 import GenerateStructuredPromptNodeV2 19 | from .generate_structured_prompt_lite_node_v2 import GenerateStructuredPromptLiteNodeV2 20 | from .shot_by_text_node import ShotByTextOriginalNode 21 | from .shot_by_text_automatic_aspect_ratio_node import ShotByTextAutomaticAspectRatioNode 22 | from .shot_by_text_automatic_node import ShotByTextAutomaticNode 23 | from .shot_by_text_custom_coordinates_node import ShotByTextCustomCoordinatesNode 24 | from .shot_by_text_manual_placement_node import ShotByTextManualPlacementNode 25 | from .shot_by_text_manual_padding_node import ShotByTextManualPaddingNode 26 | from .shot_by_image_automatic_aspect_ratio_node import ( 27 | ShotByImageAutomaticAspectRatioNode, 28 | ) 29 | from .shot_by_image_automatic_node import ShotByImageAutomaticNode 30 | from .shot_by_image_custom_coordinates_node import ShotByImageCustomCoordinatesNode 31 | from .shot_by_image_node import ShotByImageOriginalNode 32 | from .shot_by_image_manual_placement_node import ShotByImageManualPlacementNode 33 | from .shot_by_image_manual_padding_node import ShotByImageManualPaddingNode 34 | from .attribution_by_image_node import AttributionByImageNode 35 | from .video_nodes.remove_video_background_node import RemoveVideoBackgroundNode 36 | from .video_nodes.video_increase_resolution_node import VideoIncreaseResolutionNode 37 | from .video_nodes.video_solid_color_background_node import VideoSolidColorBackgroundNode 38 | from .video_nodes.video_erase_elements_node import VideoEraseElementsNode 39 | from .video_nodes.video_mask_by_prompt_node import VideoMaskByPromptNode 40 | from .video_nodes.video_mask_by_key_points_node import VideoMaskByKeyPointsNode 41 | from .video_nodes.load_video import LoadVideoFramesNode 42 | from .video_nodes.preview_video_node_from_url import PreviewVideoURLNode 43 | 44 | -------------------------------------------------------------------------------- /nodes/text_2_image_hd_node.py: -------------------------------------------------------------------------------- 1 | import requests 2 | 3 | from .common import deserialize_and_get_comfy_key, postprocess_image 4 | 5 | 6 | class Text2ImageHDNode(): 7 | @classmethod 8 | def INPUT_TYPES(self): 9 | return { 10 | "required": { 11 | "api_key": ("STRING", ), 12 | }, 13 | "optional": { 14 | "prompt": ("STRING",), 15 | "aspect_ratio": (["1:1", "2:3", "3:2", "3:4", "4:3", "4:5", "5:4", "9:16", "16:9"], {"default": "4:3"}), 16 | "seed": ("INT", {"default": -1}), 17 | "negative_prompt": ("STRING", {"default": ""}), 18 | "steps_num": ("INT", {"default": 30}), 19 | "prompt_enhancement": ("INT", {"default": 0}), 20 | "text_guidance_scale": ("INT", {"default": 5}), 21 | "medium": (["photography", "art", "none"], {"default": "none"}), 22 | "content_moderation": ("INT", {"default": 0}), 23 | } 24 | } 25 | 26 | RETURN_TYPES = ("IMAGE",) 27 | RETURN_NAMES = ("output_image",) 28 | CATEGORY = "API Nodes" 29 | FUNCTION = "execute" 30 | 31 | def __init__(self): 32 | self.api_url = "https://engine.prod.bria-api.com/v1/text-to-image/hd/2.2" #"http://0.0.0.0:5000/v1/text-to-image/hd/2.3" 33 | 34 | def execute( 35 | self, api_key, prompt, aspect_ratio, seed, negative_prompt, 36 | steps_num, prompt_enhancement, text_guidance_scale, medium, content_moderation=0, 37 | ): 38 | api_key = deserialize_and_get_comfy_key(api_key) 39 | payload = { 40 | "prompt": prompt, 41 | "num_results": 1, 42 | "aspect_ratio": aspect_ratio, 43 | "sync": True, 44 | "seed": seed, 45 | "negative_prompt": 
negative_prompt, 46 | "steps_num": steps_num, 47 | "text_guidance_scale": text_guidance_scale, 48 | "prompt_enhancement": prompt_enhancement, 49 | "content_moderation": content_moderation, 50 | } 51 | if medium != "none": 52 | payload["medium"] = medium 53 | response = requests.post( 54 | self.api_url, 55 | json=payload, 56 | headers={"api_token": api_key} 57 | ) 58 | if response.status_code == 200: 59 | response_dict = response.json() 60 | image_response = requests.get(response_dict['result'][0]["urls"][0]) 61 | result_image = postprocess_image(image_response.content) 62 | return (result_image,) 63 | else: 64 | raise Exception(f"Error: API request failed with status code {response.status_code} and text {response.text}") 65 | -------------------------------------------------------------------------------- /nodes/attribution_by_image_node.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import torch 3 | 4 | from .common import deserialize_and_get_comfy_key, preprocess_image, image_to_base64, poll_status_until_completed 5 | 6 | class AttributionByImageNode(): 7 | @classmethod 8 | def INPUT_TYPES(self): 9 | return { 10 | "required": { 11 | "image": ("IMAGE",), 12 | "model_version": (["2.3", "3.0","3.2"], {"default": "2.3"}), 13 | "api_key": ("STRING", {"default": "BRIA_API_TOKEN"}), 14 | }, 15 | } 16 | 17 | RETURN_TYPES = ("STRING",) 18 | RETURN_NAMES = ("api_response",) 19 | CATEGORY = "API Nodes" 20 | FUNCTION = "execute" # This is the method that will be executed 21 | 22 | def __init__(self): 23 | self.api_url = "https://engine.prod.bria-api.com/v2/image/attribution/by_image" 24 | 25 | # Define the execute method as expected by ComfyUI 26 | def execute(self, image, model_version, api_key): 27 | if api_key.strip() == "" or api_key.strip() == "BRIA_API_TOKEN": 28 | raise Exception("Please insert a valid API key.") 29 | api_key = deserialize_and_get_comfy_key(api_key) 30 | 31 | # Check if image is tensor, if so, convert to NumPy array 32 | if isinstance(image, torch.Tensor): 33 | image = preprocess_image(image) 34 | 35 | # Convert image to base64 for the new API format 36 | image_base64 = image_to_base64(image) 37 | payload = { 38 | "image": image_base64, 39 | "model_version": model_version, 40 | } 41 | 42 | headers = { 43 | "Content-Type": "application/json", 44 | "api_token": f"{api_key}" 45 | } 46 | 47 | try: 48 | response = requests.post(self.api_url, json=payload, headers=headers) 49 | 50 | if response.status_code == 200 or response.status_code == 202: 51 | print('Initial Attribution via Images API request successful, polling for completion...') 52 | response_dict = response.json() 53 | 54 | status_url = response_dict.get('status_url') 55 | request_id = response_dict.get('request_id') 56 | 57 | if not status_url: 58 | raise Exception("No status_url returned from API") 59 | 60 | print(f"Request ID: {request_id}, Status URL: {status_url}") 61 | 62 | final_response = poll_status_until_completed(status_url, api_key) 63 | return (str(final_response.get("result",{}).get("content")),) 64 | else: 65 | raise Exception(f"Error: API request failed with status code {response.status_code} {response.text}") 66 | except Exception as e: 67 | raise Exception(f"{e}") 68 | -------------------------------------------------------------------------------- /images/Bria Logo.svg: -------------------------------------------------------------------------------- 1 | 22 | -------------------------------------------------------------------------------- 
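Several of the v2 nodes above (AttributionByImageNode here, and the remove-background and structured-prompt nodes later in this repository) share the same asynchronous pattern: the initial POST returns a request_id plus a status_url, and poll_status_until_completed() from nodes/common.py is then used to wait for the final result. common.py is not included in this section, so the following is only a rough sketch of what such a polling helper typically looks like; the status field names and terminal states are assumptions, not taken from the actual implementation.

    import time
    import requests

    def poll_until_completed(status_url: str, api_token: str,
                             interval_s: float = 2.0, timeout_s: float = 300.0) -> dict:
        # Repeatedly GET the status endpoint until the job finishes or the deadline passes.
        deadline = time.time() + timeout_s
        while time.time() < deadline:
            resp = requests.get(status_url, headers={"api_token": api_token})
            resp.raise_for_status()
            body = resp.json()
            status = str(body.get("status", "")).upper()   # assumed field name
            if status == "COMPLETED":
                return body                                 # caller reads body["result"]
            if status in ("FAILED", "ERROR"):
                raise RuntimeError(f"Bria request failed: {body}")
            time.sleep(interval_s)
        raise TimeoutError(f"Polling {status_url} timed out after {timeout_s} seconds")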
/workflows/eraser_genfill_workflow.json: -------------------------------------------------------------------------------- 1 | {"last_node_id":41,"last_link_id":62,"nodes":[{"id":14,"type":"Note","pos":[478,444],"size":[396.80859375,61.8046875],"flags":{},"order":0,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["Right click, and choose \"Open in Mask Editor\" to draw a mask of areas you want to erase."],"color":"#432","bgcolor":"#653"},{"id":15,"type":"Note","pos":[1080.3062744140625,445.9654541015625],"size":[306.28387451171875,58],"flags":{},"order":1,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["You can get your BRIA API token at:\nhttps://bria.ai/api/"],"color":"#432","bgcolor":"#653"},{"id":30,"type":"LoadImage","pos":[479,572],"size":[395.7845153808594,352.8512268066406],"flags":{},"order":2,"mode":0,"inputs":[],"outputs":[{"name":"IMAGE","type":"IMAGE","links":[56],"slot_index":0,"shape":3,"localized_name":"IMAGE"},{"name":"MASK","type":"MASK","links":[57],"slot_index":1,"shape":3,"localized_name":"MASK"}],"properties":{"Node name for S&R":"LoadImage"},"widgets_values":["clipspace/clipspace-mask-4068974.800000012.png [input]","image"]},{"id":37,"type":"PreviewImage","pos":[1504.4755859375,568.9967651367188],"size":[438.50262451171875,376.8338317871094],"flags":{},"order":6,"mode":0,"inputs":[{"name":"images","type":"IMAGE","link":58,"localized_name":"images"}],"outputs":[],"properties":{"Node name for S&R":"PreviewImage"},"widgets_values":[]},{"id":33,"type":"PreviewImage","pos":[1502.785888671875,1078.3564453125],"size":[433.29193115234375,357.1255187988281],"flags":{},"order":7,"mode":0,"inputs":[{"name":"images","type":"IMAGE","link":54,"localized_name":"images"}],"outputs":[],"properties":{"Node name for S&R":"PreviewImage"},"widgets_values":[]},{"id":40,"type":"LoadImage","pos":[541.1226806640625,1079.39697265625],"size":[315,314],"flags":{},"order":3,"mode":0,"inputs":[],"outputs":[{"name":"IMAGE","type":"IMAGE","links":[61],"slot_index":0,"localized_name":"IMAGE"},{"name":"MASK","type":"MASK","links":[62],"slot_index":1,"localized_name":"MASK"}],"properties":{"Node name for S&R":"LoadImage"},"widgets_values":["clipspace/clipspace-mask-4411367.100000024.png [input]","image"]},{"id":34,"type":"BriaGenFill","pos":[1032.416748046875,1073.984619140625],"size":[315,102],"flags":{},"order":5,"mode":0,"inputs":[{"name":"image","type":"IMAGE","link":61,"localized_name":"image"},{"name":"mask","type":"MASK","link":62,"localized_name":"mask"}],"outputs":[{"name":"output_image","type":"IMAGE","links":[54],"slot_index":0,"shape":3,"localized_name":"output_image"}],"properties":{"Node name for S&R":"BriaGenFill"},"widgets_values":["a blue coffee mug","BRIA_API_TOKEN"]},{"id":36,"type":"BriaEraser","pos":[1068.6063232421875,574.6080322265625],"size":[315,78],"flags":{},"order":4,"mode":0,"inputs":[{"name":"image","type":"IMAGE","link":56,"localized_name":"image"},{"name":"mask","type":"MASK","link":57,"localized_name":"mask"}],"outputs":[{"name":"output_image","type":"IMAGE","links":[58],"slot_index":0,"localized_name":"output_image"}],"properties":{"Node name for S&R":"BriaEraser"},"widgets_values":["BRIA_API_TOKEN"]}],"links":[[54,34,0,33,0,"IMAGE"],[56,30,0,36,0,"IMAGE"],[57,30,1,36,1,"MASK"],[58,36,0,37,0,"IMAGE"],[61,40,0,34,0,"IMAGE"],[62,40,1,34,1,"MASK"]],"groups":[],"config":{},"extra":{"ds":{"scale":0.6727499949325677,"offset":[131.53042816003972,-419.53430403204317]}},"version":0.4} 
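The workflow above wires LoadImage's IMAGE and MASK outputs into the BriaEraser and BriaGenFill nodes; on the Python side, EraserNode (shown earlier) hands both tensors to process_request() in nodes/common.py. That helper is not part of this section, so the snippet below only illustrates, under stated assumptions, how a ComfyUI MASK tensor can be serialized to a base64 PNG of the kind such an endpoint would typically accept.

    import base64
    import io
    import numpy as np
    import torch
    from PIL import Image

    def mask_tensor_to_base64_png(mask: torch.Tensor) -> str:
        # ComfyUI masks are float32 tensors shaped [batch, height, width] with values in [0, 1].
        arr = (mask[0].clamp(0, 1).cpu().numpy() * 255).astype(np.uint8)
        buf = io.BytesIO()
        Image.fromarray(arr, mode="L").save(buf, format="PNG")
        return base64.b64encode(buf.getvalue()).decode("utf-8")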
-------------------------------------------------------------------------------- /nodes/tailored_portrait_node.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import requests 3 | from PIL import Image 4 | import io 5 | import torch 6 | 7 | from .common import deserialize_and_get_comfy_key, image_to_base64, preprocess_image 8 | 9 | class TailoredPortraitNode(): 10 | @classmethod 11 | def INPUT_TYPES(self): 12 | return { 13 | "required": { 14 | "image": ("IMAGE",), # Input image from another node 15 | "tailored_model_id": ("STRING",), # API Key input with a default value 16 | "api_key": ("STRING", {"default": "BRIA_API_TOKEN"}), # API Key input with a default value 17 | }, 18 | "optional": { 19 | "seed": ("INT", {"default": 123456}), 20 | "tailored_model_influence": ("FLOAT", {"default": 0.9}), 21 | "id_strength": ("FLOAT", {"default": 0.7}), 22 | } 23 | } 24 | 25 | RETURN_TYPES = ("IMAGE",) 26 | RETURN_NAMES = ("output_image",) 27 | CATEGORY = "API Nodes" 28 | FUNCTION = "execute" # This is the method that will be executed 29 | 30 | def __init__(self): 31 | self.api_url = "https://engine.prod.bria-api.com/v1/tailored-gen/restyle_portrait" # Eraser API URL 32 | 33 | # Define the execute method as expected by ComfyUI 34 | def execute(self, image, tailored_model_id, api_key, seed, tailored_model_influence, id_strength): 35 | if api_key.strip() == "" or api_key.strip() == "BRIA_API_TOKEN": 36 | raise Exception("Please insert a valid API key.") 37 | api_key = deserialize_and_get_comfy_key(api_key) 38 | 39 | # Convert the image and mask directly to if isinstance(image, torch.Tensor): 40 | if isinstance(image, torch.Tensor): 41 | image = preprocess_image(image) 42 | 43 | image_base64 = image_to_base64(image) 44 | 45 | # Prepare the API request payload 46 | payload = { 47 | "id_image_file": f"{image_base64}", 48 | "tailored_model_id": int(tailored_model_id), 49 | "tailored_model_influence": tailored_model_influence, 50 | "id_strength": id_strength, 51 | "seed": seed 52 | } 53 | 54 | headers = { 55 | "Content-Type": "application/json", 56 | "api_token": f"{api_key}" 57 | } 58 | 59 | try: 60 | response = requests.post(self.api_url, json=payload, headers=headers) 61 | # Check for successful response 62 | if response.status_code == 200: 63 | print('response is 200') 64 | # Process the output image from API response 65 | response_dict = response.json() 66 | image_response = requests.get(response_dict['image_res']) 67 | result_image = Image.open(io.BytesIO(image_response.content)) 68 | result_image = result_image.convert("RGB") 69 | result_image = np.array(result_image).astype(np.float32) / 255.0 70 | result_image = torch.from_numpy(result_image)[None,] 71 | return (result_image,) 72 | else: 73 | raise Exception(f"Error: API request failed with status code {response.status_code} {response.text}") 74 | 75 | except Exception as e: 76 | raise Exception(f"{e}") 77 | -------------------------------------------------------------------------------- /nodes/reimagine_node.py: -------------------------------------------------------------------------------- 1 | import requests 2 | 3 | from .common import deserialize_and_get_comfy_key, postprocess_image, preprocess_image, image_to_base64 4 | 5 | 6 | class ReimagineNode(): 7 | @classmethod 8 | def INPUT_TYPES(self): 9 | return { 10 | "required": { 11 | "api_key": ("STRING", ), 12 | "prompt": ("STRING",), 13 | }, 14 | "optional": { 15 | "seed": ("INT", {"default": -1}), 16 | "steps_num": ("INT", 
{"default": 12}), # if used with tailored, possibly get this from the tailored model info node 17 | "structure_ref_influence": ("FLOAT", {"default": 0.75}), 18 | "fast": ("INT", {"default": 0}), # if used with tailored, possibly get this from the tailored model info node 19 | "structure_image": ("IMAGE", ), 20 | "tailored_model_id": ("STRING", ), 21 | "tailored_model_influence": ("FLOAT", {"default": 0.5}), 22 | "tailored_generation_prefix": ("STRING",), # if used with tailored, possibly get this from the tailored model info node 23 | "content_moderation": ("INT", {"default": 0}), 24 | } 25 | } 26 | 27 | RETURN_TYPES = ("IMAGE",) 28 | RETURN_NAMES = ("output_image",) 29 | CATEGORY = "API Nodes" 30 | FUNCTION = "execute" # This is the method that will be executed 31 | 32 | def __init__(self): 33 | self.api_url = "https://engine.prod.bria-api.com/v1/reimagine" #"http://0.0.0.0:5000/v1/reimagine" 34 | 35 | def execute( 36 | self, api_key, prompt, seed, 37 | steps_num, fast, structure_ref_influence, structure_image=None, 38 | tailored_model_id=None, tailored_model_influence=None, tailored_generation_prefix=None, 39 | content_moderation=0, 40 | ): 41 | api_key = deserialize_and_get_comfy_key(api_key) 42 | payload = { 43 | "prompt": tailored_generation_prefix + prompt, 44 | "num_results": 1, 45 | "sync": True, 46 | "seed": seed, 47 | "steps_num": steps_num, 48 | "include_generation_prefix": False, 49 | "content_moderation": content_moderation, 50 | } 51 | if structure_image is not None: 52 | structure_image = preprocess_image(structure_image) 53 | structure_image = image_to_base64(structure_image) 54 | payload["structure_image_file"] = structure_image 55 | payload["structure_ref_influence"] = structure_ref_influence 56 | if tailored_model_id is not None and tailored_model_id != "": 57 | payload["tailored_model_id"] = tailored_model_id 58 | payload["tailored_model_influence"] = tailored_model_influence 59 | response = requests.post( 60 | self.api_url, 61 | json=payload, 62 | headers={"api_token": api_key} 63 | ) 64 | if response.status_code == 200: 65 | response_dict = response.json() 66 | image_response = requests.get(response_dict['result'][0]["urls"][0]) 67 | result_image = postprocess_image(image_response.content) 68 | return (result_image,) 69 | else: 70 | raise Exception(f"Error: API request failed with status code {response.status_code} and text {response.text}") 71 | -------------------------------------------------------------------------------- /workflows/tailored_workflow.json: -------------------------------------------------------------------------------- 1 | {"last_node_id":21,"last_link_id":49,"nodes":[{"id":2,"type":"TailoredModelInfoNode","pos":[480.2208557128906,641.4290161132812],"size":[315,122],"flags":{},"order":0,"mode":0,"inputs":[],"outputs":[{"name":"generation_prefix","type":"STRING","links":[2],"slot_index":0,"localized_name":"generation_prefix"},{"name":"default_fast","type":"INT","links":[46],"slot_index":1,"localized_name":"default_fast"},{"name":"default_steps_num","type":"INT","links":[40],"slot_index":2,"localized_name":"default_steps_num"}],"properties":{"Node name for S&R":"TailoredModelInfoNode"},"widgets_values":["",""]},{"id":3,"type":"PreviewImage","pos":[1728.9140625,688.2759399414062],"size":[210,246],"flags":{},"order":6,"mode":0,"inputs":[{"name":"images","type":"IMAGE","link":41,"localized_name":"images"}],"outputs":[],"properties":{"Node name for 
S&R":"PreviewImage"},"widgets_values":[]},{"id":11,"type":"LoadImage","pos":[693.87060546875,831.6130981445312],"size":[315,314],"flags":{},"order":1,"mode":0,"inputs":[],"outputs":[{"name":"IMAGE","type":"IMAGE","links":[48],"slot_index":0,"localized_name":"IMAGE"},{"name":"MASK","type":"MASK","links":null,"localized_name":"MASK"}],"properties":{"Node name for S&R":"LoadImage"},"widgets_values":["example.png","image"]},{"id":5,"type":"JjkShowText","pos":[847.2867431640625,591.890625],"size":[315,76],"flags":{},"order":4,"mode":0,"inputs":[{"name":"text","type":"STRING","link":2,"widget":{"name":"text"}}],"outputs":[{"name":"text","type":"STRING","links":[49],"slot_index":0,"shape":6,"localized_name":"text"}],"properties":{"Node name for S&R":"JjkShowText"},"widgets_values":["A photo of a character named Sami, a siamese cat with blue eyes, "]},{"id":15,"type":"BriaTailoredGen","pos":[1207.595947265625,645.6781005859375],"size":[456,438],"flags":{},"order":5,"mode":0,"inputs":[{"name":"guidance_method_1_image","type":"IMAGE","link":48,"shape":7,"localized_name":"guidance_method_1_image"},{"name":"guidance_method_2_image","type":"IMAGE","link":null,"shape":7,"localized_name":"guidance_method_2_image"},{"name":"generation_prefix","type":"STRING","link":49,"widget":{"name":"generation_prefix"},"shape":7},{"name":"fast","type":"INT","link":46,"widget":{"name":"fast"},"shape":7},{"name":"steps_num","type":"INT","link":40,"widget":{"name":"steps_num"},"shape":7}],"outputs":[{"name":"output_image","type":"IMAGE","links":[41],"slot_index":0,"localized_name":"output_image"}],"properties":{"Node name for S&R":"BriaTailoredGen"},"widgets_values":["","","a cat","","4:3",-1,"randomize",1,"","",1,"controlnet_canny",1,"controlnet_canny",1]},{"id":21,"type":"Note","pos":[1215.2469482421875,522.4407348632812],"size":[449.75360107421875,58],"flags":{},"order":3,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["You can get your BRIA API token at: https://bria.ai/api/"],"color":"#432","bgcolor":"#653"},{"id":19,"type":"Note","pos":[484.5993957519531,486.4328918457031],"size":[306.0655212402344,89.87609100341797],"flags":{},"order":2,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["This node is used to retrieve default settings and prompt prefixes for the chosen tailored model."],"color":"#432","bgcolor":"#653"}],"links":[[2,2,0,5,0,"STRING"],[40,2,2,15,4,"INT"],[41,15,0,3,0,"IMAGE"],[46,2,1,15,3,"INT"],[48,11,0,15,0,"IMAGE"],[49,5,0,15,2,"STRING"]],"groups":[],"config":{},"extra":{"ds":{"scale":0.7627768444385483,"offset":[-122.90473166350671,-300.2018923615813]},"node_versions":{"comfyui-bria-api":"c72754d15b53a13ee0c0419d70401232c56b7fdb","comfy-core":"v0.3.8-1-gc441048","ComfyUI-Jjk-Nodes":"b3c99bb78a99551776b5eab1a820e1cd58f84f31"}},"version":0.4} -------------------------------------------------------------------------------- /nodes/generate_structured_prompt_node_v2.py: -------------------------------------------------------------------------------- 1 | import requests 2 | from .common import ( 3 | deserialize_and_get_comfy_key, 4 | image_to_base64, 5 | poll_status_until_completed, 6 | preprocess_image, 7 | ) 8 | import torch 9 | 10 | class GenerateStructuredPromptNodeV2: 11 | """Standard Structured Prompt Generation Node""" 12 | 13 | api_url = "https://engine.prod.bria-api.com/v2/structured_prompt/generate" 14 | 15 | @classmethod 16 | def INPUT_TYPES(cls): 17 | return { 18 | "required": { 19 | "api_token": ("STRING", {"default": "BRIA_API_TOKEN"}), 20 | "prompt": 
("STRING",), 21 | }, 22 | "optional": { 23 | "structured_prompt": ("STRING",), 24 | "images": ("IMAGE",), 25 | "seed": ("INT", {"default": 123456}), 26 | }, 27 | } 28 | 29 | RETURN_TYPES = ("STRING", "INT") 30 | RETURN_NAMES = ("structured_prompt", "seed") 31 | CATEGORY = "API Nodes" 32 | FUNCTION = "execute" 33 | 34 | def _validate_token(self, api_token: str): 35 | if api_token.strip() == "" or api_token.strip() == "BRIA_API_TOKEN": 36 | raise Exception("Please insert a valid API token.") 37 | 38 | def _build_payload( 39 | self, 40 | prompt, 41 | seed, 42 | structured_prompt, 43 | images=None 44 | ): 45 | payload = { 46 | "prompt": prompt, 47 | "seed": seed, 48 | } 49 | if structured_prompt: 50 | payload["structured_prompt"] = structured_prompt 51 | if images is not None: 52 | if isinstance(images, torch.Tensor): 53 | preprocess_images = preprocess_image(images) 54 | payload["images"] = [image_to_base64(preprocess_images)] 55 | return payload 56 | 57 | def execute( 58 | self, 59 | api_token, 60 | prompt, 61 | seed, 62 | structured_prompt, 63 | images=None, 64 | ): 65 | self._validate_token(api_token) 66 | payload = self._build_payload( 67 | prompt, 68 | seed, 69 | structured_prompt, 70 | images 71 | ) 72 | api_token = deserialize_and_get_comfy_key(api_token) 73 | 74 | headers = {"Content-Type": "application/json", "api_token": api_token} 75 | 76 | try: 77 | response = requests.post(self.api_url, json=payload, headers=headers) 78 | 79 | if response.status_code in (200, 202): 80 | print( 81 | f"Initial request successful to {self.api_url}, polling for completion..." 82 | ) 83 | response_dict = response.json() 84 | status_url = response_dict.get("status_url") 85 | request_id = response_dict.get("request_id") 86 | 87 | if not status_url: 88 | raise Exception("No status_url returned from API") 89 | 90 | print(f"Request ID: {request_id}, Status URL: {status_url}") 91 | 92 | final_response = poll_status_until_completed(status_url, api_token) 93 | 94 | result = final_response.get("result", {}) 95 | structured_prompt = result.get("structured_prompt", "") 96 | used_seed = result.get("seed", seed) 97 | 98 | return (structured_prompt, used_seed) 99 | 100 | raise Exception( 101 | f"Error: API request failed with status code {response.status_code} {response.text}" 102 | ) 103 | 104 | except Exception as e: 105 | raise Exception(f"{e}") -------------------------------------------------------------------------------- /nodes/generate_structured_prompt_lite_node_v2.py: -------------------------------------------------------------------------------- 1 | import requests 2 | from .common import ( 3 | deserialize_and_get_comfy_key, 4 | image_to_base64, 5 | poll_status_until_completed, 6 | preprocess_image, 7 | ) 8 | import torch 9 | 10 | 11 | class GenerateStructuredPromptLiteNodeV2: 12 | """Lite Structured Prompt Generation Node""" 13 | 14 | api_url = "https://engine.prod.bria-api.com/v2/structured_prompt/generate/lite" 15 | 16 | @classmethod 17 | def INPUT_TYPES(cls): 18 | return { 19 | "required": { 20 | "api_token": ("STRING", {"default": "BRIA_API_TOKEN"}), 21 | "prompt": ("STRING",), 22 | }, 23 | "optional": { 24 | "structured_prompt": ("STRING",), 25 | "images": ("IMAGE",), 26 | "seed": ("INT", {"default": 123456}), 27 | }, 28 | } 29 | 30 | RETURN_TYPES = ("STRING", "INT") 31 | RETURN_NAMES = ("structured_prompt", "seed") 32 | CATEGORY = "API Nodes" 33 | FUNCTION = "execute" 34 | 35 | def _validate_token(self, api_token: str): 36 | if api_token.strip() == "" or api_token.strip() == "BRIA_API_TOKEN": 37 
| raise Exception("Please insert a valid API token.") 38 | 39 | def _build_payload( 40 | self, 41 | prompt, 42 | seed, 43 | structured_prompt, 44 | images=None 45 | ): 46 | payload = { 47 | "prompt": prompt, 48 | "seed": seed, 49 | } 50 | if structured_prompt: 51 | payload["structured_prompt"] = structured_prompt 52 | if images is not None: 53 | if isinstance(images, torch.Tensor): 54 | preprocess_images = preprocess_image(images) 55 | payload["images"] = [image_to_base64(preprocess_images)] 56 | return payload 57 | 58 | def execute( 59 | self, 60 | api_token, 61 | prompt, 62 | seed, 63 | structured_prompt, 64 | images=None, 65 | ): 66 | self._validate_token(api_token) 67 | payload = self._build_payload( 68 | prompt, 69 | seed, 70 | structured_prompt, 71 | images 72 | ) 73 | api_token = deserialize_and_get_comfy_key(api_token) 74 | 75 | headers = {"Content-Type": "application/json", "api_token": api_token} 76 | 77 | try: 78 | response = requests.post(self.api_url, json=payload, headers=headers) 79 | 80 | if response.status_code in (200, 202): 81 | print( 82 | f"Initial request successful to {self.api_url}, polling for completion..." 83 | ) 84 | response_dict = response.json() 85 | status_url = response_dict.get("status_url") 86 | request_id = response_dict.get("request_id") 87 | 88 | if not status_url: 89 | raise Exception("No status_url returned from API") 90 | 91 | print(f"Request ID: {request_id}, Status URL: {status_url}") 92 | 93 | final_response = poll_status_until_completed(status_url, api_token) 94 | 95 | result = final_response.get("result", {}) 96 | structured_prompt = result.get("structured_prompt", "") 97 | used_seed = result.get("seed", seed) 98 | 99 | return (structured_prompt, used_seed) 100 | 101 | raise Exception( 102 | f"Error: API request failed with status code {response.status_code} {response.text}" 103 | ) 104 | 105 | except Exception as e: 106 | raise Exception(f"{e}") -------------------------------------------------------------------------------- /nodes/rmbg_node.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import requests 3 | from PIL import Image 4 | import io 5 | import torch 6 | 7 | from .common import deserialize_and_get_comfy_key, preprocess_image, image_to_base64, poll_status_until_completed 8 | 9 | class RmbgNode(): 10 | @classmethod 11 | def INPUT_TYPES(self): 12 | return { 13 | "required": { 14 | "image": ("IMAGE",), # Input image from another node 15 | "api_key": ("STRING", {"default": "BRIA_API_TOKEN"}), # API Key input with a default value 16 | }, 17 | "optional": { 18 | "visual_input_content_moderation": ("BOOLEAN", {"default": False}), 19 | "visual_output_content_moderation": ("BOOLEAN", {"default": False}), 20 | "preserve_alpha": ("BOOLEAN", {"default": True}), 21 | 22 | } 23 | } 24 | 25 | RETURN_TYPES = ("IMAGE",) 26 | RETURN_NAMES = ("output_image",) 27 | CATEGORY = "API Nodes" 28 | FUNCTION = "execute" # This is the method that will be executed 29 | 30 | def __init__(self): 31 | self.api_url = "https://engine.prod.bria-api.com/v2/image/edit/remove_background" # RMBG API URL 32 | 33 | # Define the execute method as expected by ComfyUI 34 | def execute(self, image, visual_input_content_moderation, visual_output_content_moderation, preserve_alpha, api_key): 35 | if api_key.strip() == "" or api_key.strip() == "BRIA_API_TOKEN": 36 | raise Exception("Please insert a valid API key.") 37 | api_key = deserialize_and_get_comfy_key(api_key) 38 | # Check if image is tensor, if so, convert to 
NumPy array 39 | if isinstance(image, torch.Tensor): 40 | image = preprocess_image(image) 41 | 42 | # Convert image to base64 for the new API format 43 | image_base64 = image_to_base64(image) 44 | payload = { 45 | "image": image_base64, 46 | "visual_input_content_moderation": visual_input_content_moderation, 47 | "visual_output_content_moderation":visual_output_content_moderation, 48 | "preserve_alpha":preserve_alpha 49 | } 50 | 51 | headers = { 52 | "Content-Type": "application/json", 53 | "api_token": f"{api_key}" 54 | } 55 | 56 | try: 57 | response = requests.post(self.api_url, json=payload, headers=headers) 58 | 59 | if response.status_code == 200 or response.status_code == 202: 60 | print('Initial RMBG request successful, polling for completion...') 61 | response_dict = response.json() 62 | 63 | status_url = response_dict.get('status_url') 64 | request_id = response_dict.get('request_id') 65 | 66 | if not status_url: 67 | raise Exception("No status_url returned from API") 68 | 69 | print(f"Request ID: {request_id}, Status URL: {status_url}") 70 | 71 | final_response = poll_status_until_completed(status_url, api_key) 72 | 73 | # Get the result image URL 74 | result_image_url = final_response['result']['image_url'] 75 | 76 | # Download and process the result image 77 | image_response = requests.get(result_image_url) 78 | result_image = Image.open(io.BytesIO(image_response.content)) 79 | result_image = np.array(result_image).astype(np.float32) / 255.0 80 | result_image = torch.from_numpy(result_image)[None,] 81 | 82 | return (result_image,) 83 | else: 84 | raise Exception(f"Error: API request failed with status code {response.status_code} {response.text}") 85 | 86 | except Exception as e: 87 | raise Exception(f"{e}") 88 | -------------------------------------------------------------------------------- /workflows/text_to_image_workflow.json: -------------------------------------------------------------------------------- 1 | { 2 | "last_node_id": 13, 3 | "last_link_id": 11, 4 | "nodes": [ 5 | { 6 | "id": 12, 7 | "type": "LoadImage", 8 | "pos": [ 9 | 669.7035522460938, 10 | 136.97129821777344 11 | ], 12 | "size": [ 13 | 315, 14 | 314 15 | ], 16 | "flags": {}, 17 | "order": 0, 18 | "mode": 0, 19 | "inputs": [], 20 | "outputs": [ 21 | { 22 | "name": "IMAGE", 23 | "type": "IMAGE", 24 | "links": [ 25 | 10 26 | ], 27 | "slot_index": 0 28 | }, 29 | { 30 | "name": "MASK", 31 | "type": "MASK", 32 | "links": null 33 | } 34 | ], 35 | "properties": { 36 | "Node name for S&R": "LoadImage" 37 | }, 38 | "widgets_values": [ 39 | "pexels-photo-3246665.png", 40 | "image" 41 | ] 42 | }, 43 | { 44 | "id": 11, 45 | "type": "PreviewImage", 46 | "pos": [ 47 | 1587.27001953125, 48 | 97.39167022705078 49 | ], 50 | "size": [ 51 | 210, 52 | 246 53 | ], 54 | "flags": {}, 55 | "order": 2, 56 | "mode": 0, 57 | "inputs": [ 58 | { 59 | "name": "images", 60 | "type": "IMAGE", 61 | "link": 11 62 | } 63 | ], 64 | "outputs": [], 65 | "properties": { 66 | "Node name for S&R": "PreviewImage" 67 | } 68 | }, 69 | { 70 | "id": 10, 71 | "type": "Text2ImageFastNode", 72 | "pos": [ 73 | 1058.320556640625, 74 | 96.64427185058594 75 | ], 76 | "size": [ 77 | 438.71258544921875, 78 | 394.27716064453125 79 | ], 80 | "flags": {}, 81 | "order": 1, 82 | "mode": 0, 83 | "inputs": [ 84 | { 85 | "name": "guidance_method_1_image", 86 | "type": "IMAGE", 87 | "link": null, 88 | "shape": 7 89 | }, 90 | { 91 | "name": "guidance_method_2_image", 92 | "type": "IMAGE", 93 | "link": null, 94 | "shape": 7 95 | }, 96 | { 97 | "name": 
"image_prompt_image", 98 | "type": "IMAGE", 99 | "link": 10, 100 | "shape": 7 101 | } 102 | ], 103 | "outputs": [ 104 | { 105 | "name": "output_image", 106 | "type": "IMAGE", 107 | "links": [ 108 | 11 109 | ], 110 | "slot_index": 0 111 | } 112 | ], 113 | "properties": { 114 | "Node name for S&R": "Text2ImageFastNode" 115 | }, 116 | "widgets_values": [ 117 | "BRIA_API_TOKEN", 118 | "A drawing of a lion on a table.\t", 119 | "4:3", 120 | 990, 121 | "randomize", 122 | 8, 123 | 0, 124 | "controlnet_canny", 125 | 1, 126 | "controlnet_canny", 127 | 1, 128 | "regular", 129 | 1 130 | ] 131 | } 132 | ], 133 | "links": [ 134 | [ 135 | 10, 136 | 12, 137 | 0, 138 | 10, 139 | 2, 140 | "IMAGE" 141 | ], 142 | [ 143 | 11, 144 | 10, 145 | 0, 146 | 11, 147 | 0, 148 | "IMAGE" 149 | ] 150 | ], 151 | "groups": [], 152 | "config": {}, 153 | "extra": { 154 | "ds": { 155 | "scale": 0.7513148009015777, 156 | "offset": [ 157 | 11.112206386364164, 158 | 66.47311795454547 159 | ] 160 | }, 161 | "node_versions": { 162 | "comfy-core": "v0.3.10-42-gff83865", 163 | "comfyui-bria-api": "499ec5d104cc5110407eafce468ce1d47ac168b3" 164 | }, 165 | "VHS_latentpreview": false, 166 | "VHS_latentpreviewrate": 0 167 | }, 168 | "version": 0.4 169 | } -------------------------------------------------------------------------------- /nodes/remove_foreground_node.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import requests 3 | from PIL import Image 4 | import io 5 | import torch 6 | 7 | from .common import deserialize_and_get_comfy_key, preprocess_image, image_to_base64, poll_status_until_completed 8 | 9 | 10 | class RemoveForegroundNode(): 11 | @classmethod 12 | def INPUT_TYPES(self): 13 | return { 14 | "required": { 15 | "image": ("IMAGE",), # Input image from another node 16 | "api_key": ("STRING", {"default": "BRIA_API_TOKEN"}), # API Key input with a default value 17 | }, 18 | "optional": { 19 | "visual_input_content_moderation": ("BOOLEAN", {"default": False}), 20 | "visual_output_content_moderation": ("BOOLEAN", {"default": False}), 21 | "preserve_alpha": ("BOOLEAN", {"default": True}), 22 | } 23 | } 24 | 25 | RETURN_TYPES = ("IMAGE",) 26 | RETURN_NAMES = ("output_image",) 27 | CATEGORY = "API Nodes" 28 | FUNCTION = "execute" # This is the method that will be executed 29 | 30 | def __init__(self): 31 | self.api_url = "https://engine.prod.bria-api.com/v2/image/edit/erase_foreground" # remove foreground API URL 32 | 33 | # Define the execute method as expected by ComfyUI 34 | def execute(self, image, visual_input_content_moderation, visual_output_content_moderation, preserve_alpha, api_key): 35 | if api_key.strip() == "" or api_key.strip() == "BRIA_API_TOKEN": 36 | raise Exception("Please insert a valid API key.") 37 | api_key = deserialize_and_get_comfy_key(api_key) 38 | 39 | # Check if image is tensor, if so, convert to NumPy array 40 | if isinstance(image, torch.Tensor): 41 | image = preprocess_image(image) 42 | 43 | # Prepare the API request payload 44 | # temporary save the image to /tmp 45 | # temp_img_path = "/tmp/temp_img.jpeg" 46 | # image.save(temp_img_path, format="JPEG") 47 | 48 | # files=[('file',('temp_img.jpeg', open(temp_img_path, 'rb'),'image/jpeg')) 49 | # ] 50 | payload = { 51 | "image": image_to_base64(image), 52 | "visual_input_content_moderation": visual_input_content_moderation, 53 | "visual_output_content_moderation":visual_output_content_moderation, 54 | "preserve_alpha": preserve_alpha 55 | } 56 | 57 | headers = { 58 | "Content-Type": 
"application/json", 59 | "api_token": f"{api_key}" 60 | } 61 | 62 | try: 63 | response = requests.post(self.api_url, json=payload, headers=headers) 64 | 65 | if response.status_code == 200 or response.status_code == 202: 66 | print('Initial request successful, polling for completion...') 67 | response_dict = response.json() 68 | status_url = response_dict.get('status_url') 69 | request_id = response_dict.get('request_id') 70 | 71 | if not status_url: 72 | raise Exception("No status_url returned from API") 73 | 74 | print(f"Request ID: {request_id}, Status URL: {status_url}") 75 | 76 | # Poll status URL until completion 77 | final_response = poll_status_until_completed(status_url, api_key) 78 | 79 | # Get the result image URL 80 | result_image_url = final_response['result']['image_url'] 81 | 82 | image_response = requests.get(result_image_url) 83 | result_image = Image.open(io.BytesIO(image_response.content)) 84 | result_image = np.array(result_image).astype(np.float32) / 255.0 85 | result_image = torch.from_numpy(result_image)[None,] 86 | return (result_image,) 87 | else: 88 | raise Exception(f"Error: API request failed with status code {response.status_code} {response.text}") 89 | 90 | except Exception as e: 91 | raise Exception(f"{e}") 92 | -------------------------------------------------------------------------------- /workflows/background_generation_workflow.json: -------------------------------------------------------------------------------- 1 | {"last_node_id":39,"last_link_id":65,"nodes":[{"id":34,"type":"LoadImage","pos":[19.94045066833496,1075.806640625],"size":[315,314],"flags":{},"order":0,"mode":0,"inputs":[],"outputs":[{"name":"IMAGE","type":"IMAGE","links":[61],"slot_index":0,"localized_name":"IMAGE"},{"name":"MASK","type":"MASK","links":null,"localized_name":"MASK"}],"properties":{"Node name for S&R":"LoadImage"},"widgets_values":["quirky-red-brick-brick-wallpaper.jpg","image"]},{"id":36,"type":"PreviewImage","pos":[468.7108154296875,1210.288818359375],"size":[210,246],"flags":{},"order":5,"mode":0,"inputs":[{"name":"images","type":"IMAGE","link":63,"localized_name":"images"}],"outputs":[],"properties":{"Node name for S&R":"PreviewImage"},"widgets_values":[]},{"id":33,"type":"PreviewImage","pos":[1317.4083251953125,1118.4864501953125],"size":[210,246],"flags":{},"order":8,"mode":0,"inputs":[{"name":"images","type":"IMAGE","link":59,"localized_name":"images"}],"outputs":[],"properties":{"Node name for S&R":"PreviewImage"},"widgets_values":[]},{"id":31,"type":"PreviewImage","pos":[1668.734619140625,796.7755737304688],"size":[210,246],"flags":{},"order":10,"mode":0,"inputs":[{"name":"images","type":"IMAGE","link":57,"localized_name":"images"}],"outputs":[],"properties":{"Node name for S&R":"PreviewImage"},"widgets_values":[]},{"id":29,"type":"PreviewImage","pos":[872.8858032226562,679.9429321289062],"size":[210,246],"flags":{},"order":6,"mode":0,"inputs":[{"name":"images","type":"IMAGE","link":55,"localized_name":"images"}],"outputs":[],"properties":{"Node name for S&R":"PreviewImage"},"widgets_values":[]},{"id":28,"type":"LoadImage","pos":[29.629886627197266,676.8370361328125],"size":[315,314],"flags":{},"order":1,"mode":0,"inputs":[],"outputs":[{"name":"IMAGE","type":"IMAGE","links":[54],"slot_index":0,"localized_name":"IMAGE"},{"name":"MASK","type":"MASK","links":null,"localized_name":"MASK"}],"properties":{"Node name for 
S&R":"LoadImage"},"widgets_values":["pexels-photo-1808399.jpeg","image"]},{"id":35,"type":"RemoveForegroundNode","pos":[414.2638854980469,1079.1705322265625],"size":[315,58],"flags":{},"order":3,"mode":0,"inputs":[{"name":"image","type":"IMAGE","link":61,"localized_name":"image"}],"outputs":[{"name":"output_image","type":"IMAGE","links":[62,63],"slot_index":0,"localized_name":"output_image"}],"properties":{"Node name for S&R":"RemoveForegroundNode"},"widgets_values":["BRIA_API_TOKEN"]},{"id":32,"type":"ReplaceBgNode","pos":[834.6115112304688,1063.0406494140625],"size":[315,294],"flags":{},"order":7,"mode":0,"inputs":[{"name":"image","type":"IMAGE","link":65,"localized_name":"image"},{"name":"ref_image","type":"IMAGE","link":62,"shape":7,"localized_name":"ref_image"}],"outputs":[{"name":"output_image","type":"IMAGE","links":[59,64],"slot_index":0,"localized_name":"output_image"}],"properties":{"Node name for S&R":"ReplaceBgNode"},"widgets_values":["BRIA_API_TOKEN",false,"",true,true,true,false,"",1978,"randomize"]},{"id":30,"type":"ImageExpansionNode","pos":[1265.001220703125,798.8323974609375],"size":[315,226],"flags":{},"order":9,"mode":0,"inputs":[{"name":"image","type":"IMAGE","link":64,"localized_name":"image"}],"outputs":[{"name":"output_image","type":"IMAGE","links":[57],"slot_index":0,"localized_name":"output_image"}],"properties":{"Node name for S&R":"ImageExpansionNode"},"widgets_values":["600,760","200, 0","BRIA_API_TOKEN","1200, 800","",1729,"randomize","Ugly, mutated"]},{"id":38,"type":"Note","pos":[436.7110900878906,566.2047119140625],"size":[306.28387451171875,58],"flags":{},"order":2,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["You can get your BRIA API token at:\nhttps://bria.ai/api/"],"color":"#432","bgcolor":"#653"},{"id":27,"type":"RmbgNode","pos":[430.8815002441406,678.7727661132812],"size":[315,58],"flags":{},"order":4,"mode":0,"inputs":[{"name":"image","type":"IMAGE","link":54,"localized_name":"image"}],"outputs":[{"name":"output_image","type":"IMAGE","links":[55,65],"slot_index":0,"localized_name":"output_image"}],"properties":{"Node name for S&R":"RmbgNode"},"widgets_values":["BRIA_API_TOKEN"]}],"links":[[51,5,0,15,2,"STRING"],[54,28,0,27,0,"IMAGE"],[55,27,0,29,0,"IMAGE"],[57,30,0,31,0,"IMAGE"],[59,32,0,33,0,"IMAGE"],[61,34,0,35,0,"IMAGE"],[62,35,0,32,1,"IMAGE"],[63,35,0,36,0,"IMAGE"],[64,32,0,30,0,"IMAGE"],[65,27,0,32,0,"IMAGE"]],"groups":[],"config":{},"extra":{"ds":{"scale":0.8140274938684037,"offset":[101.53311990208498,-477.0342684311694]},"node_versions":{"comfyui-bria-api":"c72754d15b53a13ee0c0419d70401232c56b7fdb","comfy-core":"v0.3.8-1-gc441048","ComfyUI-Jjk-Nodes":"b3c99bb78a99551776b5eab1a820e1cd58f84f31"}},"version":0.4} -------------------------------------------------------------------------------- /nodes/generative_fill_node.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import requests 3 | from PIL import Image 4 | import io 5 | import torch 6 | 7 | from .common import deserialize_and_get_comfy_key, preprocess_image, preprocess_mask, image_to_base64, poll_status_until_completed 8 | 9 | 10 | class GenFillNode(): 11 | @classmethod 12 | def INPUT_TYPES(self): 13 | return { 14 | "required": { 15 | "image": ("IMAGE",), # Input image from another node 16 | "mask": ("MASK",), # Binary mask input 17 | "prompt": ("STRING",), 18 | "api_key": ("STRING", {"default": "BRIA_API_TOKEN"}), # API Key input with a default value 19 | }, 20 | "optional": { 21 | "seed": ("INT", 
{"default": 123456}), 22 | "prompt_content_moderation": ("BOOLEAN", {"default": True}), 23 | "visual_input_content_moderation": ("BOOLEAN", {"default": False}), 24 | "visual_output_content_moderation": ("BOOLEAN", {"default": False}), 25 | 26 | 27 | } 28 | } 29 | 30 | RETURN_TYPES = ("IMAGE",) 31 | RETURN_NAMES = ("output_image",) 32 | CATEGORY = "API Nodes" 33 | FUNCTION = "execute" # This is the method that will be executed 34 | 35 | def __init__(self): 36 | self.api_url = "https://engine.prod.bria-api.com/v2/image/edit/gen_fill" 37 | 38 | # Define the execute method as expected by ComfyUI 39 | def execute(self, image, mask, prompt, api_key, seed, prompt_content_moderation, visual_input_content_moderation, visual_output_content_moderation): 40 | if api_key.strip() == "" or api_key.strip() == "BRIA_API_TOKEN": 41 | raise Exception("Please insert a valid API key.") 42 | api_key = deserialize_and_get_comfy_key(api_key) 43 | 44 | # Check if image and mask are tensors, if so, convert to NumPy arrays 45 | if isinstance(image, torch.Tensor): 46 | image = preprocess_image(image) 47 | if isinstance(mask, torch.Tensor): 48 | mask = preprocess_mask(mask) 49 | 50 | # Convert the image and mask directly to Base64 strings 51 | image_base64 = image_to_base64(image) 52 | mask_base64 = image_to_base64(mask) 53 | 54 | # Prepare the API request payload 55 | payload = { 56 | "image": image_base64, 57 | "mask": mask_base64, 58 | "prompt": prompt, 59 | "negative_prompt": "blurry", 60 | "seed": seed, 61 | "prompt_content_moderation":prompt_content_moderation, 62 | "visual_input_content_moderation":visual_input_content_moderation, 63 | "visual_output_content_moderation":visual_output_content_moderation, 64 | "version": 2 65 | } 66 | 67 | headers = { 68 | "Content-Type": "application/json", 69 | "api_token": f"{api_key}" 70 | } 71 | 72 | try: 73 | # Send initial request to get status URL 74 | response = requests.post(self.api_url, json=payload, headers=headers) 75 | 76 | if response.status_code == 200 or response.status_code == 202: 77 | print('Initial genfill request successful, polling for completion...') 78 | response_dict = response.json() 79 | status_url = response_dict.get('status_url') 80 | request_id = response_dict.get('request_id') 81 | 82 | if not status_url: 83 | raise Exception("No status_url returned from API") 84 | 85 | print(f"Request ID: {request_id}, Status URL: {status_url}") 86 | 87 | final_response = poll_status_until_completed(status_url, api_key) 88 | result_image_url = final_response['result']['image_url'] 89 | image_response = requests.get(result_image_url) 90 | result_image = Image.open(io.BytesIO(image_response.content)) 91 | result_image = result_image.convert("RGB") 92 | result_image = np.array(result_image).astype(np.float32) / 255.0 93 | result_image = torch.from_numpy(result_image)[None,] 94 | return (result_image,) 95 | else: 96 | raise Exception(f"Error: API request failed with status code {response.status_code} {response.text}") 97 | 98 | except Exception as e: 99 | raise Exception(f"{e}") 100 | -------------------------------------------------------------------------------- /nodes/tailored_gen_node.py: -------------------------------------------------------------------------------- 1 | import requests 2 | 3 | from .common import deserialize_and_get_comfy_key, postprocess_image, preprocess_image, image_to_base64 4 | 5 | 6 | class TailoredGenNode(): 7 | @classmethod 8 | def INPUT_TYPES(self): 9 | return { 10 | "required": { 11 | "model_id": ("STRING",), 12 | "api_key": 
("STRING", ), 13 | }, 14 | "optional": { 15 | "prompt": ("STRING",), 16 | "generation_prefix": ("STRING",), # possibly get this from the tailored model info node 17 | "aspect_ratio": (["1:1", "2:3", "3:2", "3:4", "4:3", "4:5", "5:4", "9:16", "16:9"], {"default": "4:3"}), 18 | "seed": ("INT", {"default": -1}), 19 | "model_influence": ("FLOAT", {"default": 1.0}), 20 | "negative_prompt": ("STRING", {"default": ""}), 21 | "fast": ("INT", {"default": 1}), # possibly get this from the tailored model info node 22 | "steps_num": ("INT", {"default": 8}), # possibly get this from the tailored model info node 23 | "guidance_method_1": (["controlnet_canny", "controlnet_depth", "controlnet_recoloring", "controlnet_color_grid"], {"default": "controlnet_canny"}), 24 | "guidance_method_1_scale": ("FLOAT", {"default": 1.0}), 25 | "guidance_method_1_image": ("IMAGE", ), 26 | "guidance_method_2": (["controlnet_canny", "controlnet_depth", "controlnet_recoloring", "controlnet_color_grid"], {"default": "controlnet_canny"}), 27 | "guidance_method_2_scale": ("FLOAT", {"default": 1.0}), 28 | "guidance_method_2_image": ("IMAGE", ), 29 | "content_moderation": ("INT", {"default": 0}), 30 | } 31 | } 32 | 33 | RETURN_TYPES = ("IMAGE",) 34 | RETURN_NAMES = ("output_image",) 35 | CATEGORY = "API Nodes" 36 | FUNCTION = "execute" # This is the method that will be executed 37 | 38 | def __init__(self): 39 | self.api_url = "https://engine.prod.bria-api.com/v1/text-to-image/tailored/" #"http://0.0.0.0:5000/v1/text-to-image/tailored/" 40 | 41 | def execute( 42 | self, model_id, api_key, prompt, generation_prefix, aspect_ratio, 43 | seed, model_influence, negative_prompt, fast, steps_num, 44 | guidance_method_1=None, guidance_method_1_scale=None, guidance_method_1_image=None, 45 | guidance_method_2=None, guidance_method_2_scale=None, guidance_method_2_image=None, 46 | content_moderation=0, 47 | ): 48 | api_key = deserialize_and_get_comfy_key(api_key) 49 | payload = { 50 | "prompt": generation_prefix + prompt, 51 | "num_results": 1, 52 | "aspect_ratio": aspect_ratio, 53 | "sync": True, 54 | "seed": seed, 55 | "model_influence": model_influence, 56 | "negative_prompt": negative_prompt, 57 | "fast": fast, 58 | "steps_num": steps_num, 59 | "include_generation_prefix": False, 60 | "content_moderation": content_moderation, 61 | } 62 | if guidance_method_1_image is not None: 63 | guidance_method_1_image = preprocess_image(guidance_method_1_image) 64 | guidance_method_1_image = image_to_base64(guidance_method_1_image) 65 | payload["guidance_method_1"] = guidance_method_1 66 | payload["guidance_method_1_scale"] = guidance_method_1_scale 67 | payload["guidance_method_1_image_file"] = guidance_method_1_image 68 | if guidance_method_2_image is not None: 69 | guidance_method_2_image = preprocess_image(guidance_method_2_image) 70 | guidance_method_2_image = image_to_base64(guidance_method_2_image) 71 | payload["guidance_method_2"] = guidance_method_2 72 | payload["guidance_method_2_scale"] = guidance_method_2_scale 73 | payload["guidance_method_2_image_file"] = guidance_method_2_image 74 | response = requests.post( 75 | self.api_url + model_id, 76 | json=payload, 77 | headers={"api_token": api_key} 78 | ) 79 | if response.status_code == 200: 80 | response_dict = response.json() 81 | image_response = requests.get(response_dict['result'][0]["urls"][0]) 82 | result_image = postprocess_image(image_response.content) 83 | return (result_image,) 84 | else: 85 | raise Exception(f"Error: API request failed with status code {response.status_code} and 
text {response.text}") 86 | -------------------------------------------------------------------------------- /nodes/text_2_image_fast_node.py: -------------------------------------------------------------------------------- 1 | import requests 2 | 3 | from .common import deserialize_and_get_comfy_key, postprocess_image, preprocess_image, image_to_base64 4 | 5 | 6 | class Text2ImageFastNode(): 7 | @classmethod 8 | def INPUT_TYPES(self): 9 | return { 10 | "required": { 11 | "api_key": ("STRING", ), 12 | }, 13 | "optional": { 14 | "prompt": ("STRING",), 15 | "aspect_ratio": (["1:1", "2:3", "3:2", "3:4", "4:3", "4:5", "5:4", "9:16", "16:9"], {"default": "4:3"}), 16 | "seed": ("INT", {"default": -1}), 17 | "steps_num": ("INT", {"default": 8}), 18 | "prompt_enhancement": ("INT", {"default": 0}), 19 | "guidance_method_1": (["controlnet_canny", "controlnet_depth", "controlnet_recoloring", "controlnet_color_grid"], {"default": "controlnet_canny"}), 20 | "guidance_method_1_scale": ("FLOAT", {"default": 1.0}), 21 | "guidance_method_1_image": ("IMAGE", ), 22 | "guidance_method_2": (["controlnet_canny", "controlnet_depth", "controlnet_recoloring", "controlnet_color_grid"], {"default": "controlnet_canny"}), 23 | "guidance_method_2_scale": ("FLOAT", {"default": 1.0}), 24 | "guidance_method_2_image": ("IMAGE", ), 25 | "image_prompt_mode": (["regular", "style_only"], {"default": "regular"}), 26 | "image_prompt_image": ("IMAGE", ), 27 | "image_prompt_scale": ("FLOAT", {"default": 1.0}), 28 | "content_moderation": ("INT", {"default": 0}), 29 | } 30 | } 31 | 32 | RETURN_TYPES = ("IMAGE",) 33 | RETURN_NAMES = ("output_image",) 34 | CATEGORY = "API Nodes" 35 | FUNCTION = "execute" 36 | 37 | def __init__(self): 38 | self.api_url = "https://engine.prod.bria-api.com/v1/text-to-image/fast/2.3" #"http://0.0.0.0:5000/v1/text-to-image/fast/2.3" 39 | 40 | def execute( 41 | self, api_key, prompt, aspect_ratio, seed, 42 | steps_num, prompt_enhancement, 43 | guidance_method_1=None, guidance_method_1_scale=None, guidance_method_1_image=None, 44 | guidance_method_2=None, guidance_method_2_scale=None, guidance_method_2_image=None, 45 | image_prompt_mode=None, image_prompt_image=None, image_prompt_scale=None, 46 | content_moderation=0, 47 | ): 48 | api_key = deserialize_and_get_comfy_key(api_key) 49 | payload = { 50 | "prompt": prompt, 51 | "num_results": 1, 52 | "aspect_ratio": aspect_ratio, 53 | "sync": True, 54 | "seed": seed, 55 | "steps_num": steps_num, 56 | "prompt_enhancement": prompt_enhancement, 57 | "content_moderation": content_moderation, 58 | } 59 | if guidance_method_1_image is not None: 60 | guidance_method_1_image = preprocess_image(guidance_method_1_image) 61 | guidance_method_1_image = image_to_base64(guidance_method_1_image) 62 | payload["guidance_method_1"] = guidance_method_1 63 | payload["guidance_method_1_scale"] = guidance_method_1_scale 64 | payload["guidance_method_1_image_file"] = guidance_method_1_image 65 | if guidance_method_2_image is not None: 66 | guidance_method_2_image = preprocess_image(guidance_method_2_image) 67 | guidance_method_2_image = image_to_base64(guidance_method_2_image) 68 | payload["guidance_method_2"] = guidance_method_2 69 | payload["guidance_method_2_scale"] = guidance_method_2_scale 70 | payload["guidance_method_2_image_file"] = guidance_method_2_image 71 | if image_prompt_image is not None: 72 | image_prompt_image = preprocess_image(image_prompt_image) 73 | image_prompt_image = image_to_base64(image_prompt_image) 74 | payload["image_prompt_mode"] = image_prompt_mode 75 | 
payload["image_prompt_file"] = image_prompt_image 76 | payload["image_prompt_scale"] = image_prompt_scale 77 | response = requests.post( 78 | self.api_url, 79 | json=payload, 80 | headers={"api_token": api_key} 81 | ) 82 | if response.status_code == 200: 83 | response_dict = response.json() 84 | image_response = requests.get(response_dict['result'][0]["urls"][0]) 85 | result_image = postprocess_image(image_response.content) 86 | return (result_image,) 87 | else: 88 | raise Exception(f"Error: API request failed with status code {response.status_code} and text {response.text}") 89 | -------------------------------------------------------------------------------- /nodes/text_2_image_base_node.py: -------------------------------------------------------------------------------- 1 | import requests 2 | 3 | from .common import deserialize_and_get_comfy_key, postprocess_image, preprocess_image, image_to_base64 4 | 5 | 6 | class Text2ImageBaseNode(): 7 | @classmethod 8 | def INPUT_TYPES(self): 9 | return { 10 | "required": { 11 | "api_key": ("STRING", ), 12 | }, 13 | "optional": { 14 | "prompt": ("STRING",), 15 | "aspect_ratio": (["1:1", "2:3", "3:2", "3:4", "4:3", "4:5", "5:4", "9:16", "16:9"], {"default": "4:3"}), 16 | "seed": ("INT", {"default": -1}), 17 | "negative_prompt": ("STRING", {"default": ""}), 18 | "steps_num": ("INT", {"default": 30}), 19 | "prompt_enhancement": ("INT", {"default": 0}), 20 | "text_guidance_scale": ("INT", {"default": 5}), 21 | "medium": (["photography", "art", "none"], {"default": "none"}), 22 | "guidance_method_1": (["controlnet_canny", "controlnet_depth", "controlnet_recoloring", "controlnet_color_grid"], {"default": "controlnet_canny"}), 23 | "guidance_method_1_scale": ("FLOAT", {"default": 1.0}), 24 | "guidance_method_1_image": ("IMAGE", ), 25 | "guidance_method_2": (["controlnet_canny", "controlnet_depth", "controlnet_recoloring", "controlnet_color_grid"], {"default": "controlnet_canny"}), 26 | "guidance_method_2_scale": ("FLOAT", {"default": 1.0}), 27 | "guidance_method_2_image": ("IMAGE", ), 28 | "image_prompt_mode": (["regular", "style_only"], {"default": "regular"}), 29 | "image_prompt_image": ("IMAGE", ), 30 | "image_prompt_scale": ("FLOAT", {"default": 1.0}), 31 | "content_moderation": ("INT", {"default": 0}), 32 | } 33 | } 34 | 35 | RETURN_TYPES = ("IMAGE",) 36 | RETURN_NAMES = ("output_image",) 37 | CATEGORY = "API Nodes" 38 | FUNCTION = "execute" # This is the method that will be executed 39 | 40 | def __init__(self): 41 | self.api_url = "https://engine.prod.bria-api.com/v1/text-to-image/base/3.2" 42 | 43 | def execute( 44 | self, api_key, prompt, aspect_ratio, seed, negative_prompt, 45 | steps_num, prompt_enhancement, text_guidance_scale, medium, 46 | guidance_method_1=None, guidance_method_1_scale=None, guidance_method_1_image=None, 47 | guidance_method_2=None, guidance_method_2_scale=None, guidance_method_2_image=None, 48 | image_prompt_mode=None, image_prompt_image=None, image_prompt_scale=None, 49 | content_moderation=0, 50 | ): 51 | api_key = deserialize_and_get_comfy_key(api_key) 52 | payload = { 53 | "prompt": prompt, 54 | "num_results": 1, 55 | "aspect_ratio": aspect_ratio, 56 | "sync": True, 57 | "seed": seed, 58 | "negative_prompt": negative_prompt, 59 | "steps_num": steps_num, 60 | "text_guidance_scale": text_guidance_scale, 61 | "prompt_enhancement": prompt_enhancement, 62 | "content_moderation": content_moderation, 63 | } 64 | if medium != "none": 65 | payload["medium"] = medium 66 | if guidance_method_1_image is not None: 67 | 
guidance_method_1_image = preprocess_image(guidance_method_1_image) 68 | guidance_method_1_image = image_to_base64(guidance_method_1_image) 69 | payload["guidance_method_1"] = guidance_method_1 70 | payload["guidance_method_1_scale"] = guidance_method_1_scale 71 | payload["guidance_method_1_image_file"] = guidance_method_1_image 72 | if guidance_method_2_image is not None: 73 | guidance_method_2_image = preprocess_image(guidance_method_2_image) 74 | guidance_method_2_image = image_to_base64(guidance_method_2_image) 75 | payload["guidance_method_2"] = guidance_method_2 76 | payload["guidance_method_2_scale"] = guidance_method_2_scale 77 | payload["guidance_method_2_image_file"] = guidance_method_2_image 78 | if image_prompt_image is not None: 79 | image_prompt_image = preprocess_image(image_prompt_image) 80 | image_prompt_image = image_to_base64(image_prompt_image) 81 | payload["image_prompt_mode"] = image_prompt_mode 82 | payload["image_prompt_file"] = image_prompt_image 83 | payload["image_prompt_scale"] = image_prompt_scale 84 | response = requests.post( 85 | self.api_url, 86 | json=payload, 87 | headers={"api_token": api_key} 88 | ) 89 | if response.status_code == 200: 90 | response_dict = response.json() 91 | image_response = requests.get(response_dict['result'][0]["urls"][0]) 92 | result_image = postprocess_image(image_response.content) 93 | return (result_image,) 94 | else: 95 | raise Exception(f"Error: API request failed with status code {response.status_code} and text {response.text}") 96 | -------------------------------------------------------------------------------- /nodes/video_nodes/remove_video_background_node.py: -------------------------------------------------------------------------------- 1 | import os 2 | import uuid 3 | import requests 4 | import folder_paths 5 | from ..common import deserialize_and_get_comfy_key, poll_status_until_completed 6 | from .video_utils import upload_video_to_s3 7 | 8 | class RemoveVideoBackgroundNode(): 9 | """ 10 | Removes the background from a video using the Bria API. 11 | 12 | Parameters: 13 | api_key (str): Your Bria API key. 14 | video_url (str): Local path or URL of the video to process. 15 | preserve_audio (bool, optional): Whether to keep the audio track. Default is True. 16 | output_container_and_codec (str, optional): Desired output format and codec. Default is "webm_vp9". 17 | 18 | Returns: 19 | result_video_url (STRING): URL of the video with background removed. 
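    Example (illustrative sketch only; ComfyUI normally invokes execute() through
    the node graph, and the token/URL below are placeholders):

        node = RemoveVideoBackgroundNode()
        result_video_url, = node.execute(
            api_key="<your BRIA API token>",
            video_url="https://example.com/input.mp4",
            preserve_audio=True,
            output_container_and_codec="webm_vp9",
        )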
20 | """ 21 | @classmethod 22 | def INPUT_TYPES(self): 23 | return { 24 | "required": { 25 | "api_key": ("STRING", {"default": "BRIA_API_TOKEN"}), 26 | "video_url": ("STRING", { 27 | "default": "", 28 | "tooltip": "URL of video to process (provide either frames or video_url)" 29 | }), 30 | }, 31 | "optional": { 32 | "preserve_audio": ("BOOLEAN", {"default": True}), 33 | "output_container_and_codec": ([ 34 | "mp4_h264", 35 | "mp4_h265", 36 | "webm_vp9", 37 | "mov_h265", 38 | "mov_proresks", 39 | "mkv_h264", 40 | "mkv_h265", 41 | "mkv_vp9", 42 | "gif" 43 | ], {"default": "webm_vp9"}), 44 | } 45 | } 46 | 47 | RETURN_TYPES = ("STRING",) 48 | RETURN_NAMES = ("result_video_url",) 49 | CATEGORY = "API Nodes" 50 | FUNCTION = "execute" 51 | 52 | def __init__(self): 53 | self.api_url = "https://engine.prod.bria-api.com/v2/video/edit/remove_background" 54 | 55 | def execute(self, api_key, video_url, preserve_audio=True, output_container_and_codec="webm_vp9",): 56 | if api_key.strip() == "" or api_key.strip() == "BRIA_API_TOKEN": 57 | raise Exception("Please insert a valid API key.") 58 | api_key = deserialize_and_get_comfy_key(api_key) 59 | video_path = None 60 | 61 | input_video_url = "" 62 | if video_url and video_url.strip() != "": 63 | if os.path.exists(video_url): 64 | filename = f"{ str(uuid.uuid4())}_{os.path.basename(video_url)}" 65 | input_video_url = upload_video_to_s3(video_url, filename, api_key) 66 | if video_url.startswith(folder_paths.get_temp_directory()): 67 | video_path = None 68 | else: 69 | input_video_url = video_url 70 | 71 | try: 72 | 73 | print("Step 3: Calling Bria API for background removal...") 74 | payload = { 75 | "video": input_video_url, 76 | "preserve_audio": preserve_audio, 77 | "output_container_and_codec": output_container_and_codec 78 | } 79 | 80 | headers = { 81 | "Content-Type": "application/json", 82 | "api_token": f"{api_key}" 83 | } 84 | 85 | response = requests.post(self.api_url, json=payload, headers=headers) 86 | 87 | if response.status_code == 200 or response.status_code == 202: 88 | print('Initial Video RMBG request successful, polling for completion...') 89 | response_dict = response.json() 90 | 91 | status_url = response_dict.get('status_url') 92 | request_id = response_dict.get('request_id') 93 | 94 | if not status_url: 95 | raise Exception("No status_url returned from API") 96 | 97 | print(f"Request ID: {request_id}, Status URL: {status_url}") 98 | 99 | final_response = poll_status_until_completed(status_url, api_key, timeout=3600, check_interval=5) 100 | 101 | result_video_url = final_response['result']['video_url'] 102 | 103 | print(f"Video processing completed. Result URL: {result_video_url}") 104 | print(f"Background removal complete! 
Use Preview Video URL node to view the result.") 105 | 106 | return (result_video_url,) 107 | else: 108 | raise Exception(f"Error: API request failed with status code {response.status_code} {response.text}") 109 | 110 | except Exception as e: 111 | raise Exception(f"{e}") 112 | finally: 113 | if video_path: 114 | try: 115 | if os.path.exists(video_path): 116 | os.unlink(video_path) 117 | except: 118 | pass 119 | 120 | -------------------------------------------------------------------------------- /nodes/video_nodes/preview_video_node_from_url.py: -------------------------------------------------------------------------------- 1 | import os 2 | import uuid 3 | import folder_paths 4 | import requests 5 | 6 | class PreviewVideoURLNode: 7 | """ 8 | Bria Preview Video URL Node 9 | 10 | This node takes a video URL as a string and downloads it to preview 11 | directly in the ComfyUI interface. 12 | 13 | Parameters: 14 | - video_url: URL of the video to preview (http/https) 15 | """ 16 | 17 | def __init__(self): 18 | self.output_dir = folder_paths.get_temp_directory() 19 | self.type = "temp" 20 | 21 | @classmethod 22 | def INPUT_TYPES(cls): 23 | return { 24 | "required": { 25 | "video_url": ("STRING", { 26 | "default": "", 27 | "multiline": False, 28 | "tooltip": "URL of the video to preview (http/https)" 29 | }), 30 | }, 31 | "hidden": { 32 | "prompt": "PROMPT", 33 | "extra_pnginfo": "EXTRA_PNGINFO" 34 | }, 35 | } 36 | 37 | RETURN_TYPES = () 38 | FUNCTION = "preview_video_url" 39 | OUTPUT_NODE = True 40 | CATEGORY = "API Nodes" 41 | DESCRIPTION = "Previews a video from URL directly in the ComfyUI interface." 42 | 43 | def preview_video_url(self, video_url, prompt=None, extra_pnginfo=None): 44 | """ 45 | Preview video from URL 46 | 47 | Args: 48 | video_url: URL of the video (http/https) 49 | prompt: Hidden parameter for ComfyUI workflow 50 | extra_pnginfo: Hidden parameter for ComfyUI metadata 51 | 52 | Returns: 53 | dict: UI output with video file for preview 54 | """ 55 | if not video_url or video_url.strip() == "": 56 | raise ValueError("video_url cannot be empty") 57 | 58 | if not video_url.startswith("http://") and not video_url.startswith("https://"): 59 | raise ValueError("video_url must be a valid HTTP or HTTPS URL") 60 | 61 | print(f"Downloading video from URL: {video_url}") 62 | 63 | # Download video from URL 64 | try: 65 | response = requests.get(video_url, stream=True, timeout=60) 66 | response.raise_for_status() 67 | 68 | # Determine file extension from URL or Content-Type 69 | content_type = response.headers.get('Content-Type', '') 70 | extension = self._get_extension_from_content_type(content_type, video_url) 71 | 72 | filename_prefix = str(uuid.uuid4()) + "_video_url_preview" 73 | 74 | # Get save path 75 | full_output_folder = self.output_dir 76 | filename = f"{filename_prefix}.{extension}" 77 | filepath = os.path.join(full_output_folder, filename) 78 | 79 | 80 | # Save video to temp directory 81 | print(f"Saving video to: {filepath}") 82 | with open(filepath, 'wb') as f: 83 | for chunk in response.iter_content(chunk_size=8192): 84 | if chunk: 85 | f.write(chunk) 86 | 87 | file_size = os.path.getsize(filepath) 88 | print(f"Video downloaded successfully: {filename} ({file_size / (1024*1024):.2f} MB)") 89 | 90 | return { 91 | "ui": { 92 | "images": [{ 93 | "filename": filename, 94 | "subfolder": "", 95 | "type": self.type, 96 | "format": extension 97 | }], 98 | "animated": (True,), 99 | "has_audio": (True,) 100 | } 101 | } 102 | 103 | except requests.exceptions.RequestException as 
e: 104 | raise Exception(f"Failed to download video from URL: {str(e)}") 105 | except Exception as e: 106 | raise Exception(f"Error previewing video: {str(e)}") 107 | 108 | def _get_extension_from_content_type(self, content_type, url): 109 | """ 110 | Determine file extension from Content-Type header or URL 111 | """ 112 | # Map common video MIME types to extensions 113 | content_type_map = { 114 | 'video/mp4': 'mp4', 115 | 'video/webm': 'webm', 116 | 'video/quicktime': 'mov', 117 | 'video/x-matroska': 'mkv', 118 | 'video/x-msvideo': 'avi', 119 | 'image/gif': 'gif', 120 | } 121 | 122 | # Try to get extension from Content-Type 123 | for mime_type, ext in content_type_map.items(): 124 | if mime_type in content_type.lower(): 125 | return ext 126 | 127 | # Try to get extension from URL 128 | url_path = url.split('?')[0] # Remove query parameters 129 | if '.' in url_path: 130 | url_ext = url_path.rsplit('.', 1)[-1].lower() 131 | if url_ext in ['mp4', 'webm', 'mov', 'mkv', 'avi', 'gif', 'webp']: 132 | return url_ext 133 | 134 | # Default to mp4 135 | return 'mp4' 136 | -------------------------------------------------------------------------------- /nodes/generate_image_lite_node_v2.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import torch 3 | 4 | from .common import ( 5 | deserialize_and_get_comfy_key, 6 | postprocess_image, 7 | preprocess_image, 8 | image_to_base64, 9 | poll_status_until_completed, 10 | ) 11 | 12 | 13 | class GenerateImageLiteNodeV2: 14 | """Lite Image Generation Node""" 15 | 16 | api_url = "https://engine.prod.bria-api.com/v2/image/generate/lite" 17 | 18 | @classmethod 19 | def INPUT_TYPES(cls): 20 | return { 21 | "required": { 22 | "api_token": ("STRING", {"default": "BRIA_API_TOKEN"}), 23 | "prompt": ("STRING",), 24 | }, 25 | "optional": { 26 | "model_version": (["FIBO"], {"default": "FIBO"}), 27 | "structured_prompt": ("STRING", {"default": ""}), 28 | "images": ("IMAGE",), 29 | "aspect_ratio": ( 30 | ["1:1", "2:3", "3:2", "3:4", "4:3", "4:5", "5:4", "9:16", "16:9"], 31 | {"default": "1:1"}, 32 | ), 33 | "steps_num": ( 34 | "INT", 35 | { 36 | "default": 8, 37 | "min": 8, 38 | "max": 30, 39 | }, 40 | ), 41 | "guidance_scale": ( 42 | "INT", 43 | { 44 | "default": 5, 45 | "min": 3, 46 | "max": 5, 47 | }, 48 | ), 49 | "seed": ("INT", {"default": 123456}), 50 | }, 51 | } 52 | 53 | RETURN_TYPES = ("IMAGE", "STRING", "INT") 54 | RETURN_NAMES = ("image", "structured_prompt", "seed") 55 | CATEGORY = "API Nodes" 56 | FUNCTION = "execute" 57 | 58 | def _validate_token(self, api_token: str): 59 | if api_token.strip() == "" or api_token.strip() == "BRIA_API_TOKEN": 60 | raise Exception("Please insert a valid API token.") 61 | 62 | def _build_payload( 63 | self, 64 | prompt, 65 | model_version, 66 | structured_prompt, 67 | aspect_ratio, 68 | steps_num, 69 | guidance_scale, 70 | seed, 71 | images=None, 72 | ): 73 | payload = { 74 | "prompt": prompt, 75 | "model_version": model_version, 76 | "aspect_ratio": aspect_ratio, 77 | "steps_num": steps_num, 78 | "guidance_scale": guidance_scale, 79 | "seed": seed, 80 | } 81 | if structured_prompt: 82 | payload["structured_prompt"] = structured_prompt 83 | 84 | if images is not None: 85 | if isinstance(images, torch.Tensor): 86 | preprocess_images = preprocess_image(images) 87 | payload["images"] = [image_to_base64(preprocess_images)] 88 | 89 | return payload 90 | 91 | def execute( 92 | self, 93 | api_token, 94 | prompt, 95 | model_version, 96 | structured_prompt, 97 | aspect_ratio, 98 
| steps_num, 99 | guidance_scale, 100 | seed, 101 | images=None, 102 | ): 103 | self._validate_token(api_token) 104 | payload = self._build_payload( 105 | prompt, 106 | model_version, 107 | structured_prompt, 108 | aspect_ratio, 109 | steps_num, 110 | guidance_scale, 111 | seed, 112 | images, 113 | ) 114 | api_token = deserialize_and_get_comfy_key(api_token) 115 | 116 | headers = {"Content-Type": "application/json", "api_token": api_token} 117 | 118 | try: 119 | response = requests.post(self.api_url, json=payload, headers=headers) 120 | 121 | if response.status_code in (200, 202): 122 | print( 123 | f"Initial request successful to {self.api_url}, polling for completion..." 124 | ) 125 | response_dict = response.json() 126 | status_url = response_dict.get("status_url") 127 | request_id = response_dict.get("request_id") 128 | 129 | if not status_url: 130 | raise Exception("No status_url returned from API") 131 | 132 | print(f"Request ID: {request_id}, Status URL: {status_url}") 133 | 134 | final_response = poll_status_until_completed(status_url, api_token) 135 | 136 | result = final_response.get("result", {}) 137 | result_image_url = result.get("image_url") 138 | structured_prompt = result.get("structured_prompt", "") 139 | used_seed = result.get("seed") 140 | 141 | image_response = requests.get(result_image_url) 142 | result_image = postprocess_image(image_response.content) 143 | 144 | return (result_image, structured_prompt, used_seed) 145 | 146 | raise Exception( 147 | f"Error: API request failed with status code {response.status_code} {response.text}" 148 | ) 149 | 150 | except Exception as e: 151 | raise Exception(f"{e}") -------------------------------------------------------------------------------- /nodes/generate_image_node_v2.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import torch 3 | 4 | from .common import ( 5 | deserialize_and_get_comfy_key, 6 | postprocess_image, 7 | preprocess_image, 8 | image_to_base64, 9 | poll_status_until_completed, 10 | ) 11 | 12 | 13 | class GenerateImageNodeV2: 14 | """Standard Image Generation Node""" 15 | 16 | api_url = "https://engine.prod.bria-api.com/v2/image/generate" 17 | 18 | @classmethod 19 | def INPUT_TYPES(cls): 20 | return { 21 | "required": { 22 | "api_token": ("STRING", {"default": "BRIA_API_TOKEN"}), 23 | "prompt": ("STRING",), 24 | }, 25 | "optional": { 26 | "model_version": (["FIBO"], {"default": "FIBO"}), 27 | "structured_prompt": ("STRING", {"default": ""}), 28 | "negative_prompt": ("STRING",), 29 | "images": ("IMAGE",), 30 | "aspect_ratio": ( 31 | ["1:1", "2:3", "3:2", "3:4", "4:3", "4:5", "5:4", "9:16", "16:9"], 32 | {"default": "1:1"}, 33 | ), 34 | "steps_num": ( 35 | "INT", 36 | { 37 | "default": 50, 38 | "min": 35, 39 | "max": 50, 40 | }, 41 | ), 42 | "guidance_scale": ( 43 | "INT", 44 | { 45 | "default": 5, 46 | "min": 3, 47 | "max": 5, 48 | }, 49 | ), 50 | "seed": ("INT", {"default": 123456}), 51 | }, 52 | } 53 | 54 | RETURN_TYPES = ("IMAGE", "STRING", "INT") 55 | RETURN_NAMES = ("image", "structured_prompt", "seed") 56 | CATEGORY = "API Nodes" 57 | FUNCTION = "execute" 58 | 59 | 60 | def _validate_token(self, api_token: str): 61 | if api_token.strip() == "" or api_token.strip() == "BRIA_API_TOKEN": 62 | raise Exception("Please insert a valid API token.") 63 | 64 | def _build_payload( 65 | self, 66 | prompt, 67 | model_version, 68 | structured_prompt, 69 | aspect_ratio, 70 | steps_num, 71 | guidance_scale, 72 | seed, 73 | negative_prompt=None, 74 | images=None, 
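        # negative_prompt may be None; it is still included in the payload built below.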
75 | ): 76 | payload = { 77 | "prompt": prompt, 78 | "model_version": model_version, 79 | "aspect_ratio": aspect_ratio, 80 | "steps_num": steps_num, 81 | "guidance_scale": guidance_scale, 82 | "seed": seed, 83 | "negative_prompt":negative_prompt 84 | } 85 | if structured_prompt: 86 | payload["structured_prompt"] = structured_prompt 87 | 88 | if images is not None: 89 | if isinstance(images, torch.Tensor): 90 | preprocess_images = preprocess_image(images) 91 | payload["images"] = [image_to_base64(preprocess_images)] 92 | 93 | return payload 94 | 95 | def execute( 96 | self, 97 | api_token, 98 | prompt, 99 | model_version, 100 | structured_prompt, 101 | aspect_ratio, 102 | steps_num, 103 | guidance_scale, 104 | seed, 105 | negative_prompt=None, 106 | images=None, 107 | ): 108 | self._validate_token(api_token) 109 | payload = self._build_payload( 110 | prompt, 111 | model_version, 112 | structured_prompt, 113 | aspect_ratio, 114 | steps_num, 115 | guidance_scale, 116 | seed, 117 | negative_prompt, 118 | images, 119 | ) 120 | api_token = deserialize_and_get_comfy_key(api_token) 121 | 122 | headers = {"Content-Type": "application/json", "api_token": api_token} 123 | 124 | try: 125 | response = requests.post(self.api_url, json=payload, headers=headers) 126 | 127 | if response.status_code in (200, 202): 128 | print( 129 | f"Initial request successful to {self.api_url}, polling for completion..." 130 | ) 131 | response_dict = response.json() 132 | status_url = response_dict.get("status_url") 133 | request_id = response_dict.get("request_id") 134 | 135 | if not status_url: 136 | raise Exception("No status_url returned from API") 137 | 138 | print(f"Request ID: {request_id}, Status URL: {status_url}") 139 | 140 | final_response = poll_status_until_completed(status_url, api_token) 141 | 142 | result = final_response.get("result", {}) 143 | result_image_url = result.get("image_url") 144 | structured_prompt = result.get("structured_prompt", "") 145 | used_seed = result.get("seed") 146 | 147 | image_response = requests.get(result_image_url) 148 | result_image = postprocess_image(image_response.content) 149 | 150 | return (result_image, structured_prompt, used_seed) 151 | 152 | raise Exception( 153 | f"Error: API request failed with status code {response.status_code} {response.text}" 154 | ) 155 | 156 | except Exception as e: 157 | raise Exception(f"{e}") -------------------------------------------------------------------------------- /nodes/video_nodes/video_mask_by_prompt_node.py: -------------------------------------------------------------------------------- 1 | import os 2 | import uuid 3 | import requests 4 | import folder_paths 5 | from ..common import deserialize_and_get_comfy_key, poll_status_until_completed 6 | from .video_utils import upload_video_to_s3 7 | 8 | class VideoMaskByPromptNode(): 9 | """ 10 | Generate a video mask using a text prompt with the Bria API. 11 | 12 | Parameters: 13 | prompt (str): Text prompt describing what to mask in the video. 14 | api_key (str): Your Bria API key. 15 | video_url (str): Local path or URL of the video to process. 16 | output_container_and_codec (str, optional): Desired output format and codec. Default is "mp4_h264". 17 | preserve_audio (bool, optional): Whether to keep the audio track. Default is True. 18 | 19 | Returns: 20 | mask_url (STRING): URL of the generated video mask. 
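    Example (illustrative sketch only; ComfyUI normally invokes execute() through
    the node graph, and the token/URL below are placeholders):

        node = VideoMaskByPromptNode()
        mask_url, = node.execute(
            prompt="the person in the foreground",
            api_key="<your BRIA API token>",
            video_url="https://example.com/input.mp4",
            output_container_and_codec="mp4_h264",
            preserve_audio=True,
        )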
21 | """ 22 | @classmethod 23 | def INPUT_TYPES(self): 24 | return { 25 | "required": { 26 | "prompt": ("STRING", {"default": ""}), 27 | "api_key": ("STRING", {"default": "BRIA_API_TOKEN"}), 28 | "video_url": ("STRING", { 29 | "default": "", 30 | "tooltip": "URL of video to process (provide either frames or video_url)" 31 | }), 32 | }, 33 | "optional": { 34 | "output_container_and_codec": ([ 35 | "mp4_h264", 36 | "mp4_h265", 37 | "webm_vp9", 38 | "mov_h265", 39 | "mov_proresks", 40 | "mkv_h264", 41 | "mkv_h265", 42 | "mkv_vp9", 43 | "gif" 44 | ], {"default": "mp4_h264"}), 45 | "preserve_audio": ("BOOLEAN", {"default": True}), 46 | } 47 | } 48 | 49 | RETURN_TYPES = ("STRING",) 50 | RETURN_NAMES = ("mask_url",) 51 | CATEGORY = "API Nodes" 52 | FUNCTION = "execute" 53 | 54 | def __init__(self): 55 | self.api_url = "https://engine.prod.bria-api.com/v2/video/segment/mask_by_prompt" 56 | 57 | def execute(self, prompt, api_key, video_url, output_container_and_codec="mp4_h264", preserve_audio=True): 58 | if api_key.strip() == "" or api_key.strip() == "BRIA_API_TOKEN": 59 | raise Exception("Please insert a valid API key.") 60 | api_key = deserialize_and_get_comfy_key(api_key) 61 | 62 | video_path = None 63 | 64 | if video_url and video_url.strip() != "": 65 | if os.path.exists(video_url): 66 | filename = f"{ str(uuid.uuid4())}_{os.path.basename(video_url)}" 67 | input_video_url = upload_video_to_s3(video_url, filename, api_key) 68 | 69 | if not input_video_url or not (input_video_url.startswith('http://') or input_video_url.startswith('https://')): 70 | raise Exception(f"Failed to upload video to S3. Got: {input_video_url}") 71 | 72 | 73 | if video_url.startswith(folder_paths.get_temp_directory()): 74 | video_path = None 75 | else: 76 | input_video_url = video_url 77 | 78 | try: 79 | 80 | print("Step 3: Calling Bria API for video mask generation...") 81 | payload = { 82 | "video": input_video_url, 83 | "prompt": prompt, 84 | "output_container_and_codec": output_container_and_codec, 85 | "preserve_audio": preserve_audio 86 | } 87 | 88 | headers = { 89 | "Content-Type": "application/json", 90 | "api_token": f"{api_key}" 91 | } 92 | 93 | response = requests.post(self.api_url, json=payload, headers=headers) 94 | 95 | if response.status_code == 200 or response.status_code == 202: 96 | print('Initial Video Mask by Prompt request successful, polling for completion...') 97 | response_dict = response.json() 98 | 99 | status_url = response_dict.get('status_url') 100 | request_id = response_dict.get('request_id') 101 | 102 | if not status_url: 103 | raise Exception("No status_url returned from API") 104 | 105 | print(f"Request ID: {request_id}, Status URL: {status_url}") 106 | 107 | final_response = poll_status_until_completed(status_url, api_key, timeout=3600, check_interval=5) 108 | 109 | result_mask_url = final_response['result']['mask_url'] 110 | 111 | print(f"Video mask processing completed. Result URL: {result_mask_url}") 112 | print(f"Video mask generation complete! 
Use Preview Video URL node to view the result.") 113 | 114 | return (result_mask_url,) 115 | else: 116 | raise Exception(f"Error: API request failed with status code {response.status_code} {response.text}") 117 | 118 | except Exception as e: 119 | raise Exception(f"{e}") 120 | finally: 121 | if video_path: 122 | try: 123 | if os.path.exists(video_path): 124 | os.unlink(video_path) 125 | except: 126 | pass -------------------------------------------------------------------------------- /nodes/video_nodes/video_increase_resolution_node.py: -------------------------------------------------------------------------------- 1 | import os 2 | import uuid 3 | import requests 4 | import folder_paths 5 | from ..common import deserialize_and_get_comfy_key, poll_status_until_completed 6 | from .video_utils import upload_video_to_s3 7 | 8 | class VideoIncreaseResolutionNode(): 9 | """ 10 | Increase the resolution of a video using the Bria API. 11 | 12 | Parameters: 13 | api_key (str): Your Bria API key. 14 | video_url (str): Local path or URL of the video to process. 15 | desired_increase (str, optional): Resolution increase factor, '2' or '4'. Default is '2'. 16 | output_container_and_codec (str, optional): Desired output format and codec. Default is "mp4_h264". 17 | preserve_audio (bool, optional): Whether to keep the audio track. Default is True. 18 | 19 | Returns: 20 | result_video_url (STRING): URL of the processed video with increased resolution. 21 | """ 22 | @classmethod 23 | def INPUT_TYPES(self): 24 | return { 25 | "required": { 26 | "api_key": ("STRING", {"default": "BRIA_API_TOKEN"}), 27 | "video_url": ("STRING", { 28 | "default": "", 29 | "tooltip": "URL of video to process (provide either frames or video_url)" 30 | }), 31 | }, 32 | "optional": { 33 | "desired_increase": (['2', '4'], {"default": '2'}), 34 | "output_container_and_codec": ([ 35 | "mp4_h264", 36 | "mp4_h265", 37 | "webm_vp9", 38 | "mov_h265", 39 | "mov_proresks", 40 | "mkv_h264", 41 | "mkv_h265", 42 | "mkv_vp9", 43 | "gif" 44 | ], {"default": "mp4_h264"}), 45 | "preserve_audio": ("BOOLEAN", {"default": True}), 46 | } 47 | } 48 | 49 | RETURN_TYPES = ("STRING",) 50 | RETURN_NAMES = ("result_video_url",) 51 | CATEGORY = "API Nodes" 52 | FUNCTION = "execute" 53 | 54 | def __init__(self): 55 | self.api_url = "https://engine.prod.bria-api.com/v2/video/edit/increase_resolution" 56 | 57 | def execute(self, api_key, video_url, desired_increase='2', output_container_and_codec="mp4_h264", preserve_audio=True): 58 | if api_key.strip() == "" or api_key.strip() == "BRIA_API_TOKEN": 59 | raise Exception("Please insert a valid API key.") 60 | api_key = deserialize_and_get_comfy_key(api_key) 61 | video_path = None 62 | 63 | if video_url and video_url.strip() != "": 64 | if os.path.exists(video_url): 65 | filename = f"{ str(uuid.uuid4())}_{os.path.basename(video_url)}" 66 | input_video_url = upload_video_to_s3(video_url, filename, api_key) 67 | 68 | if not input_video_url or not (input_video_url.startswith('http://') or input_video_url.startswith('https://')): 69 | raise Exception(f"Failed to upload video to S3. 
Got: {input_video_url}") 70 | 71 | if video_url.startswith(folder_paths.get_temp_directory()): 72 | video_path = None 73 | else: 74 | input_video_url = video_url 75 | 76 | try: 77 | 78 | print("Step 3: Calling Bria API for resolution increase...") 79 | payload = { 80 | "video": input_video_url, 81 | "desired_increase": desired_increase, 82 | "output_container_and_codec": output_container_and_codec, 83 | "preserve_audio": preserve_audio 84 | } 85 | 86 | headers = { 87 | "Content-Type": "application/json", 88 | "api_token": f"{api_key}" 89 | } 90 | 91 | response = requests.post(self.api_url, json=payload, headers=headers) 92 | 93 | if response.status_code == 200 or response.status_code == 202: 94 | print('Initial Video Increase Resolution request successful, polling for completion...') 95 | response_dict = response.json() 96 | 97 | status_url = response_dict.get('status_url') 98 | request_id = response_dict.get('request_id') 99 | 100 | if not status_url: 101 | raise Exception("No status_url returned from API") 102 | 103 | print(f"Request ID: {request_id}, Status URL: {status_url}") 104 | 105 | final_response = poll_status_until_completed(status_url, api_key, timeout=3600, check_interval=5) 106 | 107 | result_video_url = final_response['result']['video_url'] 108 | 109 | print(f"Video processing completed. Result URL: {result_video_url}") 110 | print(f"Resolution increase complete! Use Preview Video URL node to view the result.") 111 | 112 | return (result_video_url,) 113 | else: 114 | raise Exception(f"Error: API request failed with status code {response.status_code} {response.text}") 115 | 116 | except Exception as e: 117 | raise Exception(f"{e}") 118 | finally: 119 | if video_path: 120 | try: 121 | if os.path.exists(video_path): 122 | os.unlink(video_path) 123 | except: 124 | pass -------------------------------------------------------------------------------- /nodes/replace_bg_node.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import requests 3 | from PIL import Image 4 | import io 5 | import torch 6 | 7 | from .common import deserialize_and_get_comfy_key, image_to_base64, preprocess_image, preprocess_mask, poll_status_until_completed 8 | 9 | 10 | class ReplaceBgNode(): 11 | @classmethod 12 | def INPUT_TYPES(self): 13 | return { 14 | "required": { 15 | "image": ("IMAGE",), # Input image from another node 16 | "api_key": ("STRING", {"default": "BRIA_API_TOKEN"}), # API Key input with a default value 17 | }, 18 | "optional": { 19 | "mode": (["base", "fast", "high_control"], {"default": "base"}), 20 | "prompt": ("STRING",), 21 | "ref_images": ("IMAGE",), 22 | "refine_prompt": ("BOOLEAN", {"default": True}), 23 | "enhance_ref_images": ("BOOLEAN", {"default": True}), 24 | "original_quality": ("BOOLEAN", {"default": False}), 25 | "negative_prompt": ("STRING", {"default": None}), 26 | "seed": ("INT", {"default": 681794}), 27 | "visual_output_content_moderation": ("BOOLEAN", {"default": False}), 28 | "prompt_content_moderation": ("BOOLEAN", {"default": False}), 29 | "force_background_detection": ("BOOLEAN", {"default": False}), 30 | } 31 | } 32 | 33 | RETURN_TYPES = ("IMAGE",) 34 | RETURN_NAMES = ("output_image",) 35 | CATEGORY = "API Nodes" 36 | FUNCTION = "execute" # This is the method that will be executed 37 | 38 | def __init__(self): 39 | self.api_url = "https://engine.prod.bria-api.com/v2/image/edit/replace_background" # Replace BG API URL 40 | 41 | # Define the execute method as expected by ComfyUI 42 | def execute(self, 
image, mode, 43 | refine_prompt, 44 | original_quality, 45 | negative_prompt, 46 | seed, 47 | api_key, 48 | visual_output_content_moderation, 49 | prompt_content_moderation, 50 | enhance_ref_images, 51 | force_background_detection, 52 | prompt=None, 53 | ref_images=None,): 54 | if api_key.strip() == "" or api_key.strip() == "BRIA_API_TOKEN": 55 | raise Exception("Please insert a valid API key.") 56 | api_key = deserialize_and_get_comfy_key(api_key) 57 | 58 | # Check if image and mask are tensors, if so, convert to NumPy arrays 59 | if isinstance(image, torch.Tensor): 60 | image = preprocess_image(image) 61 | 62 | # Convert the image to Base64 string 63 | image_base64 = image_to_base64(image) 64 | 65 | if ref_images is not None: 66 | ref_images = preprocess_image(ref_images) 67 | ref_images = [image_to_base64(ref_images)] 68 | else: 69 | ref_images=[] 70 | 71 | # Prepare the API request payload for v2 API 72 | payload = { 73 | "image": image_base64, 74 | "mode": mode, 75 | "prompt": prompt, 76 | "ref_images":ref_images, 77 | "refine_prompt": refine_prompt, 78 | "original_quality": original_quality, 79 | "negative_prompt": negative_prompt, 80 | "seed": seed, 81 | "prompt_content_moderation": prompt_content_moderation, 82 | "visual_output_content_moderation":visual_output_content_moderation, 83 | "enhance_ref_images":enhance_ref_images, 84 | "force_background_detection": force_background_detection 85 | } 86 | 87 | headers = { 88 | "Content-Type": "application/json", 89 | "api_token": f"{api_key}" 90 | } 91 | 92 | try: 93 | response = requests.post(self.api_url, json=payload, headers=headers) 94 | 95 | if response.status_code == 200 or response.status_code == 202: 96 | print('Initial replace background request successful, polling for completion...') 97 | response_dict = response.json() 98 | status_url = response_dict.get('status_url') 99 | request_id = response_dict.get('request_id') 100 | 101 | if not status_url: 102 | raise Exception("No status_url returned from API") 103 | 104 | print(f"Request ID: {request_id}, Status URL: {status_url}") 105 | 106 | # Poll status URL until completion 107 | final_response = poll_status_until_completed(status_url, api_key) 108 | 109 | # Get the result image URL 110 | result_image_url = final_response['result']['image_url'] 111 | 112 | # Download and process the result image 113 | image_response = requests.get(result_image_url) 114 | result_image = Image.open(io.BytesIO(image_response.content)) 115 | result_image = result_image.convert("RGB") 116 | result_image = np.array(result_image).astype(np.float32) / 255.0 117 | result_image = torch.from_numpy(result_image)[None,] 118 | 119 | return (result_image,) 120 | else: 121 | raise Exception(f"Error: API request failed with status code {response.status_code}{response.text}") 122 | 123 | except Exception as e: 124 | raise Exception(f"{e}") 125 | -------------------------------------------------------------------------------- /nodes/video_nodes/video_erase_elements_node.py: -------------------------------------------------------------------------------- 1 | import os 2 | import uuid 3 | import requests 4 | import folder_paths 5 | from ..common import deserialize_and_get_comfy_key, poll_status_until_completed 6 | from .video_utils import upload_video_to_s3 7 | 8 | class VideoEraseElementsNode(): 9 | """ 10 | Erase elements from a video using the Bria API. 11 | 12 | Parameters: 13 | api_key (str): Your Bria API key. 14 | video_url (str): Local path or URL of the video to process. 
15 | mask_url (str, optional): URL of a mask video for selective erasing. 16 | output_container_and_codec (str, optional): Desired output format and codec. Default is "mp4_h264". 17 | preserve_audio (bool, optional): Whether to keep the audio track. Default is True. 18 | 19 | Returns: 20 | result_video_url (STRING): URL of the processed video with elements erased. 21 | """ 22 | @classmethod 23 | def INPUT_TYPES(self): 24 | return { 25 | "required": { 26 | "api_key": ("STRING", {"default": "BRIA_API_TOKEN"}), 27 | "video_url": ("STRING", { 28 | "default": "", 29 | "tooltip": "URL of video to process (provide either frames or video_url)" 30 | }), 31 | }, 32 | "optional": { 33 | "mask_url": ("STRING", { 34 | "default": "", 35 | "tooltip": "URL of mask video (optional)" 36 | }), 37 | "output_container_and_codec": ([ 38 | "mp4_h264", 39 | "mp4_h265", 40 | "webm_vp9", 41 | "mov_h265", 42 | "mov_proresks", 43 | "mkv_h264", 44 | "mkv_h265", 45 | "mkv_vp9", 46 | "gif" 47 | ], {"default": "mp4_h264"}), 48 | "preserve_audio": ("BOOLEAN", {"default": True}), 49 | } 50 | } 51 | 52 | RETURN_TYPES = ("STRING",) 53 | RETURN_NAMES = ("result_video_url",) 54 | CATEGORY = "API Nodes" 55 | FUNCTION = "execute" 56 | 57 | def __init__(self): 58 | self.api_url = "https://engine.prod.bria-api.com/v2/video/edit/erase" 59 | 60 | def execute(self, api_key, video_url, mask_url="", output_container_and_codec="mp4_h264", preserve_audio=True): 61 | if api_key.strip() == "" or api_key.strip() == "BRIA_API_TOKEN": 62 | raise Exception("Please insert a valid API key.") 63 | api_key = deserialize_and_get_comfy_key(api_key) 64 | video_path = None 65 | 66 | if video_url and video_url.strip() != "": 67 | # Check if video_url is a local file path or a URL 68 | if os.path.exists(video_url): 69 | filename = f"{ str(uuid.uuid4())}_{os.path.basename(video_url)}" 70 | input_video_url = upload_video_to_s3(video_url, filename, api_key) 71 | 72 | if not input_video_url or not (input_video_url.startswith('http://') or input_video_url.startswith('https://')): 73 | raise Exception(f"Failed to upload video to S3. Got: {input_video_url}") 74 | if video_url.startswith(folder_paths.get_temp_directory()): 75 | video_path = None 76 | else: 77 | input_video_url = video_url 78 | 79 | try: 80 | 81 | print("Step 3: Calling Bria API for element erasure...") 82 | payload = { 83 | "video": input_video_url, 84 | "mask": mask_url, 85 | "output_container_and_codec": output_container_and_codec, 86 | "preserve_audio": preserve_audio 87 | } 88 | 89 | headers = { 90 | "Content-Type": "application/json", 91 | "api_token": f"{api_key}" 92 | } 93 | 94 | response = requests.post(self.api_url, json=payload, headers=headers) 95 | 96 | if response.status_code == 200 or response.status_code == 202: 97 | print('Initial Video Erase Elements request successful, polling for completion...') 98 | response_dict = response.json() 99 | 100 | status_url = response_dict.get('status_url') 101 | request_id = response_dict.get('request_id') 102 | 103 | if not status_url: 104 | raise Exception("No status_url returned from API") 105 | 106 | print(f"Request ID: {request_id}, Status URL: {status_url}") 107 | 108 | final_response = poll_status_until_completed(status_url, api_key, timeout=3600, check_interval=5) 109 | 110 | result_video_url = final_response['result']['video_url'] 111 | 112 | print(f"Video processing completed. Result URL: {result_video_url}") 113 | print(f"Element erasure complete! 
Use Preview Video URL node to view the result.") 114 | 115 | return (result_video_url,) 116 | else: 117 | raise Exception(f"Error: API request failed with status code {response.status_code} {response.text}") 118 | 119 | except Exception as e: 120 | raise Exception(f"{e}") 121 | finally: 122 | if video_path: 123 | try: 124 | if os.path.exists(video_path): 125 | os.unlink(video_path) 126 | except: 127 | pass -------------------------------------------------------------------------------- /nodes/video_nodes/video_mask_by_key_points_node.py: -------------------------------------------------------------------------------- 1 | import os 2 | import uuid 3 | import requests 4 | import folder_paths 5 | from ..common import deserialize_and_get_comfy_key, poll_status_until_completed 6 | from .video_utils import upload_video_to_s3 7 | import json 8 | 9 | class VideoMaskByKeyPointsNode(): 10 | """ 11 | Generate a video mask using key points with the Bria API. 12 | 13 | Parameters: 14 | key_points (str): JSON string of key points for masking. 15 | api_key (str): Your Bria API key. 16 | video_url (str): Local path or URL of the video to process. 17 | output_container_and_codec (str, optional): Desired output format and codec. Default is "mp4_h264". 18 | preserve_audio (bool, optional): Whether to keep the audio track. Default is True. 19 | 20 | Returns: 21 | mask_url (STRING): URL of the generated video mask. 22 | """ 23 | @classmethod 24 | def INPUT_TYPES(self): 25 | return { 26 | "required": { 27 | "key_points": ("STRING", {"default": "[]", "multiline": True}), 28 | "api_key": ("STRING", {"default": "BRIA_API_TOKEN"}), 29 | "video_url": ("STRING", { 30 | "default": "", 31 | "tooltip": "URL of video to process (provide either frames or video_url)" 32 | }), 33 | }, 34 | "optional": { 35 | "output_container_and_codec": ([ 36 | "mp4_h264", 37 | "mp4_h265", 38 | "webm_vp9", 39 | "mov_h265", 40 | "mov_proresks", 41 | "mkv_h264", 42 | "mkv_h265", 43 | "mkv_vp9", 44 | "gif" 45 | ], {"default": "mp4_h264"}), 46 | "preserve_audio": ("BOOLEAN", {"default": True}), 47 | } 48 | } 49 | 50 | RETURN_TYPES = ("STRING",) 51 | RETURN_NAMES = ("mask_url",) 52 | CATEGORY = "API Nodes" 53 | FUNCTION = "execute" 54 | 55 | def __init__(self): 56 | self.api_url = "https://engine.prod.bria-api.com/v2/video/segment/mask_by_key_points" 57 | 58 | def execute(self, key_points, api_key, video_url, output_container_and_codec="mp4_h264", preserve_audio=True): 59 | if api_key.strip() == "" or api_key.strip() == "BRIA_API_TOKEN": 60 | raise Exception("Please insert a valid API key.") 61 | api_key = deserialize_and_get_comfy_key(api_key) 62 | 63 | try: 64 | key_points_array = json.loads(key_points) 65 | except json.JSONDecodeError as e: 66 | raise Exception(f"Invalid JSON format for key_points: {e}") 67 | 68 | video_path = None 69 | 70 | if video_url and video_url.strip() != "": 71 | if os.path.exists(video_url): 72 | filename = f"{ str(uuid.uuid4())}_{os.path.basename(video_url)}" 73 | input_video_url = upload_video_to_s3(video_url, filename, api_key) 74 | 75 | if not input_video_url or not (input_video_url.startswith('http://') or input_video_url.startswith('https://')): 76 | raise Exception(f"Failed to upload video to S3. 
Got: {input_video_url}") 77 | 78 | 79 | if video_url.startswith(folder_paths.get_temp_directory()): 80 | video_path = None 81 | else: 82 | input_video_url = video_url 83 | 84 | try: 85 | 86 | print("Step 3: Calling Bria API for video mask generation by key points...") 87 | payload = { 88 | "video": input_video_url, 89 | "key_points": key_points_array, 90 | "output_container_and_codec": output_container_and_codec, 91 | "preserve_audio": preserve_audio 92 | } 93 | 94 | headers = { 95 | "Content-Type": "application/json", 96 | "api_token": f"{api_key}" 97 | } 98 | 99 | response = requests.post(self.api_url, json=payload, headers=headers) 100 | 101 | if response.status_code == 200 or response.status_code == 202: 102 | print('Initial Video Mask by Key Points request successful, polling for completion...') 103 | response_dict = response.json() 104 | 105 | status_url = response_dict.get('status_url') 106 | request_id = response_dict.get('request_id') 107 | 108 | if not status_url: 109 | raise Exception("No status_url returned from API") 110 | 111 | print(f"Request ID: {request_id}, Status URL: {status_url}") 112 | 113 | final_response = poll_status_until_completed(status_url, api_key, timeout=3600, check_interval=5) 114 | 115 | result_mask_url = final_response['result']['mask_url'] 116 | 117 | print(f"Video mask processing completed. Result URL: {result_mask_url}") 118 | print(f"Video mask generation complete! Use Preview Video URL node to view the result.") 119 | 120 | return (result_mask_url,) 121 | else: 122 | raise Exception(f"Error: API request failed with status code {response.status_code} {response.text}") 123 | 124 | except Exception as e: 125 | raise Exception(f"{e}") 126 | finally: 127 | if video_path: 128 | try: 129 | if os.path.exists(video_path): 130 | os.unlink(video_path) 131 | except: 132 | pass -------------------------------------------------------------------------------- /nodes/video_nodes/video_solid_color_background_node.py: -------------------------------------------------------------------------------- 1 | import os 2 | import uuid 3 | import requests 4 | import folder_paths 5 | from ..common import deserialize_and_get_comfy_key, poll_status_until_completed 6 | from .video_utils import upload_video_to_s3 7 | 8 | class VideoSolidColorBackgroundNode(): 9 | """ 10 | Apply a solid color background to a video using the Bria API. 11 | 12 | Parameters: 13 | api_key (str): Your Bria API key. 14 | video_url (str): Local path or URL of the video to process. 15 | background_color (str, optional): Color to apply as background. Default is "Transparent". 16 | output_container_and_codec (str, optional): Desired output format and codec. Default is "mp4_h264". 17 | preserve_audio (bool, optional): Whether to keep the audio track. Default is True. 18 | 19 | Returns: 20 | result_video_url (STRING): URL of the video with the solid color background applied. 
21 | """ 22 | @classmethod 23 | def INPUT_TYPES(self): 24 | return { 25 | "required": { 26 | "api_key": ("STRING", {"default": "BRIA_API_TOKEN"}), 27 | "video_url": ("STRING", { 28 | "default": "", 29 | "tooltip": "URL of video to process (provide either frames or video_url)" 30 | }), 31 | }, 32 | "optional": { 33 | "background_color": ([ 34 | "Transparent", 35 | "Black", 36 | "White", 37 | "Gray", 38 | "Red", 39 | "Green", 40 | "Blue", 41 | "Yellow", 42 | "Cyan", 43 | "Magenta", 44 | "Orange" 45 | ], {"default": "Transparent"}), 46 | "output_container_and_codec": ([ 47 | "mp4_h264", 48 | "mp4_h265", 49 | "webm_vp9", 50 | "mov_h265", 51 | "mov_proresks", 52 | "mkv_h264", 53 | "mkv_h265", 54 | "mkv_vp9", 55 | "gif" 56 | ], {"default": "webm_vp9"}), 57 | "preserve_audio": ("BOOLEAN", {"default": True}), 58 | } 59 | } 60 | 61 | RETURN_TYPES = ("STRING",) 62 | RETURN_NAMES = ("result_video_url",) 63 | CATEGORY = "API Nodes" 64 | FUNCTION = "execute" 65 | 66 | def __init__(self): 67 | self.api_url = "https://engine.prod.bria-api.com/v2/video/edit/remove_background" 68 | 69 | def execute(self, api_key, video_url, background_color="Transparent", output_container_and_codec="webm_vp9", preserve_audio=True): 70 | if api_key.strip() == "" or api_key.strip() == "BRIA_API_TOKEN": 71 | raise Exception("Please insert a valid API key.") 72 | api_key = deserialize_and_get_comfy_key(api_key) 73 | video_path = None 74 | 75 | if video_url and video_url.strip() != "": 76 | if os.path.exists(video_url): 77 | filename = f"{ str(uuid.uuid4())}_{os.path.basename(video_url)}" 78 | input_video_url = upload_video_to_s3(video_url, filename, api_key) 79 | 80 | if not input_video_url or not (input_video_url.startswith('http://') or input_video_url.startswith('https://')): 81 | raise Exception(f"Failed to upload video to S3. Got: {input_video_url}") 82 | 83 | 84 | if video_url.startswith(folder_paths.get_temp_directory()): 85 | video_path = None 86 | else: 87 | input_video_url = video_url 88 | 89 | try: 90 | 91 | print("Step 3: Calling Bria API for solid color background...") 92 | payload = { 93 | "video": input_video_url, 94 | "background_color": background_color, 95 | "output_container_and_codec": output_container_and_codec, 96 | "preserve_audio": preserve_audio 97 | } 98 | 99 | headers = { 100 | "Content-Type": "application/json", 101 | "api_token": f"{api_key}" 102 | } 103 | 104 | response = requests.post(self.api_url, json=payload, headers=headers) 105 | 106 | if response.status_code == 200 or response.status_code == 202: 107 | print('Initial Video Solid Color Background request successful, polling for completion...') 108 | response_dict = response.json() 109 | 110 | status_url = response_dict.get('status_url') 111 | request_id = response_dict.get('request_id') 112 | 113 | if not status_url: 114 | raise Exception("No status_url returned from API") 115 | 116 | print(f"Request ID: {request_id}, Status URL: {status_url}") 117 | 118 | final_response = poll_status_until_completed(status_url, api_key, timeout=3600, check_interval=5) 119 | 120 | result_video_url = final_response['result']['video_url'] 121 | 122 | print(f"Video processing completed. Result URL: {result_video_url}") 123 | print(f"Solid color background processing complete! 
Use Preview Video URL node to view the result.") 124 | 125 | return (result_video_url,) 126 | else: 127 | raise Exception(f"Error: API request failed with status code {response.status_code} {response.text}") 128 | 129 | except Exception as e: 130 | raise Exception(f"{e}") 131 | finally: 132 | if video_path: 133 | try: 134 | if os.path.exists(video_path): 135 | os.unlink(video_path) 136 | except: 137 | pass -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- 1 | from .nodes import ( 2 | EraserNode, 3 | GenFillNode, 4 | ImageExpansionNode, 5 | ReplaceBgNode, 6 | RmbgNode, 7 | RemoveForegroundNode, 8 | ShotByTextOriginalNode, 9 | ShotByImageOriginalNode, 10 | TailoredGenNode, 11 | TailoredModelInfoNode, 12 | Text2ImageBaseNode, 13 | Text2ImageFastNode, 14 | Text2ImageHDNode, 15 | TailoredPortraitNode, 16 | ReimagineNode, 17 | GenerateImageNodeV2, 18 | GenerateImageLiteNodeV2, 19 | RefineImageNodeV2, 20 | RefineImageLiteNodeV2, 21 | GenerateStructuredPromptNodeV2, 22 | GenerateStructuredPromptLiteNodeV2, 23 | ShotByTextAutomaticNode, 24 | ShotByImageManualPaddingNode, 25 | ShotByImageAutomaticAspectRatioNode, 26 | ShotByImageCustomCoordinatesNode, 27 | ShotByImageManualPlacementNode, 28 | ShotByImageAutomaticNode, 29 | ShotByTextAutomaticAspectRatioNode, 30 | ShotByTextManualPlacementNode, 31 | ShotByTextManualPaddingNode, 32 | ShotByTextCustomCoordinatesNode, 33 | AttributionByImageNode, 34 | RemoveVideoBackgroundNode, 35 | VideoSolidColorBackgroundNode, 36 | VideoMaskByPromptNode, 37 | VideoMaskByKeyPointsNode, 38 | VideoIncreaseResolutionNode, 39 | VideoEraseElementsNode, 40 | LoadVideoFramesNode, 41 | PreviewVideoURLNode 42 | ) 43 | 44 | # Map the node class to a name used internally by ComfyUI 45 | NODE_CLASS_MAPPINGS = { 46 | "BriaEraser": EraserNode, # Return the class, not an instance 47 | "BriaGenFill": GenFillNode, 48 | "ImageExpansionNode": ImageExpansionNode, 49 | "ReplaceBgNode": ReplaceBgNode, 50 | "RmbgNode": RmbgNode, 51 | "RemoveForegroundNode": RemoveForegroundNode, 52 | "ShotByTextOriginal": ShotByTextOriginalNode, 53 | "ShotByImageOriginal": ShotByImageOriginalNode, 54 | "ShotByTextAutomatic": ShotByTextAutomaticNode, 55 | "ShotByTextManualPlacement": ShotByTextManualPlacementNode, 56 | "ShotByTextCustomCoordinates": ShotByTextCustomCoordinatesNode, 57 | "ShotByTextManualPadding": ShotByTextManualPaddingNode, 58 | "ShotByTextAutomaticAspectRatio": ShotByTextAutomaticAspectRatioNode, 59 | "ShotByImageAutomatic": ShotByImageAutomaticNode, 60 | "ShotByImageManualPlacement": ShotByImageManualPlacementNode, 61 | "ShotByImageCustomCoordinates": ShotByImageCustomCoordinatesNode, 62 | "ShotByImageManualPadding": ShotByImageManualPaddingNode, 63 | "ShotByImageAutomaticAspectRatio": ShotByImageAutomaticAspectRatioNode, 64 | "BriaTailoredGen": TailoredGenNode, 65 | "TailoredModelInfoNode": TailoredModelInfoNode, 66 | "TailoredPortraitNode": TailoredPortraitNode, 67 | "Text2ImageBaseNode": Text2ImageBaseNode, 68 | "Text2ImageFastNode": Text2ImageFastNode, 69 | "Text2ImageHDNode": Text2ImageHDNode, 70 | "ReimagineNode": ReimagineNode, 71 | "AttributionByImageNode": AttributionByImageNode, 72 | "GenerateImageNodeV2": GenerateImageNodeV2, 73 | "GenerateImageLiteNodeV2": GenerateImageLiteNodeV2, 74 | "RefineImageNodeV2": RefineImageNodeV2, 75 | "RefineImageLiteNodeV2": RefineImageLiteNodeV2, 76 | "GenerateStructuredPromptNodeV2": GenerateStructuredPromptNodeV2, 77 | 
"GenerateStructuredPromptLiteNodeV2": GenerateStructuredPromptLiteNodeV2, 78 | "RemoveVideoBackgroundNode":RemoveVideoBackgroundNode, 79 | "VideoSolidColorBackgroundNode":VideoSolidColorBackgroundNode, 80 | "VideoMaskByPromptNode":VideoMaskByPromptNode, 81 | "VideoMaskByKeyPointsNode":VideoMaskByKeyPointsNode, 82 | "VideoIncreaseResolutionNode":VideoIncreaseResolutionNode, 83 | "VideoEraseElementsNode":VideoEraseElementsNode, 84 | "LoadVideoFramesNode":LoadVideoFramesNode, 85 | "PreviewVideoURLNode":PreviewVideoURLNode 86 | } 87 | # Map the node display name to the one shown in the ComfyUI node interface 88 | NODE_DISPLAY_NAME_MAPPINGS = { 89 | "BriaEraser": "Bria Eraser", 90 | "BriaGenFill": "Bria GenFill", 91 | "ImageExpansionNode": "Bria Image Expansion", 92 | "ReplaceBgNode": "Bria Replace Background", 93 | "RmbgNode": "Bria RMBG", 94 | "RemoveForegroundNode": "Bria Remove Foreground", 95 | "ShotByTextOriginal": "Shot by Text - Original", 96 | "ShotByImageOriginal": "Shot by Image - Original", 97 | "ShotByTextAutomatic": "Shot by Text - Automatic", 98 | "ShotByTextManualPlacement": "Shot by Text - Manual Placement", 99 | "ShotByTextCustomCoordinates": "Shot by Text - Custom Coordinates", 100 | "ShotByTextManualPadding": "Shot by Text - Manual Padding", 101 | "ShotByTextAutomaticAspectRatio": "Shot by Text - Automatic Aspect Ratio", 102 | "ShotByImageAutomatic": "Shot by Image - Automatic", 103 | "ShotByImageManualPlacement": "Shot by Image - Manual Placement", 104 | "ShotByImageCustomCoordinates": "Shot by Image - Custom Coordinates", 105 | "ShotByImageManualPadding": "Shot by Image - Manual Padding", 106 | "ShotByImageAutomaticAspectRatio": "Shot by Image - Automatic Aspect Ratio", 107 | "BriaTailoredGen": "Bria Tailored Gen", 108 | "TailoredModelInfoNode": "Bria Tailored Model Info", 109 | "TailoredPortraitNode": "Bria Restyle Portrait", 110 | "Text2ImageBaseNode": "Bria Text2Image Base", 111 | "Text2ImageFastNode": "Bria Text2Image Fast", 112 | "Text2ImageHDNode": "Bria Text2Image HD", 113 | "ReimagineNode": "Bria Reimagine", 114 | "AttributionByImageNode": "Attribution By Image Node", 115 | "GenerateImageNodeV2": "Generate Image", 116 | "GenerateImageLiteNodeV2": "Generate Image - Lite", 117 | "RefineImageNodeV2": "Refine and Regenerate Image", 118 | "RefineImageLiteNodeV2": "Refine Image - Lite", 119 | "GenerateStructuredPromptNodeV2": "Generate Structured Prompt", 120 | "GenerateStructuredPromptLiteNodeV2": "Generate Structured Prompt - Lite", 121 | "RemoveVideoBackgroundNode": "Bria Remove Video Background", 122 | "VideoSolidColorBackgroundNode":"Bria SolidColor Background Video", 123 | "VideoMaskByPromptNode":"Bria Video Mask By Prompt", 124 | "VideoMaskByKeyPointsNode":"Bria Video Mask By Key Points", 125 | "VideoIncreaseResolutionNode":"Bria Video Increase Resolution", 126 | "VideoEraseElementsNode":"Bria Video Erase Elements", 127 | "LoadVideoFramesNode":"Bria Load Video", 128 | "PreviewVideoURLNode":"Bria Preview Video" 129 | } 130 | 131 | 132 | -------------------------------------------------------------------------------- /nodes/refine_image_lite_node_v2.py: -------------------------------------------------------------------------------- 1 | import requests 2 | from .common import deserialize_and_get_comfy_key, poll_status_until_completed, postprocess_image 3 | 4 | 5 | 6 | class RefineImageLiteNodeV2: 7 | """Lite Refine Image Node""" 8 | 9 | api_url = "https://engine.prod.bria-api.com/v2/structured_prompt/generate/lite" 10 | generate_api_url = 
"https://engine.prod.bria-api.com/v2/image/generate/lite" 11 | @classmethod 12 | def INPUT_TYPES(cls): 13 | return { 14 | "required": { 15 | "api_token": ("STRING", {"default": "BRIA_API_TOKEN"}), 16 | "prompt": ("STRING",), 17 | "structured_prompt": ("STRING",), 18 | }, 19 | "optional": { 20 | "model_version": (["FIBO"], {"default": "FIBO"}), 21 | "aspect_ratio": ( 22 | ["1:1", "2:3", "3:2", "3:4", "4:3", "4:5", "5:4", "9:16", "16:9"], 23 | {"default": "1:1"}, 24 | ), 25 | "steps_num": ( 26 | "INT", 27 | { 28 | "default": 8, 29 | "min": 8, 30 | "max": 30, 31 | }, 32 | ), 33 | "guidance_scale": ( 34 | "INT", 35 | { 36 | "default": 5, 37 | "min": 3, 38 | "max": 5, 39 | }, 40 | ), 41 | "seed": ("INT", {"default": 123456}), 42 | }, 43 | } 44 | 45 | 46 | RETURN_TYPES = ("IMAGE", "STRING", "INT") 47 | RETURN_NAMES = ("image", "structured_prompt", "seed") 48 | CATEGORY = "API Nodes" 49 | FUNCTION = "execute" 50 | 51 | def _validate_token(self, api_token: str): 52 | if api_token.strip() == "" or api_token.strip() == "BRIA_API_TOKEN": 53 | raise Exception("Please insert a valid API token.") 54 | 55 | def _build_payload( 56 | self, 57 | prompt, 58 | structured_prompt, 59 | model_version, 60 | aspect_ratio, 61 | steps_num, 62 | guidance_scale, 63 | seed, 64 | ): 65 | payload = { 66 | "prompt": prompt, 67 | "model_version": model_version, 68 | "aspect_ratio": aspect_ratio, 69 | "steps_num": steps_num, 70 | "guidance_scale": guidance_scale, 71 | "seed": seed, 72 | } 73 | if structured_prompt: 74 | payload["structured_prompt"] = structured_prompt 75 | 76 | return payload 77 | 78 | def execute( 79 | self, 80 | api_token, 81 | prompt, 82 | structured_prompt, 83 | model_version, 84 | aspect_ratio, 85 | steps_num, 86 | guidance_scale, 87 | seed 88 | ): 89 | self._validate_token(api_token) 90 | payload = self._build_payload( 91 | prompt, 92 | structured_prompt, 93 | model_version, 94 | aspect_ratio, 95 | steps_num, 96 | guidance_scale, 97 | seed, 98 | ) 99 | api_token = deserialize_and_get_comfy_key(api_token) 100 | headers = {"Content-Type": "application/json", "api_token": api_token} 101 | 102 | try: 103 | response = requests.post(self.api_url, json=payload, headers=headers) 104 | 105 | if response.status_code in (200, 202): 106 | print(f"Initial refine request successful to {self.api_url}, polling for completion...") 107 | response_dict = response.json() 108 | status_url = response_dict.get("status_url") 109 | request_id = response_dict.get("request_id") 110 | 111 | if not status_url: 112 | raise Exception("No status_url returned from API") 113 | 114 | print(f"Request ID: {request_id}, Status URL: {status_url}") 115 | 116 | final_response = poll_status_until_completed(status_url, api_token) 117 | 118 | result = final_response.get("result", {}) 119 | structured_prompt = result.get("structured_prompt", "") 120 | used_seed = result.get("seed", seed) 121 | 122 | # Step 2 to call genearte image 123 | payloadForImageGenetrate = { 124 | "prompt": prompt, 125 | "structured_prompt":structured_prompt, 126 | "model_version": model_version, 127 | "aspect_ratio": aspect_ratio, 128 | "steps_num": steps_num, 129 | "guidance_scale": guidance_scale, 130 | "seed": used_seed, 131 | } 132 | 133 | headers = {"Content-Type": "application/json", "api_token": api_token} 134 | 135 | response = requests.post(self.generate_api_url, json=payloadForImageGenetrate, headers=headers) 136 | 137 | if response.status_code in (200, 202): 138 | print( 139 | f"Initial request successful to {self.generate_api_url}, polling for completion..." 
140 | ) 141 | response_dict = response.json() 142 | status_url = response_dict.get("status_url") 143 | request_id = response_dict.get("request_id") 144 | 145 | if not status_url: 146 | raise Exception("No status_url returned from API") 147 | 148 | print(f"Request ID: {request_id}, Status URL: {status_url}") 149 | 150 | final_response = poll_status_until_completed(status_url, api_token) 151 | 152 | result = final_response.get("result", {}) 153 | result_image_url = result.get("image_url") 154 | structured_prompt = result.get("structured_prompt", "") 155 | used_seed = result.get("seed") 156 | 157 | image_response = requests.get(result_image_url) 158 | result_image = postprocess_image(image_response.content) 159 | 160 | return (result_image, structured_prompt, used_seed) 161 | 162 | raise Exception( 163 | f"Error: API request failed with status code {response.status_code} {response.text}" 164 | ) 165 | 166 | except Exception as e: 167 | raise Exception(f"{e}") -------------------------------------------------------------------------------- /nodes/image_expansion_node.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import requests 3 | from PIL import Image 4 | import io 5 | import torch 6 | 7 | from .common import deserialize_and_get_comfy_key, image_to_base64, preprocess_image, poll_status_until_completed 8 | 9 | 10 | class ImageExpansionNode(): 11 | @classmethod 12 | def INPUT_TYPES(self): 13 | return { 14 | "required": { 15 | "image": ("IMAGE",), # Input image from another node 16 | "api_key": ("STRING", {"default": "BRIA_API_TOKEN"}), # API Key input with a default value 17 | }, 18 | 19 | "optional": { 20 | "original_image_size": ("STRING",), 21 | "original_image_location": ("STRING",), 22 | "canvas_size": ("STRING", {"default": "1000, 1000"}), 23 | "aspect_ratio": (["1:1", "2:3", "3:2", "3:4", "4:3", "4:5", "5:4", "9:16", "16:9","None"], {"default": "None"}), 24 | "prompt": ("STRING", {"default": ""}), 25 | "seed": ("INT", {"default": 681794}), 26 | "negative_prompt": ("STRING", {"default": "Ugly, mutated"}), 27 | "prompt_content_moderation": ("BOOLEAN", {"default": False}), 28 | "preserve_alpha": ("BOOLEAN", {"default": True}), 29 | "visual_input_content_moderation": ("BOOLEAN", {"default": False}), 30 | "visual_output_content_moderation": ("BOOLEAN", {"default": False}), 31 | } 32 | } 33 | 34 | RETURN_TYPES = ("IMAGE",) 35 | RETURN_NAMES = ("output_image",) 36 | CATEGORY = "API Nodes" 37 | FUNCTION = "execute" # This is the method that will be executed 38 | 39 | def __init__(self): 40 | self.api_url = "https://engine.prod.bria-api.com/v2/image/edit/expand" # Image Expansion API URL 41 | 42 | # Define the execute method as expected by ComfyUI 43 | def execute(self, image, 44 | original_image_size, 45 | original_image_location, 46 | canvas_size, 47 | aspect_ratio, 48 | prompt, 49 | seed, 50 | negative_prompt, 51 | prompt_content_moderation, 52 | preserve_alpha, 53 | visual_input_content_moderation, 54 | visual_output_content_moderation, 55 | api_key): 56 | if api_key.strip() == "" or api_key.strip() == "BRIA_API_TOKEN": 57 | raise Exception("Please insert a valid API key.") 58 | api_key = deserialize_and_get_comfy_key(api_key) 59 | original_image_size = [int(x.strip()) for x in original_image_size.split(",")] if original_image_size else () 60 | original_image_location = [int(x.strip()) for x in original_image_location.split(",")] if original_image_location else () 61 | canvas_size = [int(x.strip()) for x in 
canvas_size.split(",")] if canvas_size else () 62 | 63 | if negative_prompt == "": 64 | negative_prompt = " " # hack to avoid error in triton which expects non-empty string 65 | 66 | # Check if image and mask are tensors, if so, convert to NumPy arrays 67 | if isinstance(image, torch.Tensor): 68 | image = preprocess_image(image) 69 | 70 | # Convert the image directly to Base64 string 71 | image_base64 = image_to_base64(image) 72 | if aspect_ratio and aspect_ratio != "None": 73 | payload = { 74 | "image": image_base64, 75 | "aspect_ratio": aspect_ratio, 76 | "prompt": prompt, 77 | "negative_prompt": negative_prompt, 78 | "seed": seed, 79 | "prompt_content_moderation": prompt_content_moderation, 80 | "preserve_alpha": preserve_alpha, 81 | "visual_input_content_moderation": visual_input_content_moderation, 82 | "visual_output_content_moderation": visual_output_content_moderation 83 | } 84 | else: 85 | payload = { 86 | "image": image_base64, 87 | "original_image_size": original_image_size, 88 | "original_image_location": original_image_location, 89 | "canvas_size": canvas_size, 90 | "prompt": prompt, 91 | "negative_prompt": negative_prompt, 92 | "seed": seed, 93 | "prompt_content_moderation": prompt_content_moderation, 94 | "preserve_alpha": preserve_alpha, 95 | "visual_input_content_moderation": visual_input_content_moderation, 96 | "visual_output_content_moderation": visual_output_content_moderation 97 | } 98 | 99 | headers = { 100 | "Content-Type": "application/json", 101 | "api_token": f"{api_key}" 102 | } 103 | 104 | try: 105 | response = requests.post(self.api_url, json=payload, headers=headers) 106 | 107 | if response.status_code == 200 or response.status_code == 202: 108 | print('Initial image expansion request successful, polling for completion...') 109 | response_dict = response.json() 110 | status_url = response_dict.get('status_url') 111 | request_id = response_dict.get('request_id') 112 | 113 | if not status_url: 114 | raise Exception("No status_url returned from API") 115 | 116 | print(f"Request ID: {request_id}, Status URL: {status_url}") 117 | 118 | # Poll status URL until completion 119 | final_response = poll_status_until_completed(status_url, api_key) 120 | 121 | # Get the result image URL 122 | result_image_url = final_response['result']['image_url'] 123 | 124 | # Download and process the result image 125 | image_response = requests.get(result_image_url) 126 | result_image = Image.open(io.BytesIO(image_response.content)) 127 | result_image = result_image.convert("RGB") 128 | result_image = np.array(result_image).astype(np.float32) / 255.0 129 | result_image = torch.from_numpy(result_image)[None,] 130 | 131 | return (result_image,) 132 | else: 133 | raise Exception(f"Error: API request failed with status code {response.status_code}: {response.text}") 134 | 135 | except Exception as e: 136 | raise Exception(f"{e}") 137 | -------------------------------------------------------------------------------- /workflows/product_original_ shot_generation_workflow.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "1cdd7d4c-58b5-4047-947b-1977ad36d364", 3 | "revision": 0, 4 | "last_node_id": 14, 5 | "last_link_id": 11, 6 | "nodes": [ 7 | { 8 | "id": 6, 9 | "type": "PreviewImage", 10 | "pos": [ 11 | 1351.83154296875, 12 | 26.696861267089844 13 | ], 14 | "size": [ 15 | 399.811279296875, 16 | 246 17 | ], 18 | "flags": {}, 19 | "order": 5, 20 | "mode": 0, 21 | "inputs": [ 22 | { 23 | "name": "images", 24 | "type": "IMAGE", 25 | "link": 5 
26 | } 27 | ], 28 | "outputs": [], 29 | "properties": { 30 | "Node name for S&R": "PreviewImage" 31 | }, 32 | "widgets_values": [] 33 | }, 34 | { 35 | "id": 1, 36 | "type": "LoadImage", 37 | "pos": [ 38 | 383.92852783203125, 39 | 38.40964889526367 40 | ], 41 | "size": [ 42 | 397.5969543457031, 43 | 314 44 | ], 45 | "flags": {}, 46 | "order": 0, 47 | "mode": 0, 48 | "inputs": [], 49 | "outputs": [ 50 | { 51 | "name": "IMAGE", 52 | "type": "IMAGE", 53 | "links": [ 54 | 1, 55 | 3 56 | ] 57 | }, 58 | { 59 | "name": "MASK", 60 | "type": "MASK", 61 | "links": null 62 | } 63 | ], 64 | "properties": { 65 | "Node name for S&R": "LoadImage" 66 | }, 67 | "widgets_values": [ 68 | "CAR.png", 69 | "image" 70 | ] 71 | }, 72 | { 73 | "id": 2, 74 | "type": "LoadImage", 75 | "pos": [ 76 | 369.0213623046875, 77 | 415.34893798828125 78 | ], 79 | "size": [ 80 | 450.83685302734375, 81 | 314.0000305175781 82 | ], 83 | "flags": {}, 84 | "order": 1, 85 | "mode": 0, 86 | "inputs": [], 87 | "outputs": [ 88 | { 89 | "name": "IMAGE", 90 | "type": "IMAGE", 91 | "links": [ 92 | 4 93 | ] 94 | }, 95 | { 96 | "name": "MASK", 97 | "type": "MASK", 98 | "links": null 99 | } 100 | ], 101 | "properties": { 102 | "Node name for S&R": "LoadImage" 103 | }, 104 | "widgets_values": [ 105 | "seed_713360865.png", 106 | "image" 107 | ] 108 | }, 109 | { 110 | "id": 5, 111 | "type": "Note", 112 | "pos": [ 113 | 951.4669189453125, 114 | 5.085720062255859 115 | ], 116 | "size": [ 117 | 210, 118 | 88 119 | ], 120 | "flags": {}, 121 | "order": 2, 122 | "mode": 0, 123 | "inputs": [], 124 | "outputs": [], 125 | "properties": {}, 126 | "widgets_values": [ 127 | "You can get your BRIA API token at:\nhttps://bria.ai/api/" 128 | ], 129 | "color": "#432", 130 | "bgcolor": "#653" 131 | }, 132 | { 133 | "id": 7, 134 | "type": "PreviewImage", 135 | "pos": [ 136 | 1368.5450439453125, 137 | 340.4889221191406 138 | ], 139 | "size": [ 140 | 387.0335693359375, 141 | 246 142 | ], 143 | "flags": {}, 144 | "order": 6, 145 | "mode": 0, 146 | "inputs": [ 147 | { 148 | "name": "images", 149 | "type": "IMAGE", 150 | "link": 6 151 | } 152 | ], 153 | "outputs": [], 154 | "properties": { 155 | "Node name for S&R": "PreviewImage" 156 | }, 157 | "widgets_values": [] 158 | }, 159 | { 160 | "id": 3, 161 | "type": "ShotByTextOriginal", 162 | "pos": [ 163 | 941.8839111328125, 164 | 149.14889526367188 165 | ], 166 | "size": [ 167 | 273.388671875, 168 | 202 169 | ], 170 | "flags": {}, 171 | "order": 3, 172 | "mode": 0, 173 | "inputs": [ 174 | { 175 | "name": "image", 176 | "type": "IMAGE", 177 | "link": 1 178 | } 179 | ], 180 | "outputs": [ 181 | { 182 | "name": "output_image", 183 | "type": "IMAGE", 184 | "links": [ 185 | 5 186 | ] 187 | } 188 | ], 189 | "properties": { 190 | "Node name for S&R": "ShotByTextOriginal" 191 | }, 192 | "widgets_values": [ 193 | "BRIA_API_TOKEN", 194 | "sea", 195 | "fast", 196 | false, 197 | false, 198 | true, 199 | "" 200 | ] 201 | }, 202 | { 203 | "id": 4, 204 | "type": "ShotByImageOriginal", 205 | "pos": [ 206 | 945.0781860351562, 207 | 441.9688415527344 208 | ], 209 | "size": [ 210 | 272.0703125, 211 | 174 212 | ], 213 | "flags": {}, 214 | "order": 4, 215 | "mode": 0, 216 | "inputs": [ 217 | { 218 | "name": "image", 219 | "type": "IMAGE", 220 | "link": 3 221 | }, 222 | { 223 | "name": "ref_image", 224 | "type": "IMAGE", 225 | "link": 4 226 | } 227 | ], 228 | "outputs": [ 229 | { 230 | "name": "output_image", 231 | "type": "IMAGE", 232 | "links": [ 233 | 6 234 | ] 235 | } 236 | ], 237 | "properties": { 238 | "Node name for S&R": 
"ShotByImageOriginal" 239 | }, 240 | "widgets_values": [ 241 | "BRIA_API_TOKEN", 242 | false, 243 | false, 244 | true, 245 | 1 246 | ] 247 | } 248 | ], 249 | "links": [ 250 | [ 251 | 1, 252 | 1, 253 | 0, 254 | 3, 255 | 0, 256 | "IMAGE" 257 | ], 258 | [ 259 | 3, 260 | 1, 261 | 0, 262 | 4, 263 | 0, 264 | "IMAGE" 265 | ], 266 | [ 267 | 4, 268 | 2, 269 | 0, 270 | 4, 271 | 1, 272 | "IMAGE" 273 | ], 274 | [ 275 | 5, 276 | 3, 277 | 0, 278 | 6, 279 | 0, 280 | "IMAGE" 281 | ], 282 | [ 283 | 6, 284 | 4, 285 | 0, 286 | 7, 287 | 0, 288 | "IMAGE" 289 | ] 290 | ], 291 | "groups": [], 292 | "config": {}, 293 | "extra": { 294 | "ds": { 295 | "scale": 0.7513148009015777, 296 | "offset": [ 297 | 11.112206386364164, 298 | 66.47311795454547 299 | ] 300 | }, 301 | "frontendVersion": "1.25.11" 302 | }, 303 | "version": 0.4 304 | } -------------------------------------------------------------------------------- /nodes/refine_image_node_v2.py: -------------------------------------------------------------------------------- 1 | import requests 2 | from .common import deserialize_and_get_comfy_key, poll_status_until_completed, postprocess_image 3 | 4 | 5 | class RefineImageNodeV2: 6 | """Standard Refine Image Node""" 7 | 8 | api_url = "https://engine.prod.bria-api.com/v2/structured_prompt/generate" # Must be overridden by subclasses 9 | generate_api_url = "https://engine.prod.bria-api.com/v2/image/generate" 10 | @classmethod 11 | def INPUT_TYPES(cls): 12 | return { 13 | "required": { 14 | "api_token": ("STRING", {"default": "BRIA_API_TOKEN"}), 15 | "prompt": ("STRING",), 16 | "structured_prompt": ("STRING",), 17 | }, 18 | "optional": { 19 | "model_version": (["FIBO"], {"default": "FIBO"}), 20 | "aspect_ratio": ( 21 | ["1:1", "2:3", "3:2", "3:4", "4:3", "4:5", "5:4", "9:16", "16:9"], 22 | {"default": "1:1"}, 23 | ), 24 | "steps_num": ( 25 | "INT", 26 | { 27 | "default": 50, 28 | "min": 35, 29 | "max": 50, 30 | }, 31 | ), 32 | "guidance_scale": ( 33 | "INT", 34 | { 35 | "default": 5, 36 | "min": 3, 37 | "max": 5, 38 | }, 39 | ), 40 | "seed": ("INT", {"default": 123456}), 41 | "negative_prompt": ("STRING", {"default": ""}), 42 | }, 43 | } 44 | 45 | 46 | RETURN_TYPES = ("IMAGE", "STRING", "INT") 47 | RETURN_NAMES = ("image", "structured_prompt", "seed") 48 | CATEGORY = "API Nodes" 49 | FUNCTION = "execute" 50 | 51 | def _validate_token(self, api_token: str): 52 | if api_token.strip() == "" or api_token.strip() == "BRIA_API_TOKEN": 53 | raise Exception("Please insert a valid API token.") 54 | 55 | def _build_payload( 56 | self, 57 | prompt, 58 | structured_prompt, 59 | model_version, 60 | aspect_ratio, 61 | steps_num, 62 | guidance_scale, 63 | seed, 64 | ): 65 | payload = { 66 | "prompt": prompt, 67 | "model_version": model_version, 68 | "aspect_ratio": aspect_ratio, 69 | "steps_num": steps_num, 70 | "guidance_scale": guidance_scale, 71 | "seed": seed, 72 | } 73 | if structured_prompt: 74 | payload["structured_prompt"] = structured_prompt 75 | 76 | return payload 77 | 78 | def execute( 79 | self, 80 | api_token, 81 | prompt, 82 | structured_prompt, 83 | model_version, 84 | aspect_ratio, 85 | steps_num, 86 | guidance_scale, 87 | seed, 88 | negative_prompt=None, 89 | ): 90 | self._validate_token(api_token) 91 | payload = self._build_payload( 92 | prompt, 93 | structured_prompt, 94 | model_version, 95 | aspect_ratio, 96 | steps_num, 97 | guidance_scale, 98 | seed, 99 | ) 100 | api_token = deserialize_and_get_comfy_key(api_token) 101 | headers = {"Content-Type": "application/json", "api_token": api_token} 102 | 103 | try: 
104 | response = requests.post(self.api_url, json=payload, headers=headers) 105 | 106 | if response.status_code in (200, 202): 107 | print(f"Initial refine request successful to {self.api_url}, polling for completion...") 108 | response_dict = response.json() 109 | status_url = response_dict.get("status_url") 110 | request_id = response_dict.get("request_id") 111 | 112 | if not status_url: 113 | raise Exception("No status_url returned from API") 114 | 115 | print(f"Request ID: {request_id}, Status URL: {status_url}") 116 | 117 | final_response = poll_status_until_completed(status_url, api_token) 118 | 119 | result = final_response.get("result", {}) 120 | structured_prompt = result.get("structured_prompt", "") 121 | used_seed = result.get("seed", seed) 122 | 123 | # Step 2 to call genearte image 124 | payloadForImageGenetrate = { 125 | "prompt": prompt, 126 | "structured_prompt":structured_prompt, 127 | "model_version": model_version, 128 | "aspect_ratio": aspect_ratio, 129 | "steps_num": steps_num, 130 | "guidance_scale": guidance_scale, 131 | "seed": used_seed, 132 | "negative_prompt":negative_prompt 133 | } 134 | 135 | headers = {"Content-Type": "application/json", "api_token": api_token} 136 | 137 | response = requests.post(self.generate_api_url, json=payloadForImageGenetrate, headers=headers) 138 | 139 | if response.status_code in (200, 202): 140 | print( 141 | f"Initial request successful to {self.generate_api_url}, polling for completion..." 142 | ) 143 | response_dict = response.json() 144 | status_url = response_dict.get("status_url") 145 | request_id = response_dict.get("request_id") 146 | 147 | if not status_url: 148 | raise Exception("No status_url returned from API") 149 | 150 | print(f"Request ID: {request_id}, Status URL: {status_url}") 151 | 152 | final_response = poll_status_until_completed(status_url, api_token) 153 | 154 | result = final_response.get("result", {}) 155 | result_image_url = result.get("image_url") 156 | structured_prompt = result.get("structured_prompt", "") 157 | used_seed = result.get("seed") 158 | 159 | image_response = requests.get(result_image_url) 160 | result_image = postprocess_image(image_response.content) 161 | 162 | return (result_image, structured_prompt, used_seed) 163 | 164 | raise Exception( 165 | f"Error: API request failed with status code {response.status_code} {response.text}" 166 | ) 167 | 168 | except Exception as e: 169 | raise Exception(f"{e}") 170 | -------------------------------------------------------------------------------- /nodes/common.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from PIL import Image 3 | import io 4 | import torch 5 | import base64 6 | from torchvision.transforms import ToPILImage 7 | import requests 8 | import time 9 | import json 10 | 11 | 12 | COMFY_KEY_ERROR = ( 13 | "Invalid Token Type\n\n" 14 | "The API token you’ve entered is not a ComfyUI token.\n" 15 | "Please use the valid token from your BRIA Account API Keys page:\n" 16 | "https://platform.bria.ai/console/account/api-keys" 17 | ) 18 | 19 | def postprocess_image(image): 20 | result_image = Image.open(io.BytesIO(image)) 21 | result_image = result_image.convert("RGB") 22 | result_image = np.array(result_image).astype(np.float32) / 255.0 23 | result_image = torch.from_numpy(result_image)[None,] 24 | return result_image 25 | 26 | def image_to_base64(pil_image): 27 | # Convert a PIL image to a base64-encoded string 28 | buffered = io.BytesIO() 29 | pil_image.save(buffered, format="PNG") 
# Save the image to the buffer in PNG format 30 | buffered.seek(0) # Rewind the buffer to the beginning 31 | return base64.b64encode(buffered.getvalue()).decode('utf-8') 32 | 33 | def preprocess_image(image): 34 | if isinstance(image, torch.Tensor): 35 | # Print image shape for debugging 36 | if image.dim() == 4: # (batch_size, height, width, channels) 37 | image = image.squeeze(0) # Remove the batch dimension (1) 38 | # Convert to PIL after permuting to (height, width, channels) 39 | image = ToPILImage()(image.permute(2, 0, 1)) # (height, width, channels) 40 | else: 41 | print("Unexpected image dimensions. Expected 4D tensor.") 42 | return image 43 | 44 | 45 | def preprocess_mask(mask): 46 | if isinstance(mask, torch.Tensor): 47 | # Print mask shape for debugging 48 | if mask.dim() == 3: # (batch_size, height, width) 49 | mask = mask.squeeze(0) # Remove the batch dimension (1) 50 | # Convert to PIL (grayscale mask) 51 | mask = ToPILImage()(mask) # No permute needed for grayscale 52 | else: 53 | print("Unexpected mask dimensions. Expected 3D tensor.") 54 | return mask 55 | 56 | 57 | def process_request(api_url, image, mask, api_key, visual_input_content_moderation, visual_output_content_moderation): 58 | if api_key.strip() == "" or api_key.strip() == "BRIA_API_TOKEN": 59 | raise Exception("Please insert a valid API key.") 60 | api_key = deserialize_and_get_comfy_key(api_key) 61 | 62 | # Check if image and mask are tensors, if so, convert to NumPy arrays 63 | if isinstance(image, torch.Tensor): 64 | image = preprocess_image(image) 65 | if isinstance(mask, torch.Tensor): 66 | mask = preprocess_mask(mask) 67 | 68 | # Convert the image and mask directly to Base64 strings 69 | image_base64 = image_to_base64(image) 70 | mask_base64 = image_to_base64(mask) 71 | 72 | # Prepare the API request payload for v2 API 73 | payload = { 74 | "image": image_base64, 75 | "mask": mask_base64, 76 | "visual_input_content_moderation":visual_input_content_moderation, 77 | "visual_output_content_moderation":visual_output_content_moderation 78 | } 79 | 80 | headers = { 81 | "Content-Type": "application/json", 82 | "api_token": f"{api_key}" 83 | } 84 | 85 | try: 86 | response = requests.post(api_url, json=payload, headers=headers) 87 | if response.status_code == 200 or response.status_code == 202: 88 | print('Initial request successful, polling for completion...') 89 | response_dict = response.json() 90 | status_url = response_dict.get('status_url') 91 | request_id = response_dict.get('request_id') 92 | 93 | if not status_url: 94 | raise Exception("No status_url returned from API") 95 | 96 | print(f"Request ID: {request_id}, Status URL: {status_url}") 97 | 98 | final_response = poll_status_until_completed(status_url, api_key) 99 | result_image_url = final_response['result']['image_url'] 100 | 101 | # Download and process the result image 102 | image_response = requests.get(result_image_url) 103 | result_image = Image.open(io.BytesIO(image_response.content)) 104 | result_image = result_image.convert("RGBA") 105 | result_image = np.array(result_image).astype(np.float32) / 255.0 106 | result_image = torch.from_numpy(result_image)[None,] 107 | # image_tensor = image_tensor = ToTensor()(output_image) 108 | # image_tensor = image_tensor.permute(1, 2, 0) / 255.0 # Shape now becomes [1, 2200, 1548, 3] 109 | # print(f"output tensor shape is: {image_tensor.shape}") 110 | return (result_image,) 111 | else: 112 | raise Exception(f"Error: API request failed with status code {response.status_code} {response.text}") 113 | 114 | 
except Exception as e: 115 | raise Exception(f"{e}") 116 | 117 | 118 | def poll_status_until_completed(status_url, api_key, timeout=360, check_interval=2): 119 | """ 120 | Poll a status URL until the status is COMPLETED or timeout is reached. 121 | 122 | Args: 123 | status_url (str): The status URL to poll 124 | api_key (str): API token for authentication 125 | timeout (int): Maximum time to wait in seconds (default: 360) 126 | check_interval (int): Time between checks in seconds (default: 2) 127 | 128 | Returns: 129 | dict: The final response containing the result 130 | 131 | Raises: 132 | Exception: If timeout is reached or API request fails 133 | """ 134 | start_time = time.time() 135 | headers = {"api_token": api_key} 136 | 137 | while time.time() - start_time < timeout: 138 | try: 139 | response = requests.get(status_url, headers=headers) 140 | if response.status_code == 200 or response.status_code == 202: 141 | response_dict = response.json() 142 | status = response_dict.get("status", "").upper() 143 | 144 | if status == "COMPLETED": 145 | return response_dict 146 | elif status == "ERROR": 147 | raise Exception(f"Request failed: {response_dict}") 148 | else: 149 | print(f"Status: {status}, waiting...") 150 | time.sleep(check_interval) 151 | else: 152 | raise Exception(f"Status check failed with status code {response.status_code}") 153 | 154 | except requests.exceptions.RequestException as e: 155 | raise Exception(f"Error checking status: {e}") 156 | 157 | raise Exception(f"Timeout reached after {timeout} seconds") 158 | 159 | def deserialize_and_get_comfy_key(encoded: str) -> str: 160 | """ 161 | Decodes a base64-encoded JSON token and returns the ComfyUI API key. 162 | """ 163 | try: 164 | decoded = base64.b64decode(encoded).decode("utf-8") 165 | payload = json.loads(decoded) 166 | 167 | if payload.get("type") != "comfy": 168 | raise Exception(COMFY_KEY_ERROR) 169 | 170 | return payload.get("apiKey") 171 | 172 | except Exception as e: 173 | raise Exception(COMFY_KEY_ERROR) 174 | 175 | 176 | -------------------------------------------------------------------------------- /nodes/utils/shot_utils.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import torch 3 | from ..common import deserialize_and_get_comfy_key, postprocess_image, preprocess_image, image_to_base64 4 | 5 | shot_by_text_api_url = ( 6 | "https://engine.prod.bria-api.com/v1/product/lifestyle_shot_by_text" 7 | ) 8 | shot_by_image_api_url = ( 9 | "https://engine.prod.bria-api.com/v1/product/lifestyle_shot_by_image" 10 | ) 11 | 12 | from enum import Enum 13 | 14 | class PlacementType(str, Enum): 15 | ORIGINAL = "original" 16 | AUTOMATIC = "automatic" 17 | MANUAL_PLACEMENT = "manual_placement" 18 | MANUAL_PADDING = "manual_padding" 19 | CUSTOM_COORDINATES = "custom_coordinates" 20 | AUTOMATIC_ASPECT_RATIO = "automatic_aspect_ratio" 21 | 22 | 23 | 24 | def validate_api_key(api_key): 25 | """Validate API key input""" 26 | if api_key.strip() == "" or api_key.strip() == "BRIA_API_TOKEN": 27 | raise Exception("Please insert a valid API key.") 28 | 29 | 30 | def update_payload_for_placement(placement_type, payload, **kwargs): 31 | if placement_type == PlacementType.AUTOMATIC.value: 32 | payload["shot_size"] = [ 33 | int(x.strip()) for x in kwargs.get("shot_size").split(",") 34 | ] 35 | elif placement_type == PlacementType.MANUAL_PLACEMENT.value: 36 | payload["shot_size"] = [ 37 | int(x.strip()) for x in kwargs.get("shot_size").split(",") 38 | ] 39 | 
payload["manual_placement_selection"] = [ 40 | kwargs.get("manual_placement_selection", "upper_left") 41 | ] 42 | elif placement_type == PlacementType.CUSTOM_COORDINATES.value: 43 | payload["shot_size"] = [ 44 | int(x.strip()) for x in kwargs.get("shot_size").split(",") 45 | ] 46 | payload["foreground_image_size"] = [ 47 | int(x.strip()) for x in kwargs.get("foreground_image_size").split(",") 48 | ] 49 | payload["foreground_image_location"] = [ 50 | int(x.strip()) for x in kwargs.get("foreground_image_location").split(",") 51 | ] 52 | elif placement_type == PlacementType.MANUAL_PADDING.value: 53 | payload["padding_values"] = [ 54 | int(x.strip()) for x in kwargs.get("padding_values").split(",") 55 | ] 56 | 57 | elif placement_type == PlacementType.AUTOMATIC_ASPECT_RATIO.value: 58 | payload["aspect_ratio"] = kwargs.get("aspect_ratio", "1:1") 59 | elif placement_type == PlacementType.ORIGINAL.value: 60 | payload["original_quality"] = kwargs.get("original_quality", True) 61 | 62 | return payload 63 | 64 | 65 | def create_text_payload( 66 | image, api_key, scene_description, mode, placement_type, **kwargs 67 | ): 68 | 69 | validate_api_key(api_key) 70 | 71 | 72 | # Process image 73 | if isinstance(image, torch.Tensor): 74 | image = preprocess_image(image) 75 | 76 | image_base64 = image_to_base64(image) 77 | 78 | payload = { 79 | "file": image_base64, 80 | "placement_type": placement_type, 81 | "sync": True, 82 | "num_results": 1, 83 | "force_rmbg": kwargs.get("force_rmbg", False), 84 | "content_moderation": kwargs.get("content_moderation", False), 85 | "scene_description": scene_description, 86 | "mode": mode, 87 | "optimize_description": kwargs.get("optimize_description", True), 88 | } 89 | 90 | if kwargs.get("exclude_elements", "").strip(): 91 | payload["exclude_elements"] = kwargs["exclude_elements"] 92 | 93 | payload = update_payload_for_placement(placement_type, payload, **kwargs) 94 | 95 | return payload 96 | 97 | 98 | def create_image_payload(image, ref_image, api_key, placement_type, **kwargs): 99 | """Create payload for image-based shot nodes""" 100 | validate_api_key(api_key) 101 | 102 | if isinstance(image, torch.Tensor): 103 | image = preprocess_image(image) 104 | if isinstance(ref_image, torch.Tensor): 105 | ref_image = preprocess_image(ref_image) 106 | 107 | image_base64 = image_to_base64(image) 108 | ref_image_base64 = image_to_base64(ref_image) 109 | 110 | # Base payload 111 | payload = { 112 | "file": image_base64, 113 | "ref_image_file": ref_image_base64, 114 | "enhance_ref_image": kwargs.get("enhance_ref_image", True), 115 | "ref_image_influence": kwargs.get("ref_image_influence", 1.0), 116 | "placement_type": placement_type, 117 | "sync": True, 118 | "num_results": 1, 119 | "force_rmbg": kwargs.get("force_rmbg", False), 120 | "content_moderation": kwargs.get("content_moderation", False), 121 | } 122 | 123 | payload = update_payload_for_placement(placement_type, payload, **kwargs) 124 | 125 | return payload 126 | 127 | 128 | def make_api_request(api_url, payload, api_key, Placement_type = None): 129 | """Make API request and return processed image""" 130 | 131 | 132 | try: 133 | api_key = deserialize_and_get_comfy_key(api_key) 134 | headers = {"Content-Type": "application/json", "api_token": f"{api_key}"} 135 | response = requests.post(api_url, json=payload, headers=headers) 136 | 137 | if response.status_code == 200: 138 | print("response is 200") 139 | response_dict = response.json() 140 | if Placement_type == PlacementType.AUTOMATIC.value: 141 | result_images = [] 142 | 
for i, result in enumerate(response_dict.get("result", [])[:7]): 143 | image_url = result[0] 144 | image_response = requests.get(image_url) 145 | processed = postprocess_image(image_response.content) 146 | result_images.append(processed) 147 | 148 | # If less than 7 images, pad with None to match ComfyUI return structure 149 | while len(result_images) < 7: 150 | result_images.append(None) 151 | print(result_images) 152 | 153 | return tuple(result_images) 154 | 155 | image_response = requests.get(response_dict["result"][0][0]) 156 | result_image = postprocess_image(image_response.content) 157 | return (result_image,) 158 | else: 159 | raise Exception( 160 | f"Error: API request failed with status code {response.status_code}{response.text}" 161 | ) 162 | 163 | except Exception as e: 164 | raise Exception(f"{e}") 165 | 166 | 167 | def get_common_input_types(): 168 | """Get common input types for all nodes""" 169 | return { 170 | "required": {"api_key": ("STRING", {"default": "BRIA_API_TOKEN"})}, 171 | "optional": { 172 | "force_rmbg": ("BOOLEAN", {"default": False}), 173 | "content_moderation": ("BOOLEAN", {"default": False}), 174 | }, 175 | } 176 | 177 | 178 | def get_text_input_types(): 179 | """Get text-specific input types""" 180 | common = get_common_input_types() 181 | common["required"].update( 182 | { 183 | "image": ("IMAGE",), 184 | "scene_description": ("STRING",), 185 | "mode": (["base", "fast", "high_control"], {"default": "fast"}), 186 | } 187 | ) 188 | common["optional"].update( 189 | { 190 | "optimize_description": ("BOOLEAN", {"default": True}), 191 | "exclude_elements": ("STRING", {"default": ""}), 192 | } 193 | ) 194 | return common 195 | 196 | 197 | def get_image_input_types(): 198 | """Get image-specific input types""" 199 | common = get_common_input_types() 200 | common["required"].update({"image": ("IMAGE",), "ref_image": ("IMAGE",)}) 201 | common["optional"].update( 202 | { 203 | "enhance_ref_image": ("BOOLEAN", {"default": True}), 204 | "ref_image_influence": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0}), 205 | } 206 | ) 207 | return common 208 | -------------------------------------------------------------------------------- /Readme.md: -------------------------------------------------------------------------------- 1 | # BRIA ComfyUI API Nodes 2 | 3 |
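Every node in this package authenticates with a ComfyUI token rather than a raw engine key: `deserialize_and_get_comfy_key()` in `nodes/common.py` base64-decodes the token, expects a JSON object with `"type": "comfy"` and an `apiKey` field, and forwards the decoded key to the engine. Tokens are issued from the BRIA Account API Keys page. The snippet below is a minimal sketch of that decoding contract only; the `encode_comfy_token`/`decode_comfy_token` helper names and the placeholder key are illustrative, not part of the node code.

```python
# Sketch of the token shape that deserialize_and_get_comfy_key() expects.
# Real tokens come from the BRIA console; the helper names here are illustrative.
import base64
import json

def encode_comfy_token(api_key: str) -> str:
    # Base64-encoded JSON with type "comfy" and the engine API key.
    payload = {"type": "comfy", "apiKey": api_key}
    return base64.b64encode(json.dumps(payload).encode("utf-8")).decode("utf-8")

def decode_comfy_token(encoded: str) -> str:
    # Mirrors the checks performed in nodes/common.py.
    payload = json.loads(base64.b64decode(encoded).decode("utf-8"))
    if payload.get("type") != "comfy":
        raise ValueError("Not a ComfyUI token")
    return payload["apiKey"]

token = encode_comfy_token("YOUR_BRIA_API_KEY")  # placeholder key
assert decode_comfy_token(token) == "YOUR_BRIA_API_KEY"
```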
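All of the v2 endpoints used by these nodes are asynchronous: the initial POST returns a `status_url` and `request_id`, and `poll_status_until_completed()` in `nodes/common.py` polls that URL until the status is `COMPLETED`, then reads the result. The sketch below condenses that submit-then-poll flow into a single helper and exercises it against the video increase-resolution endpoint used by `VideoIncreaseResolutionNode`. It assumes a decoded BRIA key and a publicly reachable video URL; the `submit_and_poll` name, the placeholder video URL, and the placeholder key are illustrative.

```python
# Minimal sketch of the submit-then-poll pattern shared by the v2 nodes.
import time
import requests

def submit_and_poll(api_url: str, payload: dict, api_key: str,
                    timeout: int = 3600, check_interval: int = 5) -> dict:
    headers = {"Content-Type": "application/json", "api_token": api_key}
    response = requests.post(api_url, json=payload, headers=headers)
    if response.status_code not in (200, 202):
        raise RuntimeError(f"Request failed: {response.status_code} {response.text}")
    status_url = response.json()["status_url"]

    deadline = time.time() + timeout
    while time.time() < deadline:
        # Poll the status URL until the job reports COMPLETED or ERROR.
        status = requests.get(status_url, headers={"api_token": api_key}).json()
        state = status.get("status", "").upper()
        if state == "COMPLETED":
            return status
        if state == "ERROR":
            raise RuntimeError(f"Request failed: {status}")
        time.sleep(check_interval)
    raise TimeoutError(f"No result after {timeout} seconds")

# Example: 2x upscale of a video, returning the result URL.
result = submit_and_poll(
    "https://engine.prod.bria-api.com/v2/video/edit/increase_resolution",
    {"video": "https://example.com/input.mp4",  # placeholder input URL
     "desired_increase": "2",
     "output_container_and_codec": "mp4_h264",
     "preserve_audio": True},
    api_key="YOUR_DECODED_BRIA_KEY",  # placeholder key
)
print(result["result"]["video_url"])
```

The one-hour timeout and five-second poll interval mirror the values the video nodes pass to `poll_status_until_completed()`.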
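`RefineImageNodeV2` and its Lite variant chain two of these calls: the structured-prompt endpoint is polled first, and the returned `structured_prompt` and seed are then sent to the image-generation endpoint. Below is a condensed sketch of that two-stage flow, reusing `submit_and_poll()` from the previous snippet; the payload values are the node defaults, and `refine_and_generate` is an illustrative name rather than an API of the package.

```python
# Two-stage FIBO flow: structured prompt first, then image generation.
PROMPT_URL = "https://engine.prod.bria-api.com/v2/structured_prompt/generate"
IMAGE_URL = "https://engine.prod.bria-api.com/v2/image/generate"

def refine_and_generate(api_key: str, prompt: str, seed: int = 123456) -> str:
    base = {"prompt": prompt, "model_version": "FIBO", "aspect_ratio": "1:1",
            "steps_num": 50, "guidance_scale": 5, "seed": seed}
    # Stage 1: obtain a structured prompt for the request.
    refined = submit_and_poll(PROMPT_URL, base, api_key)
    result = refined.get("result", {})
    # Stage 2: generate the image from the prompt plus the structured prompt.
    payload = dict(base,
                   structured_prompt=result.get("structured_prompt", ""),
                   seed=result.get("seed", seed))
    generated = submit_and_poll(IMAGE_URL, payload, api_key)
    return generated["result"]["image_url"]
```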