├── .gitignore
├── .python-version
├── Dockerfile
├── Flux-Dev-ComfyUI-Workflow.json
├── LICENSE
├── README.md
├── pyproject.toml
├── smithery.yaml
├── src
│   └── comfy_mcp_server
│       └── __init__.py
└── uv.lock

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Python-generated files
__pycache__/
*.py[oc]
build/
dist/
wheels/
*.egg-info

# Virtual environments
.venv
.aider*
.envrc

--------------------------------------------------------------------------------
/.python-version:
--------------------------------------------------------------------------------
3.10

--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
# Generated by https://smithery.ai. See: https://smithery.ai/docs/config#dockerfile
# Use a slim Python base image
FROM python:3.12-slim-bookworm

# Set the working directory
WORKDIR /app

# Copy the project files
COPY . .

# Install the project and its dependencies from pyproject.toml
RUN pip install --no-cache-dir .

# Set environment variable for the Comfy server URL
# This can be overridden at runtime with `docker run -e COMFY_URL=http://your-comfy-server-url:port ...`
ENV COMFY_URL=http://default-comfy-server-url:port

# Expose any necessary ports (if applicable)
# EXPOSE 8080

# Run the server via the console script defined in pyproject.toml
ENTRYPOINT ["comfy-mcp-server"]

--------------------------------------------------------------------------------
/Flux-Dev-ComfyUI-Workflow.json:
--------------------------------------------------------------------------------
{
  "6": {
    "inputs": {
      "text": "tileset, pixelated, 2d, sprites",
      "clip": [
        "11",
        0
      ]
    },
    "class_type": "CLIPTextEncode",
    "_meta": {
      "title": "CLIP Text Encode (Positive Prompt)"
    }
  },
  "8": {
    "inputs": {
      "samples": [
        "13",
        0
      ],
      "vae": [
        "10",
        0
      ]
    },
    "class_type": "VAEDecode",
    "_meta": {
      "title": "VAE Decode"
    }
  },
  "9": {
    "inputs": {
      "filename_prefix": "ComfyUI",
      "images": [
        "8",
        0
      ]
    },
    "class_type": "SaveImage",
    "_meta": {
      "title": "Save Image"
    }
  },
  "10": {
    "inputs": {
      "vae_name": "ae.safetensors"
    },
    "class_type": "VAELoader",
    "_meta": {
      "title": "Load VAE"
    }
  },
  "11": {
    "inputs": {
      "clip_name1": "t5xxl_fp8_e4m3fn.safetensors",
      "clip_name2": "clip_l.safetensors",
      "type": "flux",
      "device": "default"
    },
    "class_type": "DualCLIPLoader",
    "_meta": {
      "title": "DualCLIPLoader"
    }
  },
  "12": {
    "inputs": {
      "unet_name": "flux1-dev.safetensors",
      "weight_dtype": "default"
    },
    "class_type": "UNETLoader",
    "_meta": {
      "title": "Load Diffusion Model"
    }
  },
  "13": {
    "inputs": {
      "noise": [
        "25",
        0
      ],
      "guider": [
        "22",
        0
      ],
      "sampler": [
        "16",
        0
      ],
      "sigmas": [
        "17",
        0
      ],
      "latent_image": [
        "27",
        0
      ]
    },
    "class_type": "SamplerCustomAdvanced",
    "_meta": {
      "title": "SamplerCustomAdvanced"
    }
  },
"KSamplerSelect", 108 | "_meta": { 109 | "title": "KSamplerSelect" 110 | } 111 | }, 112 | "17": { 113 | "inputs": { 114 | "scheduler": "simple", 115 | "steps": 20, 116 | "denoise": 1, 117 | "model": [ 118 | "30", 119 | 0 120 | ] 121 | }, 122 | "class_type": "BasicScheduler", 123 | "_meta": { 124 | "title": "BasicScheduler" 125 | } 126 | }, 127 | "22": { 128 | "inputs": { 129 | "model": [ 130 | "30", 131 | 0 132 | ], 133 | "conditioning": [ 134 | "26", 135 | 0 136 | ] 137 | }, 138 | "class_type": "BasicGuider", 139 | "_meta": { 140 | "title": "BasicGuider" 141 | } 142 | }, 143 | "25": { 144 | "inputs": { 145 | "noise_seed": 1113224572589433 146 | }, 147 | "class_type": "RandomNoise", 148 | "_meta": { 149 | "title": "RandomNoise" 150 | } 151 | }, 152 | "26": { 153 | "inputs": { 154 | "guidance": 3.5, 155 | "conditioning": [ 156 | "6", 157 | 0 158 | ] 159 | }, 160 | "class_type": "FluxGuidance", 161 | "_meta": { 162 | "title": "FluxGuidance" 163 | } 164 | }, 165 | "27": { 166 | "inputs": { 167 | "width": 512, 168 | "height": 512, 169 | "batch_size": 1 170 | }, 171 | "class_type": "EmptySD3LatentImage", 172 | "_meta": { 173 | "title": "EmptySD3LatentImage" 174 | } 175 | }, 176 | "30": { 177 | "inputs": { 178 | "max_shift": 1.15, 179 | "base_shift": 0.5, 180 | "width": 512, 181 | "height": 512, 182 | "model": [ 183 | "12", 184 | 0 185 | ] 186 | }, 187 | "class_type": "ModelSamplingFlux", 188 | "_meta": { 189 | "title": "ModelSamplingFlux" 190 | } 191 | } 192 | } -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 Karim Lalani 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Comfy MCP Server 2 | 3 | [![smithery badge](https://smithery.ai/badge/@lalanikarim/comfy-mcp-server)](https://smithery.ai/server/@lalanikarim/comfy-mcp-server) 4 | 5 | > A server using FastMCP framework to generate images based on prompts via a remote Comfy server. 6 | 7 | ## Overview 8 | 9 | This script sets up a server using the FastMCP framework to generate images based on prompts using a specified workflow. It interacts with a remote Comfy server to submit prompts and retrieve generated images. 

## Prerequisites

- [uv](https://docs.astral.sh/uv/) package and project manager for Python.
- A workflow file exported from Comfy UI in API format. This repository includes a sample `Flux-Dev-ComfyUI-Workflow.json`, which is provided only as a reference. You will need to export your own workflow and set the environment variables accordingly.

You can install the required packages for local development:

```bash
uv sync
```

## Configuration

Set the following environment variables:

- `COMFY_URL` to point to your Comfy server URL.
- `COMFY_WORKFLOW_JSON_FILE` to point to the absolute path of the API-export JSON file for the ComfyUI workflow.
- `PROMPT_NODE_ID` to the id of the text prompt node.
- `OUTPUT_NODE_ID` to the id of the output node with the final image.
- `OUTPUT_MODE` to either `url` or `file` to select the desired output.
- `COMFY_URL_EXTERNAL` (optional) to a client-facing Comfy server URL; when set, it is used to build the image URL returned in `url` mode. It defaults to `COMFY_URL`.

Optionally, if you have an [Ollama](https://ollama.com) server running, you can connect to it for prompt generation:

- `OLLAMA_API_BASE` to the URL where Ollama is running.
- `PROMPT_LLM` to the name of the model hosted on Ollama for prompt generation.

Example:

```bash
export COMFY_URL=http://your-comfy-server-url:port
export COMFY_WORKFLOW_JSON_FILE=/path/to/the/comfyui_workflow_export.json
export PROMPT_NODE_ID=6 # use the correct node id here
export OUTPUT_NODE_ID=9 # use the correct node id here
export OUTPUT_MODE=file
```
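
If you are unsure which node ids to use, note that the exported workflow JSON is keyed by node id. A small helper sketch (not part of this package) that lists each node's id, class, and title:

```python
import json

with open("/path/to/the/comfyui_workflow_export.json") as f:
    workflow = json.load(f)

# Each top-level key is a node id; class_type and _meta.title identify the node
for node_id, node in workflow.items():
    title = node.get("_meta", {}).get("title", "")
    print(f"{node_id}: {node['class_type']} ({title})")
```

Running this against the bundled sample workflow shows node `6` (`CLIPTextEncode`) as the prompt node and node `9` (`SaveImage`) as the output node.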

## Usage

Comfy MCP Server can be launched with the following command:

```bash
uvx comfy-mcp-server
```

### Example Claude Desktop Config

```json
{
  "mcpServers": {
    "Comfy MCP Server": {
      "command": "/path/to/uvx",
      "args": [
        "comfy-mcp-server"
      ],
      "env": {
        "COMFY_URL": "http://your-comfy-server-url:port",
        "COMFY_WORKFLOW_JSON_FILE": "/path/to/the/comfyui_workflow_export.json",
        "PROMPT_NODE_ID": "6",
        "OUTPUT_NODE_ID": "9",
        "OUTPUT_MODE": "file"
      }
    }
  }
}
```

## Functionality

### `generate_image(prompt: str, ctx: Context) -> Image | str`

This function generates an image from a given prompt. It follows these steps:

1. Checks that all required environment variables are set.
2. Loads the workflow template from the JSON file.
3. Submits the prompt to the Comfy server.
4. Polls the server for the status of the prompt processing.
5. Retrieves and returns the generated image once it is ready.
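
For reference, the server relies on only a small slice of the `/history/{prompt_id}` response during step 4. An abridged illustration of the shape it expects (field values are examples; the output node id here matches the sample workflow):

```json
{
  "<prompt_id>": {
    "status": {
      "completed": true
    },
    "outputs": {
      "9": {
        "images": [
          {
            "filename": "ComfyUI_00001_.png",
            "subfolder": "",
            "type": "output"
          }
        ]
      }
    }
  }
}
```

The first entry under `images` is URL-encoded into the query string of a `/view` request to download the file.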

### `generate_prompt(topic: str, ctx: Context) -> str`

This function generates a comprehensive image generation prompt from a specified topic. It is only registered as a tool when both `OLLAMA_API_BASE` and `PROMPT_LLM` are set.

## Dependencies

- `mcp`: For setting up the FastMCP server.
- `json`: For handling JSON data.
- `urllib`: For making HTTP requests.
- `time`: For adding delays while polling.
- `os`: For accessing environment variables.
- `langchain`: For building a simple LLM prompt chain that generates an image generation prompt from a topic.
- `langchain-ollama`: For the Ollama-specific LangChain modules.

## License

This project is licensed under the MIT License - see the [LICENSE](https://github.com/lalanikarim/comfy-mcp-server/blob/main/LICENSE) file for details.

--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
[project]
name = "comfy-mcp-server"
version = "0.1.11"
description = "MCP Server for ComfyUI text to image Workflow"
readme = "README.md"
requires-python = ">=3.10"
dependencies = [
    "langchain>=0.3.19",
    "langchain-ollama>=0.2.3",
    "mcp[cli]>=1.2.1",
]
authors = [
    {name = "Karim Lalani", email = "jimmy00784@gmail.com"},
]
maintainers = [
    {name = "Karim Lalani", email = "jimmy00784@gmail.com"},
]
license = "MIT"
license-files = ["LICENSE"]

[project.urls]
Homepage = "https://github.com/lalanikarim/comfy-mcp-server/"
Repository = "https://github.com/lalanikarim/comfy-mcp-server/"

[project.scripts]
comfy-mcp-server = "comfy_mcp_server:run_server"

[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[tool.hatch.build.targets.wheel]
packages = ["src/comfy_mcp_server"]

--------------------------------------------------------------------------------
/smithery.yaml:
--------------------------------------------------------------------------------
# Smithery configuration file: https://smithery.ai/docs/config#smitheryyaml

startCommand:
  type: stdio
  configSchema:
    # JSON Schema defining the configuration options for the MCP.
    type: object
    required:
      - comfyUrl
    properties:
      comfyUrl:
        type: string
        description: The URL for the Comfy server, including port.
  commandFunction:
    # A function that produces the CLI command to start the MCP on stdio.
    # Runs the console script installed by the Dockerfile.
    |-
    (config) => ({command: 'comfy-mcp-server', args: [], env: {COMFY_URL: config.comfyUrl}})

--------------------------------------------------------------------------------
/src/comfy_mcp_server/__init__.py:
--------------------------------------------------------------------------------
from mcp.server.fastmcp import FastMCP, Image, Context
import json
import os
import sys
import time
import urllib.parse
from urllib import request
from langchain_ollama.chat_models import ChatOllama
from langchain_core.prompts import PromptTemplate
from langchain_core.output_parsers import StrOutputParser

mcp = FastMCP("Comfy MCP Server")

# Comfy server to submit workflows to; COMFY_URL_EXTERNAL optionally
# overrides the host used in image URLs returned to clients.
host = os.environ.get("COMFY_URL")
override_host = os.environ.get("COMFY_URL_EXTERNAL")
if override_host is None:
    override_host = host
workflow = os.environ.get("COMFY_WORKFLOW_JSON_FILE")

# Load the exported workflow (API format) once at startup
prompt_template = json.load(
    open(workflow, "r")
) if workflow is not None else None

prompt_node_id = os.environ.get("PROMPT_NODE_ID")
output_node_id = os.environ.get("OUTPUT_NODE_ID")
output_mode = os.environ.get("OUTPUT_MODE")

ollama_api_base = os.environ.get("OLLAMA_API_BASE")
prompt_llm = os.environ.get("PROMPT_LLM")


def get_file_url(server: str, url_values: str) -> str:
    return f"{server}/view?{url_values}"


# The prompt-generation tool is only registered when Ollama is configured
if ollama_api_base is not None and prompt_llm is not None:
    @mcp.tool()
    def generate_prompt(topic: str, ctx: Context) -> str:
        """Write an image generation prompt for a provided topic"""

        model = ChatOllama(base_url=ollama_api_base, model=prompt_llm)
        prompt = PromptTemplate.from_template(
            """You are an AI Image Generation Prompt Assistant.
Your job is to review the topic provided by the user for an image generation task and create
an appropriate prompt from it. Respond with a single prompt. Don't ask for feedback about the prompt.

Topic: {topic}
Prompt: """)
        chain = prompt | model | StrOutputParser()
        response = chain.invoke({"topic": topic})
        return response


@mcp.tool()
def generate_image(prompt: str, ctx: Context) -> Image | str:
    """Generate an image using ComfyUI workflow"""

    # Inject the prompt text into the workflow template and submit it
    prompt_template[prompt_node_id]['inputs']['text'] = prompt
    p = {"prompt": prompt_template}
    data = json.dumps(p).encode('utf-8')
    req = request.Request(f"{host}/prompt", data)
    resp = request.urlopen(req)
    response_ready = False
    if resp.status == 200:
        ctx.info("Submitted prompt")
        resp_data = json.loads(resp.read())
        prompt_id = resp_data["prompt_id"]

        # Poll the history endpoint (up to 20 attempts, one second apart)
        for t in range(0, 20):
            history_req = request.Request(
                f"{host}/history/{prompt_id}")
            history_resp = request.urlopen(history_req)
            if history_resp.status == 200:
                ctx.info("Checking status...")
                history_resp_data = json.loads(history_resp.read())
                if prompt_id in history_resp_data:
                    status = (
                        history_resp_data[prompt_id]['status']['completed']
                    )
                    if status:
                        # Fetch the first image produced by the output node
                        output_data = (
                            history_resp_data[prompt_id]
                            ['outputs'][output_node_id]['images'][0]
                        )
                        url_values = urllib.parse.urlencode(output_data)
                        file_url = get_file_url(host, url_values)
                        override_file_url = get_file_url(
                            override_host, url_values)
                        file_req = request.Request(file_url)
                        file_resp = request.urlopen(file_req)
                        if file_resp.status == 200:
                            ctx.info("Image generated")
                            output_file = file_resp.read()
                            response_ready = True
                            break
            # Not finished yet; wait before polling again
            time.sleep(1)

    if response_ready:
        if output_mode is not None and output_mode.lower() == "url":
            # Return a URL built against the client-facing host
            return override_file_url
        return Image(data=output_file, format="png")
    else:
        return "Failed to generate image. Please check server logs."


def run_server():
    # Validate required configuration before starting the MCP server
    errors = []
    if host is None:
        errors.append("- COMFY_URL environment variable not set")
    if workflow is None:
        errors.append(
            "- COMFY_WORKFLOW_JSON_FILE environment variable not set")
    if prompt_node_id is None:
        errors.append("- PROMPT_NODE_ID environment variable not set")
    if output_node_id is None:
        errors.append("- OUTPUT_NODE_ID environment variable not set")

    if len(errors) > 0:
        errors = ["Failed to start Comfy MCP Server:"] + errors
        # Returning the message makes the console-script wrapper print it
        # to stderr and exit with a non-zero status
        return "\n".join(errors)
    else:
        mcp.run()


if __name__ == "__main__":
    sys.exit(run_server())

--------------------------------------------------------------------------------