├── .eslintrc.json
├── .github
│   └── ISSUE_TEMPLATE
│       └── bug_report.md
├── .gitignore
├── .husky
│   └── pre-commit
├── CODE_OF_CONDUCT.md
├── LICENSE
├── README.md
├── __init__.py
├── docs
│   └── get-started.md
├── js
│   ├── assistant
│   │   ├── core.js
│   │   ├── pet.js
│   │   └── state.js
│   ├── auth
│   │   ├── index.js
│   │   ├── login.js
│   │   └── register.js
│   ├── button
│   │   ├── actions.js
│   │   ├── dependencies.js
│   │   ├── index.js
│   │   ├── support.js
│   │   └── ui.js
│   ├── chatbot
│   │   ├── chatbot.js.dep
│   │   ├── edgeTypes.js.dep
│   │   ├── ext.js
│   │   └── workflow.js
│   ├── comfy
│   │   ├── comfy.js
│   │   ├── ext.js
│   │   └── ui.js
│   ├── constants.js
│   ├── index.js
│   ├── lib
│   │   ├── van-ui.js
│   │   └── van.js
│   ├── node
│   │   ├── dialogs.js
│   │   └── index.js
│   ├── resource
│   │   ├── endpoints.js
│   │   ├── index.js
│   │   ├── local.js
│   │   └── utils.js
│   ├── store.js
│   ├── ui
│   │   ├── credits.js
│   │   ├── form.js
│   │   ├── html.js
│   │   ├── runDetails.js
│   │   ├── table.js
│   │   └── uploadProgress.js
│   └── utils.js
├── package-lock.json
├── package.json
├── prestartup_script.py
├── python
│   ├── chat
│   │   ├── classes.py
│   │   ├── comfy_types.py
│   │   ├── format.py
│   │   └── graph.py
│   ├── custom_routes.py
│   ├── test.py
│   ├── upload
│   │   ├── __init__.py
│   │   ├── blob.py
│   │   ├── hash.py
│   │   ├── net.py
│   │   ├── progress.py
│   │   ├── spec.py
│   │   └── sync.py
│   ├── user.py
│   └── utils
│       ├── custom_nodes.py
│       ├── paths.py
│       ├── requirements.py
│       └── task.py
└── requirements.txt
/.eslintrc.json:
--------------------------------------------------------------------------------
1 | {
2 | "env": {
3 | "browser": true,
4 | "es2021": true
5 | },
6 | "extends": "eslint:recommended",
7 | "parserOptions": {
8 | "ecmaVersion": "latest",
9 | "sourceType": "module"
10 | },
11 | "globals": {
12 | "LiteGraph": true,
13 | "LGraphCanvas": true
14 | },
15 | "rules": {
16 | }
17 | }
18 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Describe the bug**
11 | A clear and concise description of what the bug is.
12 |
13 | **To Reproduce**
14 | Steps to reproduce the behavior:
15 | 1. Go to '...'
16 | 2. Click on '....'
17 | 3. Scroll down to '....'
18 | 4. See error
19 |
20 | **Expected behavior**
21 | A clear and concise description of what you expected to happen.
22 |
23 | **Screenshots**
24 | If applicable, add screenshots to help explain your problem.
25 |
26 | **Desktop (please complete the following information):**
27 | - OS: [e.g. iOS]
28 | - Browser [e.g. chrome, safari]
29 | - Version [e.g. 22]
30 |
31 | **Smartphone (please complete the following information):**
32 | - Device: [e.g. iPhone6]
33 | - OS: [e.g. iOS8.1]
34 | - Browser [e.g. stock browser, safari]
35 | - Version [e.g. 22]
36 |
37 | **Additional context**
38 | Add any other context about the problem here.
39 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | private
2 | __pycache__/
3 | .comfycloud_profile
4 | /logs
5 | /temp
6 | node_modules
7 |
8 |
--------------------------------------------------------------------------------
/.husky/pre-commit:
--------------------------------------------------------------------------------
1 | npm run lint-staged
2 |
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Contributor Covenant Code of Conduct
2 |
3 | ## Our Pledge
4 |
5 | We as members, contributors, and leaders pledge to make participation in our
6 | community a harassment-free experience for everyone, regardless of age, body
7 | size, visible or invisible disability, ethnicity, sex characteristics, gender
8 | identity and expression, level of experience, education, socio-economic status,
9 | nationality, personal appearance, race, religion, or sexual identity
10 | and orientation.
11 |
12 | We pledge to act and interact in ways that contribute to an open, welcoming,
13 | diverse, inclusive, and healthy community.
14 |
15 | ## Our Standards
16 |
17 | Examples of behavior that contributes to a positive environment for our
18 | community include:
19 |
20 | * Demonstrating empathy and kindness toward other people
21 | * Being respectful of differing opinions, viewpoints, and experiences
22 | * Giving and gracefully accepting constructive feedback
23 | * Accepting responsibility and apologizing to those affected by our mistakes,
24 | and learning from the experience
25 | * Focusing on what is best not just for us as individuals, but for the
26 | overall community
27 |
28 | Examples of unacceptable behavior include:
29 |
30 | * The use of sexualized language or imagery, and sexual attention or
31 | advances of any kind
32 | * Trolling, insulting or derogatory comments, and personal or political attacks
33 | * Public or private harassment
34 | * Publishing others' private information, such as a physical or email
35 | address, without their explicit permission
36 | * Other conduct which could reasonably be considered inappropriate in a
37 | professional setting
38 |
39 | ## Enforcement Responsibilities
40 |
41 | Community leaders are responsible for clarifying and enforcing our standards of
42 | acceptable behavior and will take appropriate and fair corrective action in
43 | response to any behavior that they deem inappropriate, threatening, offensive,
44 | or harmful.
45 |
46 | Community leaders have the right and responsibility to remove, edit, or reject
47 | comments, commits, code, wiki edits, issues, and other contributions that are
48 | not aligned to this Code of Conduct, and will communicate reasons for moderation
49 | decisions when appropriate.
50 |
51 | ## Scope
52 |
53 | This Code of Conduct applies within all community spaces, and also applies when
54 | an individual is officially representing the community in public spaces.
55 | Examples of representing our community include using an official e-mail address,
56 | posting via an official social media account, or acting as an appointed
57 | representative at an online or offline event.
58 |
59 | ## Enforcement
60 |
61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be
62 | reported to the community leaders responsible for enforcement at
63 | @extrafuzzy_ on Discord.
64 | All complaints will be reviewed and investigated promptly and fairly.
65 |
66 | All community leaders are obligated to respect the privacy and security of the
67 | reporter of any incident.
68 |
69 | ## Enforcement Guidelines
70 |
71 | Community leaders will follow these Community Impact Guidelines in determining
72 | the consequences for any action they deem in violation of this Code of Conduct:
73 |
74 | ### 1. Correction
75 |
76 | **Community Impact**: Use of inappropriate language or other behavior deemed
77 | unprofessional or unwelcome in the community.
78 |
79 | **Consequence**: A private, written warning from community leaders, providing
80 | clarity around the nature of the violation and an explanation of why the
81 | behavior was inappropriate. A public apology may be requested.
82 |
83 | ### 2. Warning
84 |
85 | **Community Impact**: A violation through a single incident or series
86 | of actions.
87 |
88 | **Consequence**: A warning with consequences for continued behavior. No
89 | interaction with the people involved, including unsolicited interaction with
90 | those enforcing the Code of Conduct, for a specified period of time. This
91 | includes avoiding interactions in community spaces as well as external channels
92 | like social media. Violating these terms may lead to a temporary or
93 | permanent ban.
94 |
95 | ### 3. Temporary Ban
96 |
97 | **Community Impact**: A serious violation of community standards, including
98 | sustained inappropriate behavior.
99 |
100 | **Consequence**: A temporary ban from any sort of interaction or public
101 | communication with the community for a specified period of time. No public or
102 | private interaction with the people involved, including unsolicited interaction
103 | with those enforcing the Code of Conduct, is allowed during this period.
104 | Violating these terms may lead to a permanent ban.
105 |
106 | ### 4. Permanent Ban
107 |
108 | **Community Impact**: Demonstrating a pattern of violation of community
109 | standards, including sustained inappropriate behavior, harassment of an
110 | individual, or aggression toward or disparagement of classes of individuals.
111 |
112 | **Consequence**: A permanent ban from any sort of public interaction within
113 | the community.
114 |
115 | ## Attribution
116 |
117 | This Code of Conduct is adapted from the [Contributor Covenant][homepage],
118 | version 2.0, available at
119 | https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
120 |
121 | Community Impact Guidelines were inspired by [Mozilla's code of conduct
122 | enforcement ladder](https://github.com/mozilla/diversity).
123 |
124 | [homepage]: https://www.contributor-covenant.org
125 |
126 | For answers to common questions about this code of conduct, see the FAQ at
127 | https://www.contributor-covenant.org/faq. Translations are available at
128 | https://www.contributor-covenant.org/translations.
129 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | Installation - Docs - Support - Bug reports
6 |
7 |
8 |
9 |
10 |
11 |
12 | ## Run your workflow using cloud GPU resources, from your local ComfyUI
13 | Don't have enough VRAM for certain nodes? Our custom node enables you to run ComfyUI locally with full control, while utilizing cloud GPU resources for your workflow.
14 |
15 | - Run workflows that require high VRAM
16 | - No need to import custom nodes/models into cloud providers
17 | - No need to spend cash on a new GPU
18 |
19 |
20 | https://github.com/nathannlu/ComfyUI-Cloud/assets/24965772/b53888a1-8e9a-4ddb-914b-fce4f0c157fe
21 |
22 |
23 |
24 |
25 | ## Comfy Cloud Plugin Installation
26 |
27 | > The plugin lets you execute workflows on a cloud GPU, even if your laptop does not have one.
28 |
29 | 1. `cd custom_nodes`
30 | 2. `git clone https://github.com/nathannlu/comfyui-cloud.git`
31 | 3. Run your workflow!
32 |
33 | ## How to use
34 | Check out our [Getting Started guide](https://github.com/nathannlu/ComfyUI-Cloud/blob/main/docs/get-started.md)!
35 |
36 | ## Special Thanks
37 |
38 |
39 |
40 |
41 |
--------------------------------------------------------------------------------
/__init__.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 |
4 | WEB_DIRECTORY = "js"
5 | NODE_CLASS_MAPPINGS = {}
6 | __all__ = ['NODE_CLASS_MAPPINGS']
7 |
8 | sys.path.append(os.path.join(os.path.dirname(__file__)))
9 |
10 | import inspect
11 | import sys
12 | import importlib
13 | import subprocess
14 | import requests
15 | import folder_paths
16 | from folder_paths import add_model_folder_path, get_filename_list, get_folder_paths
17 | from tqdm import tqdm
18 | from .python import custom_routes
19 |
20 |
21 | # Install requirements
22 | import threading
23 | import locale
24 |
25 | def handle_stream(stream, prefix):
26 | stream.reconfigure(encoding=locale.getpreferredencoding(), errors='replace')
27 | for msg in stream:
28 | if prefix == '[!]' and ('it/s]' in msg or 's/it]' in msg) and ('%|' in msg or 'it [' in msg):
29 | if msg.startswith('100%'):
30 | print('\r' + msg, end="", file=sys.stderr)
31 | else:
32 | print('\r' + msg[:-1], end="", file=sys.stderr)
33 | else:
34 | if prefix == '[!]':
35 | print(prefix, msg, end="", file=sys.stderr)
36 | else:
37 | print(prefix, msg, end="")
38 |
39 | def run_script(cmd, cwd='.'):
40 | if len(cmd) > 0 and cmd[0].startswith("#"):
41 | print(f"[ComfyUI-Manager] Unexpected behavior: `{cmd}`")
42 | return 0
43 |
44 | process = subprocess.Popen(cmd, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, bufsize=1)
45 |
46 | stdout_thread = threading.Thread(target=handle_stream, args=(process.stdout, ""))
47 | stderr_thread = threading.Thread(target=handle_stream, args=(process.stderr, "[!]"))
48 |
49 | stdout_thread.start()
50 | stderr_thread.start()
51 |
52 | stdout_thread.join()
53 | stderr_thread.join()
54 |
55 | return process.wait()
56 |
57 | try:
58 | import modal
59 | except:
60 | my_path = os.path.dirname(__file__)
61 | requirements_path = os.path.join(my_path, "requirements.txt")
62 |
63 | print(f"## Comfy Cloud: installing dependencies")
64 |
65 | run_script([sys.executable, '-s', '-m', 'pip', 'install', '-r', requirements_path])
66 |
67 | try:
68 | import git
69 | except:
70 | print(f"## [ERROR] Comfy Cloud: Attempting to reinstall dependencies using an alternative method.")
71 | run_script([sys.executable, '-s', '-m', 'pip', 'install', '--user', '-r', requirements_path])
72 |
73 | try:
74 | import git
75 | except:
76 | print(f"## [ERROR] Comfy Cloud: Failed to install the GitPython package in the correct Python environment. Please install it manually in the appropriate environment.")
77 |
78 | print(f"## Comfy Cloud: installing dependencies done.")
79 |
80 |
81 |
82 |
83 |
84 |
--------------------------------------------------------------------------------
/docs/get-started.md:
--------------------------------------------------------------------------------
1 | # Get Started
2 |
3 | Once you have installed the custom node, a new button labeled "Generate on Cloud" will appear in your right-hand panel, below the "Queue Prompt" button. Click this button to begin.
4 | 
5 |
6 | **NOTE**: DO NOT attempt to manually search and create the ComfyUI Cloud node. The custom node will handle this process automatically for you at a later stage.
7 |
8 | ## Log in
9 |
10 | Create an account to start using the node. Currently, this account is used to keep your generated images private and to manage your generation credits if you choose to purchase more.
11 | 
12 |
13 |
14 | Upon logging in, click the "Generate on Cloud" button once more to name and upload your workflow.
15 |
16 | ## Uploading Your Workflow
17 |
18 | After you provide a name, the custom node will search for the models, custom nodes, and images your workflow relies on, then upload them to your cloud. This step may take some time, particularly if your workflow uses many custom nodes and models. Feel free to take a coffee break!
19 |
20 | 
21 |
22 |
23 | **IMPORTANT**: This node only searches for models, images, and custom nodes inside your ComfyUI folder. If your ComfyUI uses models from an Automatic1111 installation via extra_model_paths.yaml, you must move those models into your ComfyUI folder and disable Comfy's extra_model_paths.yaml.
24 |
25 | ## Running Your Workflow
26 |
27 | Upon completion of the upload process, a Comfy Cloud node will be created in your workflow, automatically executing your workflow in the cloud once.
28 | If you delete this Comfy Cloud node, you will be required to re-upload your workflow.
29 | 
30 |
31 |
32 | You can access all cloud workflow runs by clicking the blue "View past runs" button in your Comfy Cloud node.
33 |
34 | 
35 |
36 |
37 | ## Viewing your generation results
38 | To locate your most recent generation, scroll to the bottom of the table that pops up after clicking "View past runs" and select the item with the latest timestamp. If your workflow requires significant time for generation, it will display progress, iterations per second, an estimated time for completion, and an option to halt execution. Stopping the execution will result in charges only up to the point of termination.
39 |
40 | Upon successful generation, the panel will automatically display your generated image.
41 |
42 | 
43 |
--------------------------------------------------------------------------------
/js/assistant/core.js:
--------------------------------------------------------------------------------
1 |
2 | export class GameObject {
3 | constructor(x, y, width, height) {
4 | this.x = x
5 | this.y = y
6 | this.width = width
7 | this.height = height
8 | this.id = "hi"
9 | this.isActive = true
10 | }
11 |
12 | onClick() {}
13 |
14 | // Check if this object is touching another object
15 | isTouching(otherObject) {
16 | return (
17 | this.x < otherObject.x + otherObject.width &&
18 | this.x + this.width > otherObject.x &&
19 | this.y < otherObject.y + otherObject.height &&
20 | this.y + this.height > otherObject.y
21 | )
22 | }
23 |
24 | delete() {
25 | this.isActive = false
26 | }
27 | }
28 |
--------------------------------------------------------------------------------
/js/assistant/pet.js:
--------------------------------------------------------------------------------
1 | //import { GIF } from "../libs/gif.js";
2 | import { GameObject } from './core.js'
3 | import workflowState from './state.js'
4 |
5 | // Our sprite sheet is on a grid of 128px
6 | // Each row is 128px tall, and each frame is 128px wide
7 | const SPRITE_SIZE = 128
8 | const SPRITE_SHEET = {
9 | JUMP: {
10 | row: 0,
11 | frames: 11,
12 | },
13 | IDLE1: {
14 | row: 1,
15 | frames: 5,
16 | },
17 | IDLE2: {
18 | row: 2,
19 | frames: 5,
20 | },
21 | SIT: {
22 | row: 3,
23 | frames: 9,
24 | },
25 | WALK: {
26 | row: 4,
27 | frames: 5,
28 | },
29 | RUN: {
30 | row: 5,
31 | frames: 8,
32 | },
33 | SNIFF: {
34 | row: 6,
35 | frames: 8,
36 | },
37 | SNIFF_WALK: {
38 | row: 7,
39 | frames: 8,
40 | },
41 | }
42 |
43 | /**
44 | * Base pet class
45 | */
46 | export class Pet extends GameObject {
47 | constructor({ x, y, height, width }) {
48 | super(x, y, width, height)
49 | // Pet state
50 | this.x = x
51 | this.currentDirection = 'right'
52 |
53 | this.height = height
54 | this.width = width
55 |
56 | this.emote = false
57 | this.talk = false
58 | this.talkText = ''
59 |
60 | this.hungerPoints = 0
61 |
62 | // These properties tell the pet when
63 | // to change directions. Right now
64 | // the pet will randomly change directions
65 | // after t seconds.
66 | this.time = 0
67 | this.directionDuration = 0
68 | this.scaleDownBy = 10
69 |
70 | // Assets
71 | this.petImage = new Image()
72 | this.petImage.src =
73 | 'https://comfyui-output.nyc3.cdn.digitaloceanspaces.com/babycorgi-sprite-128x128.png'
74 |
75 | this.textBubble = new Image()
76 | this.textBubble.src =
77 | 'https://comfyui-output.nyc3.cdn.digitaloceanspaces.com/text-bubble.png'
78 |
79 | this.age = 0
80 | this._initializePet()
81 | }
82 |
83 | async _initializePet() {
84 | this.talk = false
85 | this.talkText = ''
86 | }
87 |
88 | /**
89 | * Creates a list of animations from a spritesheet
90 | * - e.g. renderWalk, renderSniff_walk, renderIdle1
91 | */
92 | createSpriteAnimations(image, scaleDownBy) {
93 | Object.keys(SPRITE_SHEET).forEach((animName) => {
94 | // transform name to title case
95 | // FUNC1 -> Func1
96 | const titleCase =
97 | animName.charAt(0).toUpperCase() + animName.slice(1).toLowerCase()
98 | const funcName = `render${titleCase}`
99 |
100 | const spriteFrames = SPRITE_SHEET[animName].frames
101 | const spriteFramesY = SPRITE_SHEET[animName].row
102 | this[funcName] = (ctx, renderCount, slowFpsBy = 10) => {
103 | this.renderSpriteAnimation(
104 | ctx,
105 | image,
106 | {
107 | renderCount,
108 | spriteFrames: spriteFrames - 1,
109 | spriteFramesY,
110 | slowFpsBy,
111 | },
112 | {
113 | scaleDownBy
114 | }
115 | )
116 | }
117 | })
118 | }
119 |
120 | setTalk(text) {
121 | // set an emote for t seconds
122 | this.talk = true
123 | this.talkText = text
124 | const timePerChar = 75
125 | const duration = this.talkText.length * timePerChar;
126 |
127 | setTimeout(() => {
128 | this.talk = false
129 | }, duration)
130 | }
131 |
132 | onClick() {
133 | this.setTalk(workflowState.getState("workflowState"))
134 | }
135 |
136 | // // debug function
137 | _showHitBox(ctx) {
138 | if (!ctx) {
139 | console.error("Canvas context (ctx) is undefined.");
140 | return;
141 | }
142 |
143 | ctx.fillStyle = 'blue'
144 | if (ctx.fillRect) {
145 | ctx.fillRect(
146 | this.x, // x
147 | this.y,
148 | this.width,
149 | this.height,
150 | )
151 | }
152 |
153 | }
154 |
155 | renderSpriteAnimation(ctx, spriteSheet, frameSettings) {
156 | const {
157 | renderCount,
158 | spriteFrames,
159 | spriteFramesY,
160 | slowFpsBy: _slowFpsBy
161 | } = frameSettings
162 |
163 | let slowFpsBy = _slowFpsBy || 10
164 | // const { scaleDownBy } = options
165 |
166 | const _spriteFramesY = SPRITE_SIZE * spriteFramesY
167 | const spriteRenderSize = SPRITE_SIZE // This is the final size users see the sprite as
168 | // ctx.imageSmoothingEnabled = true
169 | // ctx.imageSmoothingQuality = 'high'
170 |
171 | // There are 5 frames in the sprite sheet for walking
172 | // so instead of doing this.renderCount % 4 (0 - 5 frames),
173 | // we do 0 - 50 frames and scale down for a lower image fps.
174 | const _frame = renderCount % (spriteFrames * slowFpsBy)
175 | const frame = Math.round(_frame/ slowFpsBy)
176 |
177 | const currentRenderFrame = SPRITE_SIZE * frame
178 |
179 | // Offset
180 | const offsetX = (spriteRenderSize - this.width) / 2
181 | const offsetY = (spriteRenderSize - this.height) / 2
182 |
183 | if (ctx?.drawImage) {
184 | ctx.drawImage(
185 | spriteSheet,
186 | currentRenderFrame,
187 | _spriteFramesY,
188 | SPRITE_SIZE,
189 | SPRITE_SIZE,
190 | this.x - offsetX,
191 | this.y - offsetY,
192 | spriteRenderSize,
193 | spriteRenderSize,
194 | )
195 | }
196 |
197 | }
198 |
199 |
200 | move(ctx, renderCount) {
201 | this.renderIdle2(ctx, renderCount)
202 | }
203 |
204 |
205 | renderTextBubble(ctx) {
206 | ctx.fillStyle = 'black'
207 | ctx.font = '14px Courier New'
208 | ctx.fontWeight = 'bold'
209 |
210 | const lines = this.talkText.split('\n');
211 |
212 | const textBubbleWidth = this.talkText.length * 7
213 | const textBubbleHeight = lines.length * 35
214 |
215 |
216 | const textBubbleX = this.x + this.width
217 | const textBubbleY = this.y - textBubbleHeight
218 |
219 |
220 | ctx.drawImage(
221 | this.textBubble,
222 | textBubbleX,
223 | textBubbleY,
224 | textBubbleWidth,
225 | textBubbleHeight,
226 | )
227 |
228 | const textX = textBubbleX + textBubbleWidth / 6
229 | const textY = textBubbleY + textBubbleHeight / 2.4
230 |
231 | for (let i = 0; i < lines.length; i++) {
232 | ctx.fillText(lines[i], textX, textY + i * 20);
233 | }
234 |
235 | // }
236 | }
237 |
238 | render(ctx, renderCount) {
239 | this.createSpriteAnimations(this.petImage, this.scaleDownBy)
240 |
241 | if (this.talk) {
242 | this.renderTextBubble(ctx)
243 | }
244 |
245 | // this._showHitBox(ctx)
246 | this.move(ctx, renderCount)
247 | }
248 | }
249 |
--------------------------------------------------------------------------------
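The frame math in `renderSpriteAnimation` above holds each sprite frame for roughly `slowFpsBy` render ticks before advancing. A small self-contained sketch of that arithmetic (values chosen for illustration, not taken from the repo):

```js
// Worked example of the frame-slowing math used in renderSpriteAnimation.
const spriteFrames = 4; // an animation passed in as frames - 1 == 4
const slowFpsBy = 10;   // each sprite frame is held for ~10 render ticks

for (const renderCount of [0, 9, 10, 19, 39, 40]) {
  const wrapped = renderCount % (spriteFrames * slowFpsBy); // 0..39
  const frame = Math.round(wrapped / slowFpsBy);            // 0..4
  console.log(renderCount, "->", frame);
}
// Prints: 0 -> 0, 9 -> 1, 10 -> 1, 19 -> 2, 39 -> 4, 40 -> 0
```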
/js/assistant/state.js:
--------------------------------------------------------------------------------
1 | import { helpHandler } from "../button/support.js"
2 |
3 | export const WorkflowState = {
4 | INCORRECT_START_NODE: "Delete me and click\n'Generate'! Woof!",
5 | INSUFFICIENT_CREDITS: "Borked.\nNot enough credits to\nrun another workflow.\nPress 'Account' to top up. ",
6 | IDLE: "No workflows running.\nLet's generate something! Woof.",
7 | CREATING: "Creating new workflow.\nThis may take a while. Woof.",
8 | SYNCING: "Syncing dependencies to the cloud.\nWoof.",
9 | UPDATING: "Updating workflows with the cloud.\nWoof.",
10 | PROCESSING: "Processing workflow for execution.\nClick 'View Results' to see its progress.",
11 | RUNNING: "Workflow's running.\nLet's get a coffee. Woof.",
12 | FINISHED: "Workflow's done,\nlet's have a peep!",
13 | };
14 |
15 | class State {
16 | constructor() {
17 | if (!State.instance) {
18 | this.state = {workflowState: WorkflowState.IDLE};
19 | State.instance = this;
20 | }
21 | return State.instance;
22 | }
23 |
24 | getState(key) {
25 | helpHandler("assistant")
26 | return this.state[key];
27 | }
28 |
29 | setState(key, value) {
30 | this.state[key] = value;
31 | }
32 | }
33 |
34 | const instance = new State();
35 | Object.freeze(instance);
36 |
37 | export default instance;
--------------------------------------------------------------------------------
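The state module above exports one frozen `State` instance so the button actions and the assistant pet share the same store. A minimal self-contained sketch of the same pattern (names are illustrative, not the repo's API):

```js
// Miniature of the singleton store pattern used in js/assistant/state.js (illustrative only).
class Store {
  constructor() {
    if (!Store.instance) {
      this.state = { workflowState: "IDLE" };
      Store.instance = this;
    }
    return Store.instance; // every `new Store()` returns the same object
  }
  getState(key) { return this.state[key]; }
  setState(key, value) { this.state[key] = value; }
}

const store = Object.freeze(new Store());

// Object.freeze is shallow: it locks the instance's own properties,
// but the nested `state` object stays mutable, so setState keeps working.
store.setState("workflowState", "RUNNING");
console.log(store.getState("workflowState")); // "RUNNING"
console.log(new Store() === store);           // true: always the same instance
```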
/js/auth/index.js:
--------------------------------------------------------------------------------
1 | import { ComfyCloudDialog } from '../comfy/ui.js';
2 | import van from '../lib/van.js';
3 |
4 | import { Login } from './login.js';
5 | import { Register } from './register.js';
6 |
7 | const Auth = (dialogInstance) => {
8 | const activeTab = van.state(1)
9 |
10 | return () => van.tags.div(
11 | activeTab.val == 0 ? Login(dialogInstance, activeTab) : Register(dialogInstance, activeTab)
12 | )
13 | }
14 |
15 | export const authDialog = new ComfyCloudDialog(Auth)
16 |
--------------------------------------------------------------------------------
/js/auth/login.js:
--------------------------------------------------------------------------------
1 | import van from '../lib/van.js';
2 | import { setData } from '../store.js';
3 | import { nimbus } from '../resource/index.js';
4 | import { infoDialog } from '../comfy/ui.js';
5 | import { generateForm } from '../ui/form.js';
6 |
7 | const { a, div, p } = van.tags
8 |
9 |
10 | const loginUser = async ({email, password}, dialogInstance) => {
11 | // Retrieve values from the input fields
12 | try {
13 | const data = await nimbus.auth.login({
14 | email: email,
15 | password: password,
16 | })
17 | setData({
18 | apiKey: data.token,
19 | user: data.user
20 | })
21 |
22 | infoDialog.show();
23 | infoDialog.showMessage(
24 | "Authenticated",
25 | "You are now logged in",
26 | );
27 | dialogInstance.close()
28 |
29 | } catch(e) {
30 | throw new Error(e.message)
31 | }
32 | }
33 |
34 | export const Login = (dialogInstance, activeTab) => {
35 | const schema = {
36 | title: "Login",
37 | fields: {
38 | email: {
39 | label: "Email",
40 | type: "email",
41 | placeholder: "Enter your email",
42 | required: true
43 | },
44 | password: {
45 | label: "Password",
46 | type: "password",
47 | placeholder: "Enter your password",
48 | required: true
49 | }
50 | },
51 | onSubmit: loginUser,
52 | submitButtonText: "Login"
53 | };
54 |
55 | return () => div(
56 | generateForm(schema, dialogInstance),
57 | p({style: "color: #eee; text-align: center; margin-top: 20px; cursor: pointer;"},
58 | a({onclick: () => activeTab.val = 1}, "Don't have an account? Click here to sign up")
59 | )
60 | );
61 | }
62 |
--------------------------------------------------------------------------------
/js/auth/register.js:
--------------------------------------------------------------------------------
1 | import van from '../lib/van.js';
2 | import { setData } from '../store.js';
3 | import { nimbus } from '../resource/index.js';
4 | import { infoDialog } from '../comfy/ui.js';
5 | import { generateForm } from '../ui/form.js';
6 |
7 | const { a, div, p } = van.tags
8 |
9 | const registerUser = async ({email, password, confirmPassword}, dialogInstance) => {
10 | // Retrieve values from the input fields
11 |
12 | try {
13 | if(password !== confirmPassword) {
14 | throw new Error("Passwords do not match")
15 | }
16 | const data = await nimbus.auth.register({
17 | email,
18 | password
19 | })
20 | setData({
21 | apiKey: data.token,
22 | user: data.user
23 | })
24 |
25 | infoDialog.show();
26 | infoDialog.showMessage(
27 | "Authenticated",
28 | "You are now logged in",
29 | );
30 | dialogInstance.close()
31 |
32 | } catch(e) {
33 | throw new Error(e.message)
34 | }
35 | }
36 |
37 | export const Register = (dialogInstance, activeTab) => {
38 | const schema = {
39 | title: "Get 150 credits for free by signing up",
40 | fields: {
41 | email: {
42 | label: "Email",
43 | type: "email",
44 | placeholder: "Enter your email",
45 | required: true
46 | },
47 | password: {
48 | label: "Password",
49 | type: "password",
50 | placeholder: "Enter your password",
51 | required: true
52 | },
53 | confirmPassword: {
54 | label: "Confirm password",
55 | type: "password",
56 | placeholder: "Re-enter your password",
57 | required: true
58 | }
59 | },
60 | onSubmit: registerUser,
61 | submitButtonText: "Sign up"
62 | };
63 |
64 | return () => div(
65 | generateForm(schema, dialogInstance),
66 | p({style: "color: #eee; text-align: center; margin-top: 20px; cursor: pointer;"},
67 | a({onclick: () => activeTab.val = 0}, "Already have an account? Click here to Log in")
68 | ),
69 | p({style: "color: #808080; text-align: center; margin-top: 18px;"},
70 | "All workflows are private and secured"
71 | )
72 | );
73 | }
74 |
--------------------------------------------------------------------------------
/js/button/actions.js:
--------------------------------------------------------------------------------
1 | import {
2 | resolveDependencies,
3 | pollSyncDependencies,
4 | } from "./dependencies.js"
5 | import {
6 | createMetaNode,
7 | getWorkflowName,
8 | setWorkflowId,
9 | getWorkflowId,
10 | getApiToken,
11 | validatePrompt,
12 | compareWorkflows,
13 | isWorkflowUpToDate
14 | } from "../utils.js"
15 | import { app } from '../comfy/comfy.js';
16 | import { infoDialog } from '../comfy/ui.js';
17 | import {
18 | setButtonDefault,
19 | setButtonLoading,
20 | setMessage,
21 | } from './ui.js';
22 | import { authDialog } from '../auth/index.js';
23 | import { nimbus, local } from '../resource/index.js';
24 | import { endpoint } from '../constants.js';
25 | import workflowState, { WorkflowState } from '../assistant/state.js';
26 | import {
27 | // ComfyCloudDialog,
28 | ComfyCloudPopover } from '../comfy/ui.js';
29 |
30 | import {
31 | taskId,
32 | Progress } from '../ui/uploadProgress.js';
33 |
34 | export const progressDialog = new ComfyCloudPopover(Progress, "Uploading dependencies...")
35 |
36 | export async function onGeneration() {
37 | try {
38 | setButtonDefault()
39 | // check auth
40 | const apiToken = getApiToken();
41 | const doesApiTokenExist = !!apiToken;
42 |
43 | if(!doesApiTokenExist) {
44 | // Request auth
45 | setButtonDefault()
46 | return authDialog.show();
47 | }
48 |
49 | await nimbus.workflow.init();
50 |
51 | // check if ComfyCloud meta node exists
52 | const deployMeta = app.graph.findNodesByType("ComfyCloud");
53 | let isNewWorkflow = deployMeta.length == 0
54 |
55 | // This case usually happens when user manually adds the ComfyCloud node
56 | // and doesn't delete it
57 | const hasNoId = !isNewWorkflow && !getWorkflowId()
58 | if(hasNoId) {
59 | app.graph.remove(deployMeta[0])
60 | isNewWorkflow = true
61 | }
62 |
63 | const localWorkflow = await app.graphToPrompt();
64 | const isValid = await validatePrompt(localWorkflow.output);
65 | if(!isValid) {
66 | throw new Error("Prompt is not valid")
67 | }
68 |
69 | // Start execution
70 | setButtonLoading();
71 |
72 | if(isNewWorkflow) {
73 | // Wait for user to input workflow name
74 | await createMetaNode();
75 | workflowState.setState("workflowState", WorkflowState.CREATING);
76 | //await createEmptyWorkflow()
77 | const newWorkflow = await nimbus.workflow.create({
78 | name: getWorkflowName(),
79 | })
80 | setWorkflowId(newWorkflow.id)
81 |
82 |
83 | setMessage("Creating new workflow. This may take awhile");
84 | }
85 |
86 | // compare workflow
87 | const existing_workflow = await nimbus.workflow.retrieve()
88 |
89 | const diffDeps = compareWorkflows(localWorkflow.output, existing_workflow.workflow_api);
90 |
91 | // sync workflow
92 | if(!isWorkflowUpToDate(diffDeps)) {
93 | setMessage("Syncing dependencies...");
94 | workflowState.setState("workflowState", WorkflowState.SYNCING);
95 |
96 | const { dependencies, patch: workflow_patch } = await resolveDependencies(diffDeps)
97 | const res = await local.uploadDependencies({
98 | workflow_id: getWorkflowId(),
99 | endpoint,
100 | ...dependencies,
101 | })
102 |
103 |
104 | const uploadTaskId = res.task_id
105 | if(uploadTaskId) {
106 | taskId.val = uploadTaskId
107 | // Open UI window
108 | progressDialog.show();
109 |
110 | await pollSyncDependencies(uploadTaskId)
111 | }
112 |
113 | setMessage("Updating workflow...");
114 | workflowState.setState("workflowState", WorkflowState.UPDATING);
115 |
116 | await nimbus.workflow.update({
117 | workflow: localWorkflow.workflow,
118 | workflow_api: localWorkflow.output,
119 | workflow_patch: workflow_patch,
120 | dependencies: dependencies,
121 | })
122 | }
123 |
124 | // Beyond this point, we assume all dependencies
125 | // and the workflow API are synced to the cloud
126 |
127 | // create run
128 | //await createRun()
129 | const newWorkflowRun = await nimbus.workflowRun.create()
130 |
131 | infoDialog.showMessage(
132 | "Item queued! Comfy Cloud node has been created on the ComfyUI interface",
133 | "You can view your generation results by clicking 'View Results' on the newly-created node."
134 | )
135 |
136 | const e = new CustomEvent('workflowRunCreated', {
137 | detail: {
138 | workflowRunId: newWorkflowRun.id
139 | }
140 | });
141 | document.dispatchEvent(e);
142 |
143 | workflowState.setState("workflowState", WorkflowState.PROCESSING);
144 | } catch (e) {
145 | // handle error
146 | await nimbus.workflow.error({ e: e.message || "No error message" });
147 | infoDialog.showMessage("Error", e.message || e);
148 | } finally {
149 | setButtonDefault()
150 | setMessage("")
151 | }
152 | }
153 |
154 | document.addEventListener('workflowRunCreated', async (e) => {
155 | let poll = true; // Use a boolean variable to control the loop
156 |
157 | while (poll) {
158 | await new Promise(resolve => setTimeout(resolve, 2000)); // Wait for 2 seconds before the next poll
159 |
160 | try {
161 | const { workflowRun } = await nimbus.workflowRun.pollRun(e.detail.workflowRunId);
162 | if (workflowRun?.status === "success" || workflowRun?.status === "failed" || workflowRun?.status === "terminated") {
163 | poll = false; // Exit the loop when a terminal status is reached
164 | }
165 | } catch (error) {
166 | console.error("Poll workflow run error:", error);
167 | poll = false; // Exit the loop if an error occurs
168 | }
169 | }
170 |
171 | console.log("Show notif: Workflow run completed", e.detail.workflowRunId);
172 | infoDialog.showMessage(
173 | "Workflow run completed!",
174 | "You got an extra +6 credits for running this workflow. You can use these credits to run more workflows. View your generation results by clicking the 'Menu' button in your Comfy Cloud custom node."
175 | );
176 | });
177 |
--------------------------------------------------------------------------------
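The `workflowRunCreated` listener above polls every two seconds until the run reaches a terminal status. A stripped-down, self-contained sketch of that loop, where `fetchStatus` is a hypothetical stand-in for `nimbus.workflowRun.pollRun`:

```js
// Poll-until-terminal sketch mirroring the workflowRunCreated listener (illustrative only).
const TERMINAL_STATUSES = new Set(["success", "failed", "terminated"]);

async function pollUntilDone(fetchStatus, intervalMs = 2000) {
  for (;;) {
    await new Promise((resolve) => setTimeout(resolve, intervalMs)); // wait before each poll
    try {
      const status = await fetchStatus();
      if (TERMINAL_STATUSES.has(status)) return status;
    } catch (error) {
      console.error("Poll workflow run error:", error);
      return "failed"; // stop polling on error, as the listener does
    }
  }
}

// Usage: a fake status source that reports "running" twice, then "success".
let calls = 0;
pollUntilDone(async () => (++calls < 3 ? "running" : "success"), 10)
  .then((status) => console.log("final status:", status));
```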
/js/button/dependencies.js:
--------------------------------------------------------------------------------
1 | import {
2 | getWorkflowId,
3 | getCustomNodesList,
4 | isCustomNode,
5 | } from '../utils.js'
6 | import { local } from '../resource/index.js';
7 |
8 | const isVideoExtension = z => z.endsWith('.webm') ||
9 | z.endsWith('.mp4') ||
10 | z.endsWith('.mkv') ||
11 | z.endsWith('.gif')
12 | const isModelExtension = z => z.endsWith('.safetensors') ||
13 | z.endsWith('.pth') ||
14 | z.endsWith('.pt') ||
15 | z.endsWith('.bin') ||
16 | z.endsWith('.ckpt') ||
17 | z.endsWith('.onnx')
18 |
19 | const isDirectory = async (path) => {
20 | const { exists } = await local.validatePathDirectory({
21 | path
22 | })
23 | return exists;
24 | }
25 |
26 |
27 | // Loops over different nodes, and
28 | // finds paths that end in
29 | // - model formats: .ckpt, .safetensors, etc
30 | // - images: .png, jpeg, etc
31 | // - videos (for vhs): .mp4, etc
32 | export const resolveDependencies = async (diff) => {
33 | const workflow_id = getWorkflowId()
34 | let modelsToUpload = []
35 | let dependenciesToUpload = []
36 | let filesToUpload = []
37 | let patch = {}
38 |
39 | if(!diff) {
40 | return {
41 | dependencies: {
42 | modelsToUpload,
43 | filesToUpload,
44 | nodesToUpload: dependenciesToUpload,
45 | },
46 | patch,
47 | }
48 | }
49 |
50 | // Find items that end with tf paths
51 | // comfyui supported model extensions = set(['.ckpt', '.pt', '.bin', '.pth', '.safetensors'])
52 | //
53 | // Note:
54 | // - this will cause an error if the user types
55 | // ".safetensors" in the prompt
56 | for (const [k, v] of Object.entries(diff)) {
57 |
58 | // Edge case - upload images
59 | if (v?.class_type == 'LoadImage') {
60 | filesToUpload.push(v.inputs.image)
61 | }
62 |
63 | // PART 1 - Handle models (LoRAs, checkpoints, etc) & VHS files
64 | // --
65 | // Loop through every value in [class_type][inputs] and
66 | // check if it contains a file extension
67 | if (v?.inputs) {
68 | for (const [l,z] of Object.entries(v?.inputs)) {
69 |
70 | const isInputValuePotentialPath = typeof z == "string"
71 | if (isInputValuePotentialPath) {
72 |
73 | // Handle VHS video extensions
74 | if (isVideoExtension(z) || await isDirectory(z)) {
75 | const filename = extractFilename(z)
76 | filesToUpload.push(filename)
77 |
78 | // Patch input for files like VHS node
79 | let mendedNode = {
80 | ...v,
81 | }
82 | mendedNode["inputs"][l] = `/vol/vol/${workflow_id}/comfyui/input/${filename}`
83 | patch[k] = mendedNode
84 | }
85 |
86 | // Handle models, LoRAs, and checkpoints
87 | if (isModelExtension(z)) {
88 | modelsToUpload.push(z);
89 | }
90 | }
91 | }
92 | }
93 |
94 | // PART 2 - Handle custom nodes
95 | // --
96 | // Filter out Comfy nodes and custom nodes
97 | if(!isCustomNode(v?.class_type)) {
98 | // search for class_type in custom_nodes_list
99 | const customNodesList = await getCustomNodesList()
100 | for (let name in customNodesList) {
101 | if(customNodesList[name].indexOf(v?.class_type) !== -1) {
102 | // found in current custom_node
103 | dependenciesToUpload.push(name)
104 | }
105 | }
106 | }
107 | }
108 |
109 | // Send an API request to the Python server to check if
110 | // each file is in the /input folder.
111 | // Right now we can only upload files that are inside
112 | // ComfyUI's /input folder.
113 | const { invalid_paths } = await local.validatePaths({
114 | paths: filesToUpload
115 | })
116 | if(invalid_paths.length > 0){
117 | throw new Error("Make sure the following file/folder names are inside your ComfyUI /input folder: " + invalid_paths.join(", "))
118 | }
119 |
120 | return {
121 | dependencies: {
122 | modelsToUpload,
123 | filesToUpload,
124 | nodesToUpload: dependenciesToUpload,
125 | },
126 | patch,
127 | }
128 | }
129 |
130 |
131 | export const pollSyncDependenciesStatus = {
132 | STARTED: 'Started',
133 | COMPLETED: 'Completed',
134 | HASHING: 'Hashing',
135 | UPLOADING: 'Uploading',
136 | ERROR: 'Failed',
137 | }
138 | export const pollSyncDependencies = async (taskId) => {
139 | let status = '';
140 | let statusMessage = '';
141 | while (status !== pollSyncDependenciesStatus.COMPLETED && status !== pollSyncDependenciesStatus.ERROR) {
142 | await new Promise(resolve => setTimeout(resolve, 1000)); // Wait for 1 second before the next poll
143 |
144 | try {
145 | const statusData = await local.pollUploadStatus(taskId) //await fetch(`/comfy-cloud/upload-status/${taskId}`);
146 | status = statusData.status;
147 | statusMessage = statusData.message;
148 |
149 | } catch(e) {
150 | statusMessage = e?.message;
151 | console.error("Poll dependencies error:", e)
152 | }
153 | }
154 |
155 | if (status == pollSyncDependenciesStatus.ERROR) {
156 | throw new Error(statusMessage || "Failed to upload")
157 | }
158 | }
159 |
160 |
161 | function extractFilename(filepath) {
162 | let _filepath = filepath;
163 | const isDirectory = filepath.endsWith('/')
164 | if (isDirectory) {
165 | // remove the extra slash at the end
166 | _filepath = filepath.slice(0, -1);
167 | }
168 |
169 | // Split the filepath by '/'
170 | const parts = _filepath.split('/');
171 | // Take the last part which represents the filename
172 | const filename = parts[parts.length - 1];
173 | return filename;
174 | }
175 |
176 |
177 |
--------------------------------------------------------------------------------
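For reference, `resolveDependencies` above walks a prompt-API diff keyed by node id, where each entry carries `class_type` and `inputs`. A sketch of the shape it expects (node ids and filenames are made up):

```js
// Illustrative diff shape consumed by resolveDependencies (values are made up).
const diff = {
  "3": {
    class_type: "CheckpointLoaderSimple",
    inputs: { ckpt_name: "sd_xl_base_1.0.safetensors" }, // matched by isModelExtension -> modelsToUpload
  },
  "7": {
    class_type: "LoadImage",
    inputs: { image: "example.png" }, // LoadImage edge case -> filesToUpload
  },
};

// resolveDependencies(diff) then resolves to
// { dependencies: { modelsToUpload, filesToUpload, nodesToUpload }, patch }.
```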
/js/button/index.js:
--------------------------------------------------------------------------------
1 | export { addInterface } from "./ui.js";
2 |
--------------------------------------------------------------------------------
/js/button/support.js:
--------------------------------------------------------------------------------
1 | import { nimbus } from '../resource/index.js';
2 | import { getUser } from '../utils.js';
3 |
4 | export const helpHandler = async (type) => {
5 | const user = await getUser();
6 |
7 | const supportTypes = {
8 | feedback: nimbus.support.feedback,
9 | support: nimbus.support.support,
10 | docs: nimbus.support.docs,
11 | tooltipHover: nimbus.support.tooltipHover,
12 | tooltipDocs: nimbus.support.tooltipDocs,
13 | assistant: nimbus.support.assistant
14 | };
15 |
16 | if (supportTypes[type]) {
17 | await supportTypes[type]({ user_id: user?.id });
18 | } else {
19 | throw new Error(`Unsupported support type: ${type}`);
20 | }
21 | };
22 |
--------------------------------------------------------------------------------
/js/button/ui.js:
--------------------------------------------------------------------------------
1 | import { onGeneration } from "./actions.js";
2 | import { loadingIcon, cloudIconWhite } from "../ui/html.js";
3 | import { helpHandler } from "./support.js";
4 |
5 |
6 | /**
7 | * HTML, UI dialogs, etc
8 | */
9 | const generateOnCloudButtonHTML = `
10 |
11 |
12 | ${cloudIconWhite}
13 |
14 |
15 | Generate
16 |
on cloud GPU
17 |
18 |
19 | `;
20 |
21 | const cloudButtonLoadingHTML = `
22 |
23 | Executing...
24 |
25 | `;
26 |
27 | const feedbackButtonHTML = `
28 |
29 | Give Feedback
30 |
31 | `;
32 |
33 | const supportButtonHTML = `
34 |
35 | Get Support
36 |
37 | `;
38 |
39 | const docsButtonHTML = `
40 |
41 | Read Docs
42 |
43 | `;
44 |
45 | export function addInterface() {
46 | //const menu = document.querySelector(".comfy-menu");
47 | const queueButton = document.getElementById("queue-button");
48 |
49 | const cloudInference = document.createElement("button");
50 | cloudInference.id = "comfycloud-gpu-button";
51 | cloudInference.style.position = "relative";
52 | cloudInference.style.borderRadius = "12px";
53 | cloudInference.style.marginBottom = "12px";
54 | cloudInference.style.display = "block";
55 | cloudInference.style.background =
56 | "linear-gradient(to right, #0e42ed, #02a5db)";
57 | cloudInference.innerHTML = generateOnCloudButtonHTML;
58 | cloudInference.onclick = async () => {
59 | await onGeneration();
60 | };
61 |
62 | const supportButton = document.createElement("button");
63 | supportButton.id = "comfycloud-support-button";
64 | supportButton.style.position = "relative";
65 | supportButton.style.borderRadius = "12px";
66 | supportButton.style.height = "16px";
67 | supportButton.style.display = "flex";
68 | supportButton.style.alignItems = "center";
69 | supportButton.style.justifyContent = "center";
70 | supportButton.innerHTML = supportButtonHTML;
71 | supportButton.onclick = async () => {
72 | helpHandler("support");
73 | window.open("https://discord.gg/2PTNx3VCYa", "_blank");
74 | };
75 |
76 | const feedbackButton = document.createElement("button");
77 | feedbackButton.id = "comfycloud-bugfixes-button";
78 | feedbackButton.style.position = "relative";
79 | feedbackButton.style.borderRadius = "12px";
80 | feedbackButton.style.height = "16px";
81 | feedbackButton.style.display = "flex";
82 | feedbackButton.style.alignItems = "center";
83 | feedbackButton.style.justifyContent = "center";
84 | feedbackButton.innerHTML = feedbackButtonHTML;
85 | feedbackButton.onclick = async () => {
86 | helpHandler("feedback");
87 | window.open("https://discord.gg/2PTNx3VCYa", "_blank");
88 | };
89 |
90 | const docsButton = document.createElement("button");
91 | docsButton.id = "comfycloud-docs-button";
92 | docsButton.style.position = "relative";
93 | docsButton.style.borderRadius = "12px";
94 | docsButton.style.height = "16px";
95 | docsButton.style.display = "flex";
96 | docsButton.style.alignItems = "center";
97 | docsButton.style.justifyContent = "center";
98 | docsButton.innerHTML = docsButtonHTML;
99 | docsButton.onclick = () => {
100 | helpHandler("docs");
101 | window.open(
102 | "https://github.com/nathannlu/ComfyUI-Cloud/blob/main/docs/get-started.md",
103 | "_blank"
104 | );
105 | };
106 |
107 | const dividerTop = document.createElement("hr");
108 | dividerTop.style.width = "100%";
109 | dividerTop.style.color = "#000";
110 | dividerTop.style.margin = "10px 0";
111 |
112 | const dividerBottom = document.createElement("hr");
113 | dividerBottom.style.width = "100%";
114 | dividerBottom.style.color = "#000";
115 | dividerBottom.style.marginTop = "12px";
116 |
117 | const titleElement = document.createElement("div");
118 | titleElement.id = "comfycloud-title";
119 | titleElement.innerText = "ComfyUI-Cloud";
120 | titleElement.style.marginBottom = "10px";
121 | titleElement.style.fontSize = "14px";
122 | titleElement.style.textAlign = "center";
123 | titleElement.style.display = "flex";
124 | titleElement.style.justifyContent = "center";
125 | titleElement.style.alignItems = "center";
126 |
127 | const tooltipButton = document.createElement("button");
128 | tooltipButton.id = "comfycloud-tooltip-button";
129 | tooltipButton.innerText = "?";
130 | tooltipButton.style.fontSize = "14px";
131 | tooltipButton.style.marginLeft = "10px";
132 | tooltipButton.style.borderRadius = "50%";
133 | tooltipButton.style.border = "none";
134 | tooltipButton.style.backgroundColor = "#b5b5b5";
135 | tooltipButton.style.color = "white";
136 | tooltipButton.style.width = "20px";
137 | tooltipButton.style.height = "20px";
138 | tooltipButton.style.display = "flex";
139 | tooltipButton.style.justifyContent = "center";
140 | tooltipButton.style.alignItems = "center";
141 | tooltipButton.style.cursor = "pointer";
142 | tooltipButton.style.position = "relative";
143 | tooltipButton.onclick = () => {
144 | helpHandler("tooltipDocs");
145 | window.open(
146 | "https://github.com/nathannlu/ComfyUI-Cloud/blob/main/docs/get-started.md",
147 | "_blank"
148 | );
149 | };
150 | tooltipButton.onmouseover = function () {
151 | helpHandler("tooltipHover");
152 | tooltipText.style.visibility = "visible";
153 | tooltipText.style.opacity = "1";
154 | };
155 | tooltipButton.onmouseout = function () {
156 | tooltipText.style.visibility = "hidden";
157 | tooltipText.style.opacity = "0";
158 | };
159 |
160 | const tooltipText = document.createElement("div");
161 | tooltipText.id = "comfycloud-tooltip-text";
162 | tooltipText.style.visibility = "hidden";
163 | tooltipText.style.width = "250px";
164 | tooltipText.style.backgroundColor = "#555";
165 | tooltipText.style.color = "#fff";
166 | tooltipText.style.textAlign = "center";
167 | tooltipText.style.borderRadius = "6px";
168 | tooltipText.style.paddingInline = "16px";
169 | tooltipText.style.position = "absolute";
170 | tooltipText.style.zIndex = "1";
171 | tooltipText.style.bottom = "125%";
172 | tooltipText.style.left = "50%";
173 | tooltipText.style.marginLeft = "-290px";
174 | tooltipText.style.marginBottom = "-220px";
175 | tooltipText.style.opacity = "0";
176 | tooltipText.style.transition = "opacity 0.3s";
177 | tooltipText.innerHTML = `
178 |
179 |
180 | How to run a cloud workflow:
181 |
182 |
183 | Click "Generate on cloud GPU"
184 | Name your workflow and wait for it to be uploaded.
185 | Your workflow will be automatically executed.
186 |
187 | Need more help? Click me to view the docs, or hit us up on Discord!
188 |
189 | `;
190 |
191 | const box = document.createElement("div");
192 | box.innerHTML = `
193 |
194 |
195 | `;
196 |
197 | tooltipButton.appendChild(tooltipText);
198 | titleElement.appendChild(tooltipButton);
199 |
200 | queueButton.after(dividerTop);
201 | dividerTop.after(titleElement);
202 | titleElement.after(cloudInference);
203 | cloudInference.after(feedbackButton);
204 | feedbackButton.after(supportButton);
205 | supportButton.after(docsButton);
206 | docsButton.after(box);
207 | box.after(dividerBottom);
208 | }
209 |
210 | export const setButtonLoading = () => {
211 | const menu = document.querySelector(".comfy-menu");
212 | const btn = menu.querySelector("#comfycloud-gpu-button");
213 | btn.innerHTML = cloudButtonLoadingHTML;
214 | btn.style.color = "#ddd";
215 | btn.disabled = true;
216 | };
217 |
218 | export const setButtonDefault = () => {
219 | const menu = document.querySelector(".comfy-menu");
220 | const btn = menu.querySelector("#comfycloud-gpu-button");
221 | btn.innerHTML = generateOnCloudButtonHTML;
222 | btn.style.color = "#ddd";
223 | btn.disabled = false;
224 | };
225 |
226 | export const setMessage = (text) => {
227 | const menu = document.querySelector(".comfy-menu");
228 | const title = menu.querySelector("#comfycloud-message");
229 |
230 | if (text.length > 0) {
231 | title.innerHTML = `${loadingIcon} ${text}`;
232 | } else {
233 | title.innerHTML = "";
234 | }
235 | //title.style.color = "orange";
236 | };
237 |
--------------------------------------------------------------------------------
/js/chatbot/chatbot.js.dep:
--------------------------------------------------------------------------------
1 | import { nimbus } from "../resource/index.js";
2 |
3 | export async function getBotResponse(message) {
4 | try {
5 | const data = await nimbus.chatbot.sendMessage({
6 | message: message,
7 | origin: "ComfyUI Cloud Chat",
8 | });
9 |
10 | return data;
11 | } catch (e) {
12 | throw new Error(e.message);
13 | }
14 | }
15 |
16 | export function isValidWorkflow(workflow) {
17 | return workflow && workflow.nodes && workflow.edges;
18 | }
19 |
20 | export function parseWorkflowFromBot(response) {
21 | const cleanedResponse = response.responses.bot
22 | .replace(/^\s*```json\s*/, "")
23 | .replace(/\s*```\s*$/, "");
24 |
25 | try {
26 | return JSON.parse(cleanedResponse);
27 | } catch (error) {
28 | console.error("Failed to parse JSON:", error);
29 | return cleanedResponse;
30 | }
31 | }
32 |
--------------------------------------------------------------------------------
/js/chatbot/edgeTypes.js.dep:
--------------------------------------------------------------------------------
1 | /* eslint-disable no-unused-vars */
2 | export function getInputType(name) {
3 | const nameTypeMap = {
4 | LATENT: "LATENT",
5 | MODEL: "MODEL",
6 | CLIP: "CLIP",
7 | VAE: "VAE",
8 | CONDITIONING: "CONDITIONING",
9 | IMAGE: "IMAGE",
10 | MASK: "MASK",
11 | CLIP_VISION_OUTPUT: "CLIP_VISION_OUTPUT",
12 | positive: "CONDITIONING",
13 | negative: "CONDITIONING",
14 | CONTROL_NET: "CONTROL_NET",
15 | STYLE_MODEL: "STYLE_MODEL",
16 | CLIP_VISION: "CLIP_VISION",
17 | GLIGEN: "GLIGEN",
18 | latent: "LATENT",
19 | UPSCALE_MODEL: "UPSCALE_MODEL",
20 | output: "LATENT",
21 | denoised_output: "LATENT",
22 | SIGMAS: "SIGMAS",
23 | SAMPLER: "SAMPLER",
24 | high_sigmas: "SIGMAS",
25 | low_sigmas: "SIGMAS",
26 | GUIDER: "GUIDER",
27 | NOISE: "NOISE",
28 | PHOTOMAKER: "PHOTOMAKER",
29 | stage_c: "LATENT",
30 | stage_b: "LATENT",
31 | controlnet_input: "IMAGE",
32 | STRING: "STRING",
33 | "*": "*",
34 | INT: "INT",
35 | FLOAT: "FLOAT",
36 | BLIP_MODEL: "BLIP_MODEL",
37 | bus: "BUS",
38 | model: "MODEL",
39 | clip: "CLIP",
40 | vae: "VAE",
41 | latent_filename: "STRING",
42 | latent_image: "LATENT",
43 | image_filename: "STRING",
44 | conditioning_filename: "STRING",
45 | NAME_STRING: "STRING",
46 | conditioning: "CONDITIONING",
47 | parsed_text: "STRING",
48 | raw_text: "STRING",
49 | NUMBER: "NUMBER",
50 | image_a_pass: "IMAGE",
51 | image_b_pass: "IMAGE",
52 | filepath_text: "STRING",
53 | filename_text: "STRING",
54 | MASK_IMAGE: "IMAGE",
55 | clipseg_model: "CLIPSEG_MODEL",
56 | IMAGES_BATCH: "IMAGE",
57 | MASKS_BATCH: "MASK",
58 | MASK_IMAGES_BATCH: "IMAGE",
59 | IMAGES: "IMAGE",
60 | DICT: "DICT",
61 | BOOLEAN: "BOOLEAN",
62 | composited_images: "IMAGE",
63 | ssao_images: "IMAGE",
64 | specular_mask_images: "IMAGE",
65 | ssdo_images: "IMAGE",
66 | ssdo_image_masks: "IMAGE",
67 | light_source_image_masks: "IMAGE",
68 | aspect_number: "NUMBER",
69 | aspect_float: "FLOAT",
70 | is_landscape_bool: "NUMBER",
71 | aspect_ratio_common: "STRING",
72 | aspect_type: "STRING",
73 | image: "IMAGE",
74 | images: "IMAGE",
75 | color_palettes: "LIST",
76 | CROP_DATA: "CROP_DATA",
77 | mask: "MASK",
78 | shadow_map: "IMAGE",
79 | highlight_map: "IMAGE",
80 | width_num: "NUMBER",
81 | height_num: "NUMBER",
82 | width_float: "FLOAT",
83 | height_float: "FLOAT",
84 | width_int: "INT",
85 | height_int: "INT",
86 | MASKS: "MASK",
87 | INT_PLACES: "INT",
88 | "latent(s)": "LATENT",
89 | tensor_w_num: "NUMBER",
90 | tensor_h_num: "NUMBER",
91 | masks: "MASK",
92 | cropped_mask: "MASK",
93 | crop_data: "CROP_DATA",
94 | top_int: "INT",
95 | left_int: "INT",
96 | right_int: "INT",
97 | bottom_int: "INT",
98 | midas_model: "MIDAS_MODEL",
99 | number: "NUMBER",
100 | float: "FLOAT",
101 | int: "INT",
102 | SEED: "SEED",
103 | positive_string: "STRING",
104 | negative_string: "STRING",
105 | seed: "SEED",
106 | SAM_MODEL: "SAM_MODEL",
107 | SAM_PARAMETERS: "SAM_PARAMETERS",
108 | samples: "LATENT",
109 | IMAGE_BOUNDS: "IMAGE_BOUNDS",
110 | LIST: "LIST",
111 | TEXT_A_PASS: "STRING",
112 | TEXT_B_PASS: "STRING",
113 | SCORE_NUMBER: "NUMBER",
114 | COMPARISON_TEXT: "STRING",
115 | result_text: "STRING",
116 | replacement_count_number: "NUMBER",
117 | replacement_count_float: "FLOAT",
118 | replacement_count_int: "INT",
119 | f: "B",
120 | o: "O",
121 | u: "O",
122 | n: "L",
123 | d: "E",
124 | line_text: "STRING",
125 | dictionary: "DICT",
126 | MODEL_NAME_TEXT: "STRING",
127 | image_pass: "IMAGE",
128 | IMAGE_PASS: "IMAGE",
129 | output_path: "STRING",
130 | processed_count: "NUMBER",
131 | BBOX_DETECTOR: "BBOX_DETECTOR",
132 | SEGS: "SEGS",
133 | cropped: "IMAGE",
134 | cropped_refined: "IMAGE",
135 | cropped_refined_alpha: "IMAGE",
136 | cnet_images: "IMAGE",
137 | segs: "SEGS",
138 | basic_pipe: "BASIC_PIPE",
139 | combined_mask: "MASK",
140 | batch_masks: "MASK",
141 | cropped_enhanced_alpha: "IMAGE",
142 | detailer_pipe: "DETAILER_PIPE",
143 | refiner_basic_pipe_opt: "BASIC_PIPE",
144 | bbox_detector: "BBOX_DETECTOR",
145 | sam_model_opt: "SAM_MODEL",
146 | segm_detector_opt: "SEGM_DETECTOR",
147 | detailer_hook: "DETAILER_HOOK",
148 | refiner_model: "MODEL",
149 | refiner_clip: "CLIP",
150 | refiner_positive: "CONDITIONING",
151 | refiner_negative: "CONDITIONING",
152 | base_basic_pipe: "BASIC_PIPE",
153 | refiner_basic_pipe: "BASIC_PIPE",
154 | UPSCALER: "UPSCALER",
155 | PK_HOOK: "PK_HOOK",
156 | DETAILER_HOOK: "DETAILER_HOOK",
157 | UPSCALER_HOOK: "UPSCALER_HOOK",
158 | SEGS_HEADER: "SEGS_HEADER",
159 | SEG_ELT: "SEG_ELT",
160 | seg_elt: "SEG_ELT",
161 | cropped_image: "IMAGE",
162 | crop_region: "SEG_ELT_crop_region",
163 | bbox: "SEG_ELT_bbox",
164 | control_net_wrapper: "SEG_ELT_control_net_wrapper",
165 | confidence: "FLOAT",
166 | label: "STRING",
167 | left: "INT",
168 | top: "INT",
169 | right: "INT",
170 | bottom: "INT",
171 | KSAMPLER: "KSAMPLER",
172 | KSAMPLER_ADVANCED: "KSAMPLER_ADVANCED",
173 | selected_value: "*",
174 | selected_label: "STRING",
175 | selected_index: "INT",
176 | output1: "*",
177 | populated_text: "STRING",
178 | BASIC_PIPE: "BASIC_PIPE",
179 | REGIONAL_PROMPTS: "REGIONAL_PROMPTS",
180 | filtered_SEGS: "SEGS",
181 | remained_SEGS: "SEGS",
182 | signal_opt: "*",
183 | bool: "BOOLEAN",
184 | signal: "*",
185 | batch: "INT",
186 | height: "INT",
187 | width: "INT",
188 | channel: "INT",
189 | wildcard: "STRING",
190 | segs_labels: "STRING",
191 | count: "INT",
192 | total: "INT",
193 | value: "*",
194 | TRANSFORMERS_CLASSIFIER: "TRANSFORMERS_CLASSIFIER",
195 | scheduler: [
196 | "normal",
197 | "karras",
198 | "exponential",
199 | "sgm_uniform",
200 | "simple",
201 | "ddim_uniform",
202 | ],
203 | SEGM_DETECTOR: "SEGM_DETECTOR",
204 | TIMESTEP_KF: "TIMESTEP_KEYFRAME",
205 | LATENT_KF: "LATENT_KEYFRAME",
206 | model_opt: "MODEL",
207 | CN_WEIGHTS: "CONTROL_NET_WEIGHTS",
208 | TK_SHORTCUT: "TIMESTEP_KEYFRAME",
209 | proc_IMAGE: "IMAGE",
210 | SPARSE_METHOD: "SPARSE_METHOD",
211 | cn_extras: "CN_WEIGHTS_EXTRAS",
212 | "ABG_CHARACTER_MASK (MASK)": "MASK",
213 | POSE_KEYPOINT: "POSE_KEYPOINT",
214 | tracking: "TRACKING",
215 | prompt: "STRING",
216 | OPTICAL_FLOW: "OPTICAL_FLOW",
217 | PREVIEW_IMAGE: "IMAGE",
218 | INPAINTING_MASK: "MASK",
219 | preprocessor: [
220 | "none",
221 | "TilePreprocessor",
222 | "TTPlanet_TileGF_Preprocessor",
223 | "TTPlanet_TileSimple_Preprocessor",
224 | "AnimeFace_SemSegPreprocessor",
225 | "BinaryPreprocessor",
226 | "Zoe-DepthMapPreprocessor",
227 | "ColorPreprocessor",
228 | "DepthAnythingPreprocessor",
229 | "Zoe_DepthAnythingPreprocessor",
230 | "DensePosePreprocessor",
231 | "OneFormer-COCO-SemSegPreprocessor",
232 | "OneFormer-ADE20K-SemSegPreprocessor",
233 | "M-LSDPreprocessor",
234 | "AnyLineArtPreprocessor_aux",
235 | "DWPreprocessor",
236 | "AnimalPosePreprocessor",
237 | "DSINE-NormalMapPreprocessor",
238 | "MediaPipe-FaceMeshPreprocessor",
239 | "LineArtPreprocessor",
240 | "MiDaS-NormalMapPreprocessor",
241 | "MiDaS-DepthMapPreprocessor",
242 | "CannyEdgePreprocessor",
243 | "ShufflePreprocessor",
244 | "Metric3D-DepthMapPreprocessor",
245 | "Metric3D-NormalMapPreprocessor",
246 | "AnimeLineArtPreprocessor",
247 | "SAMPreprocessor",
248 | "HEDPreprocessor",
249 | "FakeScribblePreprocessor",
250 | "ScribblePreprocessor",
251 | "Scribble_XDoG_Preprocessor",
252 | "Scribble_PiDiNet_Preprocessor",
253 | "DiffusionEdge_Preprocessor",
254 | "SavePoseKpsAsJsonFile",
255 | "FacialPartColoringFromPoseKps",
256 | "UpperBodyTrackingFromPoseKps",
257 | "PiDiNetPreprocessor",
258 | "OpenposePreprocessor",
259 | "LeReS-DepthMapPreprocessor",
260 | "Unimatch_OptFlowPreprocessor",
261 | "MaskOptFlow",
262 | "ImageLuminanceDetector",
263 | "ImageIntensityDetector",
264 | "UniFormer-SemSegPreprocessor",
265 | "SemSegPreprocessor",
266 | "MeshGraphormer-DepthMapPreprocessor",
267 | "MeshGraphormer+ImpactDetector-DepthMapPreprocessor",
268 | "Manga2Anime_LineArt_Preprocessor",
269 | "BAE-NormalMapPreprocessor",
270 | "TEEDPreprocessor",
271 | "LineartStandardPreprocessor",
272 | ],
273 | "RESOLUTION (INT)": "INT",
274 | "IMAGE_GEN_WIDTH (INT)": "INT",
275 | "IMAGE_GEN_HEIGHT (INT)": "INT",
276 | "Full prompt": "STRING",
277 | "Short prompt": "STRING",
278 | "Compiled prompt": "CONDITIONING",
279 | Value: "STRING",
280 | Value2: "STRING",
281 | mix: "INT",
282 | Filenames: "VHS_FILENAMES",
283 | frame_count: "INT",
284 | audio: "VHS_AUDIO",
285 | video_info: "VHS_VIDEOINFO",
286 | meta_batch: "VHS_BatchManager",
287 | "source_fps🟨": "FLOAT",
288 | "source_frame_count🟨": "INT",
289 | "source_duration🟨": "FLOAT",
290 | "source_width🟨": "INT",
291 | "source_height🟨": "INT",
292 | "loaded_fps🟦": "FLOAT",
293 | "loaded_frame_count🟦": "INT",
294 | "loaded_duration🟦": "FLOAT",
295 | "loaded_width🟦": "INT",
296 | "loaded_height🟦": "INT",
297 | "fps🟨": "FLOAT",
298 | "frame_count🟨": "INT",
299 | "duration🟨": "FLOAT",
300 | "width🟨": "INT",
301 | "height🟨": "INT",
302 | "fps🟦": "FLOAT",
303 | "frame_count🟦": "INT",
304 | "duration🟦": "FLOAT",
305 | "width🟦": "INT",
306 | "height🟦": "INT",
307 | LATENT_A: "LATENT",
308 | A_count: "INT",
309 | LATENT_B: "LATENT",
310 | B_count: "INT",
311 | IMAGE_A: "IMAGE",
312 | IMAGE_B: "IMAGE",
313 | MASK_A: "MASK",
314 | MASK_B: "MASK",
315 | modelname: "STRING",
316 | sampler_name: [
317 | "euler",
318 | "euler_ancestral",
319 | "heun",
320 | "heunpp2",
321 | "dpm_2",
322 | "dpm_2_ancestral",
323 | "lms",
324 | "dpm_fast",
325 | "dpm_adaptive",
326 | "dpmpp_2s_ancestral",
327 | "dpmpp_sde",
328 | "dpmpp_sde_gpu",
329 | "dpmpp_2m",
330 | "dpmpp_2m_sde",
331 | "dpmpp_2m_sde_gpu",
332 | "dpmpp_3m_sde",
333 | "dpmpp_3m_sde_gpu",
334 | "ddpm",
335 | "lcm",
336 | "ddim",
337 | "uni_pc",
338 | "uni_pc_bh2",
339 | ],
340 | scheduler_name: "STRING",
341 | s: "STRING",
342 | x: "INT",
343 | y: "INT",
344 | REMBG_SESSION: "REMBG_SESSION",
345 | red: "MASK",
346 | green: "MASK",
347 | blue: "MASK",
348 | cyan: "MASK",
349 | magenta: "MASK",
350 | yellow: "MASK",
351 | black: "MASK",
352 | white: "MASK",
353 | CLIP_SEG: "CLIP_SEG",
354 | description: "STRING",
355 | response: "STRING",
356 | context: "STRING",
357 | };
358 |
359 | const matchName = nameTypeMap[name];
360 | if (matchName) {
361 | return matchName;
362 | } else {
363 |     console.log("no matching name, will return unknown: ", name);
364 | return "XXX UNKNOWN INPUT TYPE XXX";
365 | }
366 | }
367 |
368 | // originally was in a file that imported app
369 | function scanNodeInputOutput() {
370 | const nameTypeMap = new Map();
371 |
372 | function updateNameTypeMap(input, output) {
373 | const maxLength = Math.min(input.length, output.length);
374 |
375 | for (let i = 0; i < maxLength; i++) {
376 | const inputName = input[i];
377 | const outputName = output[i];
378 |
379 | // Add to nameTypeMap if inputName is not already present
380 | if (!nameTypeMap.has(inputName)) {
381 |         nameTypeMap.set(inputName, outputName);
382 | }
383 | }
384 | }
385 |
386 | if (LiteGraph.registered_node_types) {
387 |     // Walk over every registered node type
388 | for (const [key, nodeType] of Object.entries(
389 | LiteGraph.registered_node_types
390 | )) {
391 | console.log(key);
392 | if (nodeType.nodeData?.output !== undefined) {
393 | updateNameTypeMap(
394 | nodeType.nodeData.output_name,
395 | nodeType.nodeData.output
396 | );
397 | }
398 | }
399 |     console.log(JSON.stringify(Object.fromEntries(nameTypeMap)));
400 | } else {
401 | console.log("No registered nodes found.");
402 | }
403 | return nameTypeMap;
404 | }
405 |
--------------------------------------------------------------------------------
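A minimal usage sketch for scanNodeInputOutput above, run in this module's scope (the function is not exported) and assuming ComfyUI has finished registering node types so the LiteGraph global is populated:

// Rebuild the name -> type map from whatever node packs are installed,
// then dump it as JSON so new entries can be merged into nameTypeMap.
const scannedMap = scanNodeInputOutput();
console.log(JSON.stringify(Object.fromEntries(scannedMap), null, 2));
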
/js/chatbot/ext.js:
--------------------------------------------------------------------------------
1 | // import { api, app } from './comfy/comfy.js'
2 | import { app } from "../comfy/comfy.js";
3 | import { loadGraphFromPrompt, isValidWorkflow } from "./workflow.js";
4 | import { setButtonDefault } from "../button/ui.js";
5 | import { authDialog } from "../auth/index.js";
6 | import { getApiToken } from "../utils.js";
7 |
8 | import { local } from '../resource/index.js';
9 |
10 | const CHAT_BUTTON_ID = "chat-button";
11 | const CHAT_BOX_ID = "chat-box";
12 | const CHAT_INPUT_ID = "chat-input";
13 | const CHAT_MESSAGES_ID = "chat-messages";
14 | const CHAT_SEND_BUTTON_ID = "chat-send-button";
15 |
16 | const apiToken = getApiToken();
17 | const doesApiTokenExist = !!apiToken;
18 |
19 | let canSendMessage = true;
20 |
21 | const toggleChatBox = async () => {
22 | const chatBox = document.getElementById(CHAT_BOX_ID);
23 | if (chatBox.style.display === "none") {
24 | chatBox.style.display = "block";
25 | } else {
26 | chatBox.style.display = "none";
27 | }
28 | };
29 |
30 | const createChatButton = () => {
31 | const chatButton = document.createElement("div");
32 | chatButton.id = CHAT_BUTTON_ID;
33 |
34 | // Set up the button styles and gradient background
35 | chatButton.style.position = "fixed";
36 | chatButton.style.bottom = "0";
37 | chatButton.style.right = "0";
38 | chatButton.style.margin = "20px";
39 | chatButton.style.background = "linear-gradient(160deg, #8A2BE2, #1E90FF)"; // Gradient background
40 | chatButton.style.color = "white";
41 | chatButton.style.borderRadius = "50%";
42 | chatButton.style.width = "60px";
43 | chatButton.style.height = "60px";
44 | chatButton.style.zIndex = 9999;
45 | chatButton.style.display = "flex";
46 | chatButton.style.alignItems = "center";
47 | chatButton.style.justifyContent = "center";
48 | chatButton.style.cursor = "pointer";
49 |
50 | // Create and append SVG chat bubble icon
51 | const chatIcon = document.createElementNS(
52 | "http://www.w3.org/2000/svg",
53 | "svg"
54 | );
55 | chatIcon.setAttribute("viewBox", "0 0 512 512");
56 | chatIcon.setAttribute("width", "20"); // Adjust size as needed
57 | chatIcon.setAttribute("height", "20"); // Adjust size as needed
58 | chatIcon.setAttribute("fill", "currentColor"); // Use current text color
59 |
60 | const path = document.createElementNS("http://www.w3.org/2000/svg", "path");
61 | path.setAttribute(
62 | "d",
63 | "M416,0H96C43.072,0,0,43.072,0,96v405.333c0,4.48,2.816,8.491,7.04,10.027c1.195,0.427,2.411,0.64,3.627,0.64c3.115,0,6.123-1.344,8.192-3.84L122.325,384H416c52.928,0,96-43.072,96-96V96C512,43.072,468.928,0,416,0z"
64 | );
65 | chatIcon.appendChild(path);
66 |
67 | chatButton.appendChild(chatIcon);
68 |
69 | return chatButton;
70 | };
71 |
72 | const createChatBox = () => {
73 | const chatBox = document.createElement("div");
74 | chatBox.id = CHAT_BOX_ID;
75 | chatBox.style.position = "fixed";
76 | chatBox.style.bottom = "100px";
77 | chatBox.style.right = "0";
78 | chatBox.style.backgroundColor = "white";
79 | chatBox.style.width = "300px";
80 | chatBox.style.height = "500px";
81 | chatBox.style.zIndex = 9999;
82 | chatBox.style.borderRadius = "10px";
83 | chatBox.style.display = "none"; // Initially hidden
84 |
85 | // Messages section
86 | const chatMessages = document.createElement("div");
87 | chatMessages.id = CHAT_MESSAGES_ID;
88 | chatMessages.style.height = "80%";
89 | chatMessages.style.overflowY = "auto";
90 | chatMessages.style.padding = "10px";
91 | chatMessages.style.color = "black";
92 | chatBox.appendChild(chatMessages);
93 | // Input section
94 | const chatInputContainer = document.createElement("div");
95 | chatInputContainer.style.position = "absolute";
96 | chatInputContainer.style.bottom = "0";
97 | chatInputContainer.style.width = "100%";
98 | chatInputContainer.style.padding = "10px";
99 | chatInputContainer.style.boxSizing = "border-box";
100 | chatInputContainer.style.backgroundColor = "#f1f1f1";
101 | chatInputContainer.style.borderBottomLeftRadius = "10px";
102 | chatInputContainer.style.borderBottomRightRadius = "10px";
103 |
104 | // Flexbox layout for horizontal alignment
105 | chatInputContainer.style.display = "flex";
106 | chatInputContainer.style.alignItems = "center";
107 |
108 | // Input field
109 | const chatInput = document.createElement("input");
110 | chatInput.id = CHAT_INPUT_ID;
111 | chatInput.type = "text";
112 | chatInput.style.flex = "1"; // Allow the input to grow and take up available space
113 | chatInput.style.padding = "10px";
114 | chatInput.style.boxSizing = "border-box";
115 | chatInput.style.border = "1px solid #ccc";
116 | chatInput.style.borderRadius = "4px";
117 | chatInput.placeholder = "Describe your desired workflow...";
118 |
119 | // Send button
120 | const chatSendButton = document.createElement("button");
121 | chatSendButton.id = CHAT_SEND_BUTTON_ID;
122 | chatSendButton.style.width = "40px";
123 | chatSendButton.style.height = "40px";
124 | chatSendButton.style.marginLeft = "10px";
125 | chatSendButton.style.padding = "0"; // Remove default padding
126 | chatSendButton.style.border = "none";
127 | chatSendButton.style.backgroundColor = "#007acc";
128 | chatSendButton.style.color = "white";
129 | chatSendButton.style.cursor = "pointer";
130 | chatSendButton.style.borderRadius = "50%";
131 | chatSendButton.style.display = "flex";
132 | chatSendButton.style.alignItems = "center";
133 | chatSendButton.style.justifyContent = "center";
134 |
135 | // Create and style the SVG icon
136 | const sendIcon = document.createElementNS(
137 | "http://www.w3.org/2000/svg",
138 | "svg"
139 | );
140 | sendIcon.setAttribute("fill", "#ffffff"); // Use white color for the icon
141 | sendIcon.setAttribute("viewBox", "0 0 52 52");
142 | sendIcon.setAttribute("enable-background", "new 0 0 52 52");
143 | sendIcon.setAttribute("xmlns", "http://www.w3.org/2000/svg");
144 | sendIcon.setAttribute("class", "send-icon");
145 | sendIcon.setAttribute("xml:space", "preserve");
146 | sendIcon.setAttribute("width", "18px"); // Adjust size as needed
147 | sendIcon.setAttribute("height", "18px"); // Adjust size as needed
148 |
149 | // Create the path element for the SVG
150 | const path = document.createElementNS("http://www.w3.org/2000/svg", "path");
151 | path.setAttribute(
152 | "d",
153 | "M2.1,44.5l4.4-16.3h18.6c0.5,0,1-0.5,1-1v-2c0-0.5-0.5-1-1-1H6.5l-4.3-16l0,0C2.1,8,2,7.7,2,7.4C2,6.7,2.7,6,3.5,6.1c0.2,0,0.3,0.1,0.5,0.1l0,0l0,0l0,0l0,0l45,18.5c0.6,0.2,1,0.8,1,1.4s-0.4,1.1-0.9,1.3l0,0L4,46.4l0,0c-0.2,0.1-0.4,0.1-0.6,0.1C2.6,46.4,2,45.8,2,45C2,44.8,2,44.7,2.1,44.5L2.1,44.5z"
154 | );
155 | sendIcon.appendChild(path);
156 |
157 | // Append SVG to button
158 | chatSendButton.appendChild(sendIcon);
159 |
160 | // Add event listener
161 | chatSendButton.addEventListener("click", sendMessage);
162 |
163 | // Append input and button to container
164 | chatInputContainer.appendChild(chatInput);
165 | chatInputContainer.appendChild(chatSendButton);
166 | chatBox.appendChild(chatInputContainer);
167 |
168 | return chatBox;
169 | };
170 |
171 | const updateMessageBoxStatus = (canSendMessage) => {
172 | const button = document.getElementById(CHAT_SEND_BUTTON_ID);
173 | if (button) {
174 | button.style.backgroundColor = canSendMessage ? "#007acc" : "#cccccc"; // Change color based on `isActive`
175 | }
176 | const input = document.getElementById(CHAT_INPUT_ID);
177 | if (input) {
178 | input.placeholder = canSendMessage ? "Describe your desired workflow..." : "Generating workflow...";
179 | input.disabled = !canSendMessage;
180 | }
181 | };
182 |
183 | const sendMessage = async () => {
184 | if (!doesApiTokenExist) {
185 | toggleChatBox();
186 | setButtonDefault();
187 | return authDialog.show();
188 | }
189 |
190 | const chatInput = document.getElementById(CHAT_INPUT_ID);
191 | const chatMessages = document.getElementById(CHAT_MESSAGES_ID);
192 | const message = chatInput.value.trim();
193 |
194 | if (message) {
195 | if (!canSendMessage) {
196 | return;
197 | } else {
198 | canSendMessage = false;
199 | updateMessageBoxStatus(canSendMessage);
200 | }
201 |
202 | const messageElement = document.createElement("div");
203 | messageElement.innerText = message;
204 | messageElement.style.marginBottom = "10px";
205 | messageElement.style.padding = "10px";
206 | messageElement.style.backgroundColor = "#A7DBFD";
207 | messageElement.style.borderRadius = "4px";
208 |
209 | chatMessages.appendChild(messageElement);
210 | chatMessages.scrollTop = chatMessages.scrollHeight; // Scroll to the bottom
211 | chatInput.value = "";
212 |
213 | try {
214 | let botMessage = "";
215 |
216 | // fetch response to localhost
217 | const { nodes: parsedBotResponse } = await local.sendMessage({ message });
218 |
219 | console.log(parsedBotResponse);
220 | botMessage = JSON.stringify(parsedBotResponse);
221 |
222 | const botMessageElement = document.createElement("div");
223 | botMessageElement.innerText = botMessage;
224 | botMessageElement.style.marginBottom = "10px";
225 | botMessageElement.style.padding = "10px";
226 | botMessageElement.style.backgroundColor = "#DFE8EE";
227 | botMessageElement.style.borderRadius = "4px";
228 | botMessageElement.style.wordWrap = "break-word"; // Wrap long words
229 | botMessageElement.style.overflow = "hidden"; // Hide overflow text
230 | botMessageElement.style.textOverflow = "ellipsis"; // Show ellipsis if text overflows
231 |
232 | // Optionally, set a max-width if you want to control the width of the message box
233 | botMessageElement.style.maxWidth = "calc(100% - 20px)"; // Adjust max-width based on container width and padding
234 |
235 | chatMessages.appendChild(botMessageElement);
236 | chatMessages.scrollTop = chatMessages.scrollHeight; // Scroll to the bottom
237 |
238 | loadGraphFromPrompt(parsedBotResponse);
239 | } catch (error) {
240 | console.error(error);
241 | } finally {
242 | canSendMessage = true;
243 | updateMessageBoxStatus(canSendMessage);
244 | }
245 | }
246 | };
247 |
248 | export const registerChat = () => {
249 |   // Append a button that opens a chat box in the bottom-right corner
250 | const chatButton = createChatButton();
251 | const chatBox = createChatBox();
252 |
253 | document.body.appendChild(chatBox);
254 | document.body.appendChild(chatButton);
255 | chatButton.addEventListener("click", toggleChatBox);
256 | };
257 |
258 |
--------------------------------------------------------------------------------
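For reference, sendMessage above destructures a nodes key out of the local /comfy-cloud/send-message response and passes it straight to loadGraphFromPrompt, which reads .nodes off it again, so the nodes key is expected to hold a whole graph object. A hypothetical response sketch (the field values are illustrative, not confirmed by the source):

// Hypothetical payload: `nodes` carries a LiteGraph-style graph whose
// node pos arrays are empty so positionNodes in workflow.js can lay them out.
const exampleResponse = {
  nodes: {
    nodes: [
      { id: 1, type: "CheckpointLoaderSimple", pos: [], widgets_values: [] },
      { id: 2, type: "CLIPTextEncode", pos: [], widgets_values: ["a photo of a cat"] },
    ],
    links: [],
  },
};
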
/js/chatbot/workflow.js:
--------------------------------------------------------------------------------
1 | import { app } from "../comfy/comfy.js";
2 |
3 | function positionNodes(nodes) {
4 |   const xStep = 500; // horizontal step between node columns
5 |   const yStep = 500; // vertical step between nodes in a column
6 |   const yBase = 100; // base y position
7 |   const nodesById = {}; // lookup of nodes by id for faster search (currently unused)
8 |
9 |   let x = 100; // starting x position
10 |   let y = yBase; // starting y position
11 | let columnCounter = 0; // counter for columns
12 |
13 |
14 | for (let i = 0; i < nodes.length; i++) {
15 | const node = nodes[i];
16 |
17 | // Calculate node size
18 | // check if it has node.class_type
19 | /*
20 | if (!LiteGraph.registered_node_types[node.type]) {
21 | console.error(`Node type ${node.type} not found`);
22 | return;
23 | }
24 | const registeredNodeType = LiteGraph.registered_node_types[node.class_type];
25 | const nodeSize = registeredNodeType.prototype.computeSize() // [width, height]
26 | */
27 |
28 | node.pos.push(x)
29 | node.pos.push(y)
30 |
31 | if (columnCounter % 3 === 0) {
32 | x += xStep;
33 | y = yBase;
34 | } else {
35 | // y += newNode.size[1] + yStep;
36 | y += yStep;
37 | }
38 | columnCounter++;
39 | }
40 | }
41 |
42 | export function loadGraphFromPrompt(generatedWorkflow) {
43 | const translatedData = generatedWorkflow;
44 | positionNodes(translatedData.nodes);
45 | app.loadGraphData(translatedData, true);
46 | app.graph.change();
47 | }
48 |
49 | export function isValidWorkflow(workflow) {
50 | return workflow && workflow.nodes && workflow.edges;
51 | }
52 |
--------------------------------------------------------------------------------
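A quick worked sketch of the layout positionNodes produces, run in this module's scope (positionNodes is not exported): with xStep = yStep = 500 and yBase = 100, x advances on every third node, including the first, so the first node sits alone at x = 100 and the rest fall into columns of three.

// Six placeholder nodes with empty pos arrays, as loadGraphFromPrompt expects.
const demoNodes = Array.from({ length: 6 }, (_, id) => ({ id, pos: [] }));
positionNodes(demoNodes);
console.log(demoNodes.map((n) => n.pos));
// => [[100, 100], [600, 100], [600, 600], [600, 1100], [1100, 100], [1100, 600]]
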
/js/comfy/comfy.js:
--------------------------------------------------------------------------------
1 | import { api as _api } from '../../../scripts/api.js';
2 | import { app as _app } from '../../../scripts/app.js';
3 | import { ComfyWidgets as _ComfyWidgets } from "../../../scripts/widgets.js";
4 | import { ComfyDialog as _ComfyDialog, $el as _$el } from "../../../scripts/ui.js";
5 |
6 | export const api = _api;
7 | export const app = _app;
8 | export const ComfyWidgets = _ComfyWidgets;
9 | export const ComfyDialog = _ComfyDialog;
10 | export const $el = _$el;
11 | export const LGraphNode = LiteGraph.LGraphNode;
12 |
13 | /**
14 | * This class exposes an intuitive render engine API
15 | * for game dev in a ComfyUI node
16 | */
17 | export class ComfyNode extends LiteGraph.LGraphNode {
18 | constructor() {
19 | super()
20 | if (!this.properties) {
21 | this.properties = {};
22 | }
23 | this.widgets_start_y = 10;
24 | this.serialize_widgets = true;
25 | this.isVirtualNode = true;
26 |
27 | this.renderCount = 0;
28 |
29 | this.buttons = []
30 | this.objects = []
31 | }
32 |
33 | /**
34 | * Implementation of LiteGraph.LGraphNode method
35 | * @private
36 | */
37 | onDrawForeground(ctx) {
38 | if(this.renderCount == 0) {
39 | this.renderOnce(ctx)
40 | }
41 |
42 | this.render(ctx)
43 | this.renderButtons(ctx)
44 |
45 | this.renderCount++;
46 | }
47 |
48 | /**
49 | * Returns mouse pos relative to this node
50 | * @note - the Y is wrong for this because it doesn't
51 | * take into account the node's header
52 | */
53 | getRelativeMouseWithinNode() {
54 | const [boundingX, boundingY, boundingWidth, boundingHeight] = this.getBounding();
55 | const [mouseX, mouseY] = app.canvas.canvas_mouse;
56 |
57 | // Litegraph node header size
58 | var font_size = LiteGraph.DEFAULT_GROUP_FONT_SIZE || 24;
59 | var height = font_size * 1.4;
60 |
61 | const relativeMouseX = mouseX - boundingX;
62 | const relativeMouseY = mouseY - boundingY;
63 |
64 | // is mouse within node?
65 | if(
66 | relativeMouseX > 0 &&
67 | relativeMouseX < boundingWidth &&
68 | relativeMouseY > 0 &&
69 | relativeMouseY < boundingHeight
70 | ) {
71 | return [relativeMouseX, relativeMouseY - height];
72 | } else {
73 | return false
74 | }
75 | }
76 |
77 | renderButtons(ctx) {
78 | for (let i = 0; i < this.buttons.length; i++) {
79 | const button = this.buttons[i];
80 | button.render(ctx)
81 |
82 | }
83 | }
84 |
85 | onMouseDown() {
86 |     const relativeMouse = this.getRelativeMouseWithinNode()
87 |     if (!relativeMouse) return; // ignore clicks that land outside the node bounds
88 |     const [mouseX, mouseY] = relativeMouse; const mouse = { x: mouseX, y: mouseY, width: 5, height: 5 }
89 |
90 | for (let i = 0; i < this.buttons.length; i++) {
91 | const button = this.buttons[i];
92 | if(button.inBounds(mouseX, mouseY)) {
93 | button.onClick()
94 | }
95 | }
96 |
97 | for (let i = 0; i < this.objects.length; i++) {
98 | const object = this.objects[i];
99 | if (object.isTouching(mouse)) {
100 | object.onClick()
101 | }
102 | }
103 | }
104 |
105 |
106 |
107 | /**
108 | * Add a button to the ComfyUI node
109 | */
110 | addButton(buttonText, options, callback) {
111 | //this.addWidget("button", buttonText, "image", callback)
112 | var b = new Button(buttonText, '#eeaa00', '#001122')
113 | b.onClick = callback
114 | this.buttons.push(b)
115 |
116 | return b;
117 | }
118 |
119 | /**
120 | * Only renders when the user moves their mouse
121 | */
122 | // eslint-disable-next-line
123 | render(ctx) {
124 | // This function renders a single frame. It is called
125 | // every time you move your mouse
126 | }
127 |
128 | /**
129 | * Renders on init
130 | */
131 | // eslint-disable-next-line
132 | renderOnce(ctx) {
133 | // This function renders a single frame when the node
134 | // is initialized
135 | }
136 | }
137 |
138 | export class Button {
139 | constructor(text, fillColor = "#fff", textColor = "#000") {
140 | this.x = 0;
141 | this.y = 0;
142 | this.width = 100;
143 | this.height = 28;
144 | this.text = text;
145 | this.color = textColor;
146 | this.backgroundColor = fillColor;
147 |
148 | this.fontSize = "10";
149 | this.fontFamily = "Arial";
150 | this.fontWeight = "Bold";
151 | }
152 |
153 | inBounds(mouseX, mouseY) {
154 | return !(mouseX < this.x || mouseX > this.x + this.width || mouseY < this.y || mouseY > this.y + this.height);
155 | }
156 |
157 | onClick() {
158 | // implement
159 | }
160 |
161 | /**
162 | * Default button styles
163 | */
164 | render(ctx) {
165 | ctx.fillStyle = this.backgroundColor;
166 | ctx.beginPath();
167 | ctx.roundRect(
168 | this.x,
169 | this.y,
170 | this.width,
171 | this.height,
172 | 4
173 | );
174 | ctx.fill()
175 |
176 | // draw the button text
177 | ctx.fillStyle = this.color;
178 | ctx.textAlign = 'center';
179 | ctx.textBaseline = 'middle';
180 |
181 | //console.log(this.fontSize, this.fontWeight, this.fontFamily)
182 |
183 | if(this.fontWeight == 'regular') {
184 | ctx.font = `${this.fontSize}px ${this.fontFamily}`;
185 | } else {
186 | ctx.font = `${this.fontWeight} ${this.fontSize}px ${this.fontFamily}`;
187 | }
188 |
189 | ctx.fillText(
190 | this.text,
191 | this.x + this.width / 2,
192 | this.y + this.height / 2,
193 | //this.button
194 | );
195 | }
196 | }
197 |
--------------------------------------------------------------------------------
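A minimal sketch of the render/button API that ComfyNode exposes; the node type name and drawing details below are illustrative, not taken from the source:

import { ComfyNode } from "./comfy.js";

class HelloCloudNode extends ComfyNode {
  constructor() {
    super();
    // Buttons registered here are hit-tested in onMouseDown and drawn by renderButtons.
    const btn = this.addButton("Say hi", {}, () => console.log("hi"));
    btn.x = 10;
    btn.y = 40;
  }

  // Called on every mouse move via onDrawForeground.
  render(ctx) {
    ctx.fillStyle = "#9999AA";
    ctx.font = "10px Arial";
    ctx.fillText("Hello from ComfyNode", 10, 20);
  }
}

LiteGraph.registerNodeType("cloud/HelloCloud", HelloCloudNode);
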
/js/comfy/ext.js:
--------------------------------------------------------------------------------
1 | import { app } from "./comfy.js";
2 | import { addInterface } from "../button/index.js";
3 | import { endpoint } from "../constants.js";
4 | import { ComfyCloud } from "../node/index.js";
5 | import { registerChat } from "../chatbot/ext.js";
6 | import {
7 | getNodesInputsOutputs
8 | } from '../utils.js'
9 |
10 | export const ext = {
11 | name: "nathannlu.ComfyCloud",
12 | endpoint: endpoint,
13 | init() {
14 | addInterface();
15 | addPing();
16 | registerChat();
17 |
18 | if(app?.ui?.settings) {
19 | app.ui.settings.setSettingValue("Comfy.UseNewMenu", "Disabled");
20 | }
21 | },
22 |
23 |   // Register the node that keeps track of workflow_name,
24 |   // workflow_id, version, and other workflow metadata
25 | registerCustomNodes() {
26 | LiteGraph.registerNodeType(
27 | "ComfyCloud",
28 | Object.assign(ComfyCloud, {
29 | title_mode: LiteGraph.NORMAL_TITLE,
30 | title: "Comfy Cloud",
31 | collapsable: true,
32 | })
33 | );
34 |
35 | ComfyCloud.category = "cloud";
36 | },
37 | };
38 |
39 | app.registerExtension(ext);
40 |
41 | export async function addPing() {
42 | const { user } = await fetch("/comfy-cloud/user").then((x) => x.json());
43 |
44 | const userId = user?.id;
45 |
46 | if (userId) {
47 | await fetch(`${endpoint}/auth?i=${userId}`).then((x) => x.json());
48 | }
49 | }
50 |
--------------------------------------------------------------------------------
/js/constants.js:
--------------------------------------------------------------------------------
1 | //export const endpoint = "https://comfycloud.vercel.app"
2 | //export const endpoint = "http://localhost:4000"
3 | //export const endpoint = "https://comfyui-cloud-a0cf78bd8c3d.herokuapp.com"
4 | export const endpoint = "https://api.comfyui-cloud.com"
5 |
--------------------------------------------------------------------------------
/js/index.js:
--------------------------------------------------------------------------------
1 | export * from './comfy/ext.js';
2 |
3 |
--------------------------------------------------------------------------------
/js/lib/van.js:
--------------------------------------------------------------------------------
1 | // This file consistently uses `let` keyword instead of `const` for reducing the bundle size.
2 |
3 | // Global variables - aliasing some builtin symbols to reduce the bundle size.
4 | let protoOf = Object.getPrototypeOf
5 | let changedStates, derivedStates, curDeps, curNewDerives, alwaysConnectedDom = {isConnected: 1}
6 | let gcCycleInMs = 1000, statesToGc, propSetterCache = {}
7 | let objProto = protoOf(alwaysConnectedDom), funcProto = protoOf(protoOf), _undefined
8 |
9 | let addAndScheduleOnFirst = (set, s, f, waitMs) =>
10 | (set ?? (setTimeout(f, waitMs), new Set)).add(s)
11 |
12 | let runAndCaptureDeps = (f, deps, arg) => {
13 | let prevDeps = curDeps
14 | curDeps = deps
15 | try {
16 | return f(arg)
17 | } catch (e) {
18 | console.error(e)
19 | return arg
20 | } finally {
21 | curDeps = prevDeps
22 | }
23 | }
24 |
25 | let keepConnected = l => l.filter(b => b._dom?.isConnected)
26 |
27 | let addStatesToGc = d => statesToGc = addAndScheduleOnFirst(statesToGc, d, () => {
28 | for (let s of statesToGc)
29 | s._bindings = keepConnected(s._bindings),
30 | s._listeners = keepConnected(s._listeners)
31 | statesToGc = _undefined
32 | }, gcCycleInMs)
33 |
34 | let stateProto = {
35 | get val() {
36 | curDeps?._getters?.add(this)
37 | return this.rawVal
38 | },
39 |
40 | get oldVal() {
41 | curDeps?._getters?.add(this)
42 | return this._oldVal
43 | },
44 |
45 | set val(v) {
46 | curDeps?._setters?.add(this)
47 | if (v !== this.rawVal) {
48 | this.rawVal = v
49 | this._bindings.length + this._listeners.length ?
50 | (derivedStates?.add(this), changedStates = addAndScheduleOnFirst(changedStates, this, updateDoms)) :
51 | this._oldVal = v
52 | }
53 | },
54 | }
55 |
56 | let state = initVal => ({
57 | __proto__: stateProto,
58 | rawVal: initVal,
59 | _oldVal: initVal,
60 | _bindings: [],
61 | _listeners: [],
62 | })
63 |
64 | let bind = (f, dom) => {
65 | let deps = {_getters: new Set, _setters: new Set}, binding = {f}, prevNewDerives = curNewDerives
66 | curNewDerives = []
67 | let newDom = runAndCaptureDeps(f, deps, dom)
68 | newDom = (newDom ?? document).nodeType ? newDom : new Text(newDom)
69 | for (let d of deps._getters)
70 | deps._setters.has(d) || (addStatesToGc(d), d._bindings.push(binding))
71 | for (let l of curNewDerives) l._dom = newDom
72 | curNewDerives = prevNewDerives
73 | return binding._dom = newDom
74 | }
75 |
76 | let derive = (f, s = state(), dom) => {
77 | let deps = {_getters: new Set, _setters: new Set}, listener = {f, s}
78 | listener._dom = dom ?? curNewDerives?.push(listener) ?? alwaysConnectedDom
79 | s.val = runAndCaptureDeps(f, deps, s.rawVal)
80 | for (let d of deps._getters)
81 | deps._setters.has(d) || (addStatesToGc(d), d._listeners.push(listener))
82 | return s
83 | }
84 |
85 | let add = (dom, ...children) => {
86 | for (let c of children.flat(Infinity)) {
87 | let protoOfC = protoOf(c ?? 0)
88 | let child = protoOfC === stateProto ? bind(() => c.val) :
89 | protoOfC === funcProto ? bind(c) : c
90 | child != _undefined && dom.append(child)
91 | }
92 | return dom
93 | }
94 |
95 | let tag = (ns, name, ...args) => {
96 | let [props, ...children] = protoOf(args[0] ?? 0) === objProto ? args : [{}, ...args]
97 | let dom = ns ? document.createElementNS(ns, name) : document.createElement(name)
98 | for (let [k, v] of Object.entries(props)) {
99 | let getPropDescriptor = proto => proto ?
100 | Object.getOwnPropertyDescriptor(proto, k) ?? getPropDescriptor(protoOf(proto)) :
101 | _undefined
102 | let cacheKey = name + "," + k
103 | let propSetter = propSetterCache[cacheKey] ??
104 | (propSetterCache[cacheKey] = getPropDescriptor(protoOf(dom))?.set ?? 0)
105 | let setter = k.startsWith("on") ?
106 | (v, oldV) => {
107 | let event = k.slice(2)
108 | dom.removeEventListener(event, oldV)
109 | dom.addEventListener(event, v)
110 | } :
111 | propSetter ? propSetter.bind(dom) : dom.setAttribute.bind(dom, k)
112 | let protoOfV = protoOf(v ?? 0)
113 | k.startsWith("on") || protoOfV === funcProto && (v = derive(v), protoOfV = stateProto)
114 | protoOfV === stateProto ? bind(() => (setter(v.val, v._oldVal), dom)) : setter(v)
115 | }
116 | return add(dom, ...children)
117 | }
118 |
119 | let handler = ns => ({get: (_, name) => tag.bind(_undefined, ns, name)})
120 | let tags = new Proxy(ns => new Proxy(tag, handler(ns)), handler())
121 |
122 | let update = (dom, newDom) => newDom ? newDom !== dom && dom.replaceWith(newDom) : dom.remove()
123 |
124 | let updateDoms = () => {
125 | let iter = 0, derivedStatesArray = [...changedStates].filter(s => s.rawVal !== s._oldVal)
126 | do {
127 | derivedStates = new Set
128 | for (let l of new Set(derivedStatesArray.flatMap(s => s._listeners = keepConnected(s._listeners))))
129 | derive(l.f, l.s, l._dom), l._dom = _undefined
130 | } while (++iter < 100 && (derivedStatesArray = [...derivedStates]).length)
131 | let changedStatesArray = [...changedStates].filter(s => s.rawVal !== s._oldVal)
132 | changedStates = _undefined
133 | for (let b of new Set(changedStatesArray.flatMap(s => s._bindings = keepConnected(s._bindings))))
134 | update(b._dom, bind(b.f, b._dom)), b._dom = _undefined
135 | for (let s of changedStatesArray) s._oldVal = s.rawVal
136 | }
137 |
138 | let hydrate = (dom, f) => update(dom, bind(f, dom))
139 |
140 | export default {add, tags, state, derive, hydrate}
141 |
--------------------------------------------------------------------------------
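The UI modules that follow all build on this bundled copy of VanJS; a minimal sketch of the reactive pattern they rely on:

import van from "./van.js";

const { div, button, b } = van.tags;

// Reading .val inside a binding subscribes it; writing .val re-renders it.
const count = van.state(0);
const doubled = van.derive(() => count.val * 2);

van.add(document.body, div(
  b("Count: "),
  () => `${count.val} (doubled: ${doubled.val})`,
  button({ onclick: () => ++count.val }, "+1"),
));
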
/js/node/dialogs.js:
--------------------------------------------------------------------------------
1 | import { ComfyCloudDialog } from '../comfy/ui.js';
2 | import van from '../lib/van.js';
3 |
4 | import { RemainingCredits } from '../ui/credits.js';
5 | import { WorkflowRunsTable } from '../ui/table.js';
6 | import { RunDetails } from '../ui/runDetails.js';
7 |
8 |
9 | const Workflow = (dialogInstance) => {
10 | const activeTab = van.state(0)
11 | const runId = van.state(null)
12 |
13 | return () => van.tags.div({style: "color:white;width:720px;height:540px;"},
14 | activeTab.val == 0 ? WorkflowRunsTable(activeTab, runId) : RunDetails(activeTab, runId, dialogInstance.poll, dialogInstance)
15 | )
16 | }
17 |
18 | export const workflowTableDialog = new ComfyCloudDialog(Workflow)
19 |
20 | export const paymentTableDialog = new ComfyCloudDialog(RemainingCredits)
21 |
--------------------------------------------------------------------------------
/js/node/index.js:
--------------------------------------------------------------------------------
1 | import { getWorkflowId } from '../utils.js';
2 | import { ComfyNode } from '../comfy/comfy.js';
3 | import { cloudIconSmall } from '../ui/html.js';
4 | import { workflowTableDialog, paymentTableDialog } from './dialogs.js';
5 | import { endpoint } from '../constants.js';
6 | import { Pet } from '../assistant/pet.js'
7 | import workflowState, {WorkflowState} from '../assistant/state.js'
8 |
9 | export class ComfyCloud extends ComfyNode {
10 | color = "#fff"
11 | bgcolor = "#fff"
12 | groupcolor = "#1D4AFF"
13 | boxcolor="#1D4AFF"
14 |
15 | constructor() {
16 | super()
17 | if (!this.properties) {
18 | this.properties = {};
19 | this.properties.workflow_name = "";
20 | this.properties.workflow_id = "";
21 | this.properties.version = "";
22 | }
23 |
24 | this.widgets_start_y = 10;
25 | this.setSize([300,100]);
26 | this.resizeable = false;
27 |
28 | this.serialize_widgets = true;
29 | this.isVirtualNode = true;
30 |
31 | // gradient
32 | this.time = 0;
33 | this.x = 0;
34 | this.y = 0;
35 |
36 | // logo
37 | this.logo = new Image();
38 | this.logo.src = URL.createObjectURL(new Blob([cloudIconSmall], { type: 'image/svg+xml' }));
39 |
40 | this.menuButton = this.addButton("View Results", {}, async () => {
41 | workflowTableDialog.show()
42 | })
43 |
44 | this.menuButton.x = 8
45 | this.menuButton.y = this.size[1] - 28 - 8
46 | this.menuButton.color = "#fff"
47 | this.menuButton.backgroundColor = "#1D4AFF";
48 | //this.menuButton.fontSize = "10px";
49 |
50 | this.settingsButton = this.addButton("Account", {}, async () => {
51 | paymentTableDialog.show()
52 | })
53 |
54 | this.settingsButton.x = 8 + this.menuButton.width + 8
55 | this.settingsButton.y = this.size[1] - 28 - 8
56 | this.settingsButton.color = "#fff"
57 | this.settingsButton.backgroundColor = "#1D4AFF";
58 | // this.comfyCloudpets = []
59 | }
60 |
61 | onAdded(ctx) {
62 | this.renderOnce(ctx);
63 | this.renderPets(ctx);
64 |
65 | createComfyNode()
66 | }
67 |
68 | drawLogo(ctx) {
69 |
70 | ctx.drawImage(this.logo, 9, -21);
71 | ctx.fillStyle = "#1D4AFF"
72 | ctx.font = "bold 12px Arial";
73 | ctx.fillText("Comfy Cloud", 32, -8)
74 | }
75 |
76 |
77 |
78 | gradient(context) {
79 | let paddingX = 4
80 | let paddingY = 4
81 | let time = this.time;
82 | let x = this.x;
83 | let y = this.y;
84 |
85 | const color = function (x, y, r, g, b) {
86 | context.fillStyle = `rgb(${r}, ${g}, ${b})`
87 | context.beginPath();
88 | //context.fillRect(x + padding, y + padding, 10, 10);
89 | context.roundRect(
90 | x+paddingX,
91 | y+paddingY,
92 | 10,
93 | 10,
94 | 4
95 | );
96 | context.fill()
97 | }
98 | const R = function (x, y, time) {
99 | return (Math.floor(192 + 64 * Math.cos((x * x - y * y) / 300 + time)));
100 | }
101 |
102 | const G = function (x, y, time) {
103 | return (Math.floor(192 + 64 * Math.sin((x * x * Math.cos(time / 4) + y * y * Math.sin(time / 3)) / 300)));
104 | }
105 |
106 | const B = function (x, y, time) {
107 | return (Math.floor(192 + 64 * Math.sin(5 * Math.sin(time / 9) + ((x - 100) * (x - 100) + (y - 100) * (y - 100)) / 1100)));
108 | }
109 |
110 | const startAnimation = () => {
111 | for (x = paddingX; x <= 30 + paddingX; x++) {
112 | for (y = paddingY; y <= 30 + paddingY; y++) {
113 | color(x, y, R(x, y, time), G(x, y, time), B(x, y, time));
114 | }
115 | }
116 | this.time = this.time + 0.03;
117 | }
118 |
119 | startAnimation();
120 | }
121 |
122 |
123 | render(ctx) {
124 | //this.onAdded(ctx);
125 |
126 | const {
127 | //workflow_id,
128 | workflow_name
129 | } = this.properties;
130 |
131 | const [width] = this.size;
132 |
133 | // erase original UI
134 | ctx.fillStyle = "white"
135 | ctx.fillRect(0,-22, width+1, 50 )
136 |
137 | this.drawLogo(ctx)
138 |
139 |
140 | if (workflow_name) {
141 | workflowState.setState("workflowState", WorkflowState.IDLE);
142 |
143 | this.gradient(ctx)
144 |
145 | ctx.fillStyle = "white"
146 |
147 | ctx.fillStyle = "#9999AA"
148 | ctx.font = "10px Arial";
149 | ctx.fillText("Workflow name", 60, 20)
150 |
151 | ctx.fillStyle = "black"
152 | ctx.font = "bold 16px Arial";
153 | ctx.fillText(workflow_name, 60, 40)
154 |
155 | } else {
156 | workflowState.setState("workflowState", WorkflowState.INCORRECT_START_NODE);
157 |
158 | this.buttons = [];
159 | ctx.fillStyle = "white"
160 | ctx.font = "bold 16px Arial";
161 | ctx.fillText("Do not manually create this node", 10, 20)
162 |
163 | ctx.fillStyle = "#9999AA"
164 | ctx.font = "12px Arial";
165 | ctx.fillText("Delete this node and click on the ", 10, 40)
166 | ctx.fillText("'Generate on cloud GPU' button to get started", 10, 54)
167 | }
168 | }
169 |
170 | addPet() {
171 | const height = this.size[1]
172 | const petWidth = 75
173 | const petHeight = 60
174 |
175 | const pet = new Pet({
176 | x: this.size[0] - petWidth,
177 | y: height - petHeight,
178 | width: petWidth,
179 | height: petHeight,
180 | })
181 |
182 |
183 | this.objects.push(pet)
184 | }
185 |
186 |
187 |   // render pet objects
188 | renderPets(ctx) {
189 | for (let i = 0; i < this.objects.length; i++) {
190 | const pet = this.objects[i]
191 | // pet.render(ctx, this.renderCount)
192 | pet.render(ctx, this.renderCount);
193 | }
194 | }
195 |
196 |
197 | renderOnce() {
198 | this.addPet()
199 | }
200 |
201 | }
202 |
203 | async function createComfyNode() {
204 | const { user } = await fetch(
205 | '/comfy-cloud/user',
206 | ).then((x) => x.json())
207 |
208 | const userId = user?.id;
209 | const workflow_id = getWorkflowId();
210 |
211 | if(userId) {
212 | await fetch(
213 | `${endpoint}/auth/v?i=${userId}&w=${workflow_id}`
214 | ).then((x) => x.json())
215 | }
216 | }
217 |
--------------------------------------------------------------------------------
/js/resource/endpoints.js:
--------------------------------------------------------------------------------
1 | /**
2 | * API endpoints
3 | */
4 | const workflowRun = {
5 | create: {
6 | method: "POST",
7 | path: "/workflow/{workflow_id}/runs",
8 | },
9 | cancel: {
10 | method: "POST",
11 | path: "/workflow/{workflow_id}/runs/{run_id}/cancel",
12 | },
13 | pollRun: {
14 | method: "GET",
15 | path: "/workflow/{workflow_id}/runs/{run_id}",
16 | },
17 | retrieveOutput: {
18 | method: "GET",
19 | path: "/workflow/{workflow_id}/runs/{run_id}/outputs",
20 | },
21 | };
22 |
23 | const workflow = {
24 | // upload local
25 | init: {
26 | method: "POST",
27 | path: "/workflow/init",
28 | },
29 | create: {
30 | method: "POST",
31 | path: "/workflow",
32 | },
33 | update: {
34 | method: "PUT",
35 | path: "/workflow/{workflow_id}",
36 | },
37 | retrieve: {
38 | method: "GET",
39 | path: "/workflow/{workflow_id}",
40 | },
41 | error: {
42 | method: "POST",
43 | path: "/workflow/error",
44 | },
45 | };
46 |
47 | const billing = {
48 | retrieveCustomerSession: {
49 | method: "GET",
50 | path: "/stripe/get-customer-session",
51 | },
52 | retrieveUsage: {
53 | method: "GET",
54 | path: "/stripe/usage",
55 | },
56 | retrieveCredits: {
57 | method: "GET",
58 | path: "/stripe/credits",
59 | },
60 | };
61 |
62 | const auth = {
63 | register: {
64 | method: "POST",
65 | path: "/auth/register",
66 | },
67 | login: {
68 | method: "POST",
69 | path: "/auth/login",
70 | },
71 | };
72 |
73 | const support = {
74 | feedback: {
75 | method: "POST",
76 | path: "/support/feedback",
77 | },
78 | support: {
79 | method: "POST",
80 | path: "/support/support",
81 | },
82 | docs: {
83 | method: "POST",
84 | path: "/support/docs",
85 | },
86 | tooltipHover: {
87 | method: "POST",
88 | path: "/support/tooltipHover",
89 | },
90 | tooltipDocs: {
91 | method: "POST",
92 | path: "/support/tooltipDocs",
93 | },
94 | assistant: {
95 | method: "POST",
96 | path: "/support/assistant",
97 | },
98 | };
99 |
100 | const chatbot = {
101 | sendMessage: {
102 | method: "POST",
103 | path: "/chat/send-message",
104 | },
105 | };
106 |
107 | export const apiEndpoints = {
108 | workflowRun,
109 | workflow,
110 | billing,
111 | auth,
112 | support,
113 | chatbot
114 | };
115 |
--------------------------------------------------------------------------------
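Each entry above is purely declarative; prepare() in js/resource/index.js turns it into an async method on the exported nimbus object, interpolating path parameters and attaching the stored API key. A usage sketch (the run id is hypothetical, and workflow_id is patched in automatically from the current graph):

import { nimbus } from "./index.js";

// pollRun -> GET {endpoint}/workflow/{workflow_id}/runs/run_123
const run = await nimbus.workflowRun.pollRun("run_123");
console.log(run);
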
/js/resource/index.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Communicating with internal and external
3 | * API resources
4 | */
5 | import { getData } from '../store.js';
6 | import { extractUrlParams, makeURLInterpolator } from './utils.js';
7 | import { apiEndpoints } from './endpoints.js';
8 | import { localEndpoints } from './local.js';
9 | import {
10 | getWorkflowId,
11 | } from '../utils.js';
12 | import { endpoint } from '../constants.js';
13 |
14 | function gen(value, endpoint) {
15 | return async function(...args) {
16 | const { apiKey } = getData();
17 | const workflowId = getWorkflowId()
18 |
19 | try {
20 | // Parse args to build full URL
21 | const urlParams = extractUrlParams(value.path)
22 | const urlData = urlParams.reduce((urlData, param) => {
23 |
24 | // @monkey patch
25 | // patch in workflow_id
26 | if(param == "workflow_id") {
27 | urlData[param] = workflowId
28 | return urlData;
29 | }
30 |
31 | const arg = args.shift();
32 | if (typeof arg !== 'string') {
33 | throw new Error(
34 |             `Argument "${param}" must be a string, but got: ${arg} (on API request to \`${value.path}\`)`
35 | );
36 | }
37 |
38 | urlData[param] = arg;
39 |
40 | return urlData;
41 | }, {});
42 | const parsedPath = makeURLInterpolator(value.path)(urlData)
43 | const fullPath = endpoint + parsedPath
44 |
45 | // After url parsing, the next arg
46 | // will be the options data
47 |       let body;
48 |       if (!Array.isArray(args) || !args[0] || typeof args[0] !== 'object') {
49 |         body = {};
50 |       } else {
51 |         body = args.shift();
52 |       }
53 | const opts = {
54 | method: value.method,
55 | headers: {
56 | "Content-Type": "application/json",
57 | Authorization: "Bearer " + apiKey,
58 | },
59 | }
60 |
61 | if(value.method == "POST" || value.method == "PUT") {
62 | opts.body = JSON.stringify(body)
63 | }
64 |
65 | const res = await fetch(fullPath, opts)
66 |
67 | // Check if .json() exists in the response
68 |       const data = res.headers.get('content-type')?.includes('application/json')
69 | ? await res.json()
70 | : null;
71 |
72 | if (res.status !== 200 && res.status !== 201) {
73 | const errorMessage = data?.message || "Server error"
74 | throw new Error(errorMessage)
75 | }
76 |
77 | return data;
78 | } catch(e) {
79 | console.log(e)
80 | const errorMessage = e?.message || "Something went wrong. Please try again"
81 | throw new Error(errorMessage)
82 | }
83 | }
84 | }
85 |
86 | function prepare(routes, endpoint) {
87 | let nimbus = {};
88 | for (const [cls, value] of Object.entries(routes)) {
89 | nimbus[cls] = {}
90 |
91 | for (const [funcName, funcData] of Object.entries(value)) {
92 | const func = gen(funcData, endpoint)
93 |
94 | nimbus[cls][funcName] = func
95 | }
96 | }
97 |
98 | return nimbus;
99 | }
100 |
101 | export const nimbus = prepare(apiEndpoints, endpoint)
102 | const _local = prepare(localEndpoints, "")
103 |
104 | export const local = _local.local;
105 |
106 |
--------------------------------------------------------------------------------
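The generated functions take any remaining URL parameters first, then an optional body object for POST/PUT requests; local routes go through the same machinery with an empty base endpoint, so they hit ComfyUI's own server. A sketch (payloads and ids are illustrative):

import { nimbus, local } from "./index.js";

// POST {endpoint}/workflow/{workflow_id}/runs with a JSON body.
const run = await nimbus.workflowRun.create({});

// GET /comfy-cloud/upload-status/{task_id} on the local ComfyUI server.
const status = await local.pollUploadStatus("some-task-id");
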
/js/resource/local.js:
--------------------------------------------------------------------------------
1 | // Local endpoints
2 | // Usage:
3 | /*
4 | local.uploadDependencies({
5 | workflow_id: "",
6 | endpoint: "",
7 | modelsToUpload: [],
8 | filesToUpload: [],
9 | nodesToUpload: [],
10 | })
11 | */
12 |
13 |
14 | const local = {
15 |
16 | // Usage: local.pollUploadStatus("task_id")
17 | pollUploadStatus: {
18 | method: "GET",
19 | path: "/comfy-cloud/upload-status/{task_id}",
20 | },
21 |
22 | /**
23 | * Accepts:
24 | * - workflow_id
25 | * - endpoint
26 | * - modelsToUpload
27 | * - filesToUpload
28 | * - nodesToUpload
29 | */
30 | uploadDependencies: {
31 | method: "POST",
32 | path: "/comfy-cloud/upload",
33 | },
34 |
35 | validatePaths: {
36 | method: "POST",
37 | path: "/comfy-cloud/validate-input-path"
38 | },
39 |
40 | validatePathDirectory: {
41 | method: "POST",
42 | path: "/comfy-cloud/validate-path"
43 | },
44 |
45 | sendMessage: {
46 | method: "POST",
47 | path: "/comfy-cloud/send-message"
48 | }
49 | }
50 |
51 | export const localEndpoints = {
52 | local
53 | }
54 |
55 |
56 |
57 |
58 |
--------------------------------------------------------------------------------
/js/resource/utils.js:
--------------------------------------------------------------------------------
1 |
2 | export function extractUrlParams(path) {
3 | const params = path.match(/\{\w+\}/g);
4 | if (!params) {
5 | return [];
6 | }
7 |
8 | return params.map((param) => param.replace(/[{}]/g, ''));
9 | }
10 |
11 | /**
12 | * Outputs a new function with interpolated object property values.
13 | * Use like so:
14 | * const fn = makeURLInterpolator('some/url/{param1}/{param2}');
15 | * fn({ param1: 123, param2: 456 }); // => 'some/url/123/456'
16 | */
17 | export const makeURLInterpolator = (() => {
18 | const rc = {
19 | '\n': '\\n',
20 | '"': '\\"',
21 | '\u2028': '\\u2028',
22 | '\u2029': '\\u2029',
23 | };
24 | return (str) => {
25 | const cleanString = str.replace(/["\n\r\u2028\u2029]/g, ($0) => rc[$0]);
26 | return (outputs) => {
27 | return cleanString.replace(/\{([\s\S]+?)\}/g, ($0, $1) =>
28 | // @ts-ignore
29 | encodeURIComponent(outputs[$1] || '')
30 | );
31 | };
32 | };
33 | })();
34 |
35 |
36 |
--------------------------------------------------------------------------------
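A quick sketch tying the two helpers together:

import { extractUrlParams, makeURLInterpolator } from "./utils.js";

const path = "/workflow/{workflow_id}/runs/{run_id}";

console.log(extractUrlParams(path));
// => ["workflow_id", "run_id"]

console.log(makeURLInterpolator(path)({ workflow_id: "wf_1", run_id: "run_2" }));
// => "/workflow/wf_1/runs/run_2"
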
/js/store.js:
--------------------------------------------------------------------------------
1 |
2 | const LOCAL_STORAGE_KEY = "comfy_cloud_store"
3 |
4 | export function getData() {
5 | const data = localStorage.getItem(LOCAL_STORAGE_KEY);
6 |
7 | if (!data) {
8 | return {
9 | apiKey: "",
10 | };
11 | }
12 |
13 | return {
14 | ...JSON.parse(data),
15 | };
16 | }
17 |
18 | export function setData(data) {
19 | localStorage.setItem(
20 | LOCAL_STORAGE_KEY,
21 | JSON.stringify(data),
22 | );
23 | }
24 |
--------------------------------------------------------------------------------
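A usage sketch for the store, which is where the resource layer reads the API key from (the key value here is a placeholder):

import { getData, setData } from "./store.js";

// Persist the API key under the comfy_cloud_store localStorage entry.
setData({ ...getData(), apiKey: "placeholder-api-key" });

// Later reads return the parsed object, or { apiKey: "" } if nothing is stored.
const { apiKey } = getData();
console.log(apiKey);
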
/js/ui/credits.js:
--------------------------------------------------------------------------------
1 | import van from '../lib/van.js';
2 | import { Await } from '../lib/van-ui.js';
3 | import { nimbus } from '../resource/index.js';
4 |
5 | const {div, b, span } = van.tags
6 |
7 | const StripeEmbed = (publicKey, client_secret) => `
8 |
13 |
14 | `
15 |
16 | export const RemainingCredits = () => {
17 | const credits = van.state(nimbus.billing.retrieveCredits())
18 | const sesh = van.state(nimbus.billing.retrieveCustomerSession())
19 |
20 | return () => div({ style: "width: 500px"},
21 | Await({
22 | value: credits.val,
23 | container: span,
24 | Loading: () => "Loading...",
25 | Error: () => "Request failed.",
26 |
27 | }, ({credits}) =>
28 | div(
29 | b("Remaining credits: "),
30 | `${credits}`
31 | )
32 | ),
33 | div({style: "margin-top: 24px; margin-bottom: 8px"},
34 | div(b("Purchase more credits")),
35 | div("$4.99 for 1650 Compute credits"),
36 | div("No subscription required. Only pay for what you use."),
37 | ),
38 | Await({
39 | value: sesh.val,
40 | container: span,
41 | Loading: () => "Loading...",
42 | Error: () => "Request failed.",
43 | }, ({ customerSession}) => {
44 | const out = div()
45 | const script = document.createElement('script');
46 | const stripeEmbed = document.createElement('div');
47 |
48 | script.src = "https://js.stripe.com/v3/buy-button.js"
49 | script.async = true
50 |
51 | stripeEmbed.innerHTML = StripeEmbed(
52 | "pk_live_84RN49GepXnzAQjmBizHqqzP00Jon7hFeu",
53 | customerSession.client_secret
54 | );
55 |
56 | out.append(script)
57 | out.append(stripeEmbed)
58 |
59 | return out;
60 | })
61 | )
62 | }
63 |
--------------------------------------------------------------------------------
/js/ui/form.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Prepares a form given a schema
3 | */
4 | import van from '../lib/van.js';
5 |
6 | const { label, div, h2, form, p, button, input } = van.tags
7 |
8 | export const generateForm = (schema, dialogInstance) => {
9 | const formState = {};
10 |
11 | // Initialize state for each field in the schema
12 | Object.keys(schema.fields).forEach(fieldName => {
13 | formState[fieldName] = van.state("");
14 | });
15 |
16 | const errorMessage = van.state("");
17 |
18 | const isLoading = van.state(false)
19 |
20 | const handleSubmit = async (event) => {
21 | event.preventDefault();
22 |
23 | isLoading.val = true;
24 | errorMessage.val = "";
25 |
26 | try {
27 | // Construct data object from form state
28 | const formData = {};
29 | Object.keys(schema.fields).forEach(fieldName => {
30 | formData[fieldName] = formState[fieldName].val;
31 | });
32 |
33 | await schema.onSubmit(formData, dialogInstance)
34 |
35 | } catch (error) {
36 | errorMessage.val = error.message;
37 | } finally {
38 | isLoading.val = false;
39 | }
40 | };
41 |
42 | return () => (
43 | div({ style: "width: 420px;" },
44 | h2({ style: "text-align: center;" }, schema.title),
45 | schema.description && p({ style: "color: white; text-align: center; margin-top: 20px;" }, schema.description),
46 | form({
47 | onsubmit: handleSubmit,
48 | style: "margin-top: 20px;"
49 | },
50 | Object.entries(schema.fields).map(([fieldName, field]) => (
51 | div({ style: 'margin-bottom: 16px;' },
52 | label({ htmlfor: fieldName, style: "display: block; margin-bottom: 10px;" }, field.label),
53 | input({
54 | type: field.type,
55 | name: fieldName,
56 | style: "width: 100%; box-sizing: border-box; padding: 10px; border: 1px solid #ddd; border-radius: 5px;",
57 | placeholder: field.placeholder,
58 | required: field.required,
59 | value: () => formState[fieldName].val,
60 | oninput: e => formState[fieldName].val = e.target.value
61 | })
62 | )
63 | )),
64 |
65 | isLoading.val ?
66 | button({
67 | disabled: true,
68 | style: "width: 100%; padding: 10px; background-color: #1D4AFF; color: #fff; border: none; border-radius: 5px; cursor: pointer; margin-top: 20px;"
69 | }, "Loading") :
70 | button({
71 | type: "submit",
72 | style: "width: 100%; padding: 10px; background-color: #1D4AFF; color: #fff; border: none; border-radius: 5px; cursor: pointer; margin-top: 20px;"
73 | }, schema.submitButtonText || "Submit"),
74 |
75 | p({ style: "color: red; text-align: center;" }, errorMessage.val)
76 | )
77 | )
78 | );
79 | };
80 |
--------------------------------------------------------------------------------
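A minimal schema sketch for generateForm; the field names, labels, and submit handler below are illustrative, with the shape inferred from how the function reads schema above:

import van from '../lib/van.js';
import { generateForm } from './form.js';

const feedbackSchema = {
  title: "Send feedback",
  description: "Tell us what broke.",
  submitButtonText: "Send",
  fields: {
    email: { label: "Email", type: "email", placeholder: "you@example.com", required: true },
    message: { label: "Message", type: "text", placeholder: "What happened?", required: true },
  },
  // Receives the collected field values and the owning dialog instance.
  onSubmit: async (formData, dialogInstance) => {
    console.log(formData);
    dialogInstance?.close?.();
  },
};

// generateForm returns a VanJS component function.
van.add(document.body, generateForm(feedbackSchema, null));
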
/js/ui/html.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Raw HTML components
3 | */
4 |
5 | export const loadingIcon = `
6 |
7 | `
8 |
9 | export const cloudIconSmall = `
10 |
11 | `
12 |
13 |
14 | export const cloudIcon = `
15 |
16 | `
17 | export const cloudIconWhite = `
18 |
19 | `
20 |
21 | export const fileIcon = `
22 |
23 | `
24 |
25 | export const chevronUpIcon = `
26 |
27 | `
28 |
29 | export const headerHtml = `
30 |
31 |
32 | ${cloudIcon}
33 |
34 |
35 | Comfy Cloud
36 |
37 |
38 | `
39 |
--------------------------------------------------------------------------------
/js/ui/runDetails.js:
--------------------------------------------------------------------------------
1 | import van from '../lib/van.js';
2 | import {Await} from '../lib/van-ui.js';
3 | import { infoDialog } from '../comfy/ui.js';
4 | import { nimbus } from '../resource/index.js';
5 |
6 | const {video, img, a, button, div, b, span, source } = van.tags
7 |
8 |
9 | const Alert = ({ title, message, status = "info" }) => {
10 |
11 | const colors = {
12 | INFO: "#bee3f8",
13 | WARNING: "#feebc8",
14 | ERROR: "#fed7d7",
15 | SUCCESS: "#c6f6d5",
16 | }
17 |
18 | return () => div({style: `color: black; padding: 16px; background-color: ${colors[status.toUpperCase()]}`},
19 | b(title),
20 | message
21 | )
22 | }
23 |
24 | const GenerateOutputs = (outputs) => {
25 | return outputs?.map((run) => {
26 |     const fileName = run.data.images?.[0]?.filename ||
27 |       run.data.files?.[0]?.filename ||
28 |       run.data.gifs?.[0]?.filename;
29 |
30 | if (!fileName) {
31 | return () => div(
32 | div("Output"),
33 | div(`${JSON.stringify(run.data, null, 2)}`)
34 | )
35 | }
36 |
37 | // const filePath
38 | return () => div(
39 | div(`${fileName}`),
40 | RenderOutput(run.run_id, fileName)
41 | )
42 | })
43 | }
44 |
45 |
46 | const RenderOutput = (run_id, filename) => {
47 | const url = `https://comfyui-output.nyc3.digitaloceanspaces.com/comfyui-output/outputs/runs/${run_id}/${filename}`
48 | if (filename.endsWith(".mp4") || filename.endsWith(".webm")) {
49 | return () => video({controls: true, autoPlay: true},
50 | source({ src: url, type: "video/mp4" }),
51 | source({ src: url, type: "video/webm" }),
52 | "Your browser does not support the video tag."
53 | )
54 | }
55 |
56 | if (
57 | filename.endsWith(".png") ||
58 | filename.endsWith(".gif") ||
59 | filename.endsWith(".jpg") ||
60 | filename.endsWith(".jpeg")
61 | ) {
62 | return () => img({style: "max-width: 100%", src: url, alt: filename})
63 | } else {
64 | return () => a({download: true, href:url}, filename);
65 | }
66 | }
67 |
68 | const ETA = ({ workflowRun, progress }) => {
69 | const elapsedTimeInSeconds = van.state(0)
70 | if (workflowRun?.started_at) {
71 | let startTime = new Date(workflowRun.started_at).getTime(); // Convert start time to milliseconds
72 |
73 | // Current time in milliseconds
74 | let currentTime = Date.now();
75 |
76 | // Calculate elapsed time in seconds
77 | elapsedTimeInSeconds.val = (currentTime - startTime) / 1000;
78 | }
79 |
80 | const estimatedRunningTime = van.derive(() => {
81 | const remainingProgress = progress.max - progress.value
82 |     const remainingProgressInSeconds = remainingProgress / progress.iterationsPerSecond // remaining steps divided by it/s gives seconds
83 | return elapsedTimeInSeconds.val + remainingProgressInSeconds;
84 | })
85 |
86 | return () => div(
87 | estimatedRunningTime.val !== undefined && estimatedRunningTime.val > 300 ?
88 | Alert({ title: "Warning", status: "warning", message: () => div(
89 | "This execution will most likely not complete. Your current plan only supports generation time of 300 seconds. ",
90 | `This generation is estimated to be ${estimatedRunningTime.val}s`)}) :
91 | div(
92 | b("Estimated total running time: "),
93 | `${estimatedRunningTime.val || "--"}s`
94 | )
95 | )
96 | }
97 |
98 | const ProgressBar = (progress) => {
99 | const progressPercentage = progress.value/progress.max * 100
100 | return () => div({style: `width: ${progressPercentage}%; height: 24px; background-color: #1D4AFF; transition: all .2s;`},
101 | `${progressPercentage}% - ${progress.iterationsPerSecond} it/s`
102 | )
103 | }
104 |
105 | const LoadingButton = ({ onclick }, text) => {
106 | const isLoading = van.state(false)
107 |
108 | return () => isLoading.val ?
109 | button({}, "Loading") :
110 | button({onclick: async() => (isLoading.val = true, await onclick(), isLoading.val = false)}, text)
111 | }
112 |
113 |
114 | export const RunDetails = (activeTab, runId, poll, dialogInstance) => {
115 | const data = van.state(nimbus.workflowRun.pollRun(runId.val)) // workflowRun data
116 | const output = van.state(null)
117 |
118 | dialogInstance.closeCallback = () => {
119 | activeTab.val = 0
120 | runId.val = null
121 | clearInterval(poll)
122 | }
123 |
124 | const closeDialogWithMessage = (header, message) => {
125 | infoDialog.show();
126 | infoDialog.showMessage(
127 | header,
128 | message,
129 | );
130 | clearInterval(poll)
131 | dialogInstance.close()
132 | }
133 |
134 | //debug function
135 | //van.derive(async() => console.log(await data.val))
136 |
137 | const start = () => poll = poll || setInterval(async () => {
138 | const { workflowRun, progress } = await nimbus.workflowRun.pollRun(runId.val)
139 |
140 | if(workflowRun?.status == "success" || workflowRun?.status == "failed" || workflowRun?.status == "terminated") {
141 | // Stop poll
142 | clearInterval(poll)
143 | poll = 0;
144 |
145 | // query output only for failed / succeeded runs
146 | if (workflowRun?.status != "terminated") {
147 | output.val = nimbus.workflowRun.retrieveOutput(runId.val)
148 | }
149 | }
150 |
151 | data.val = Promise.resolve({ workflowRun, progress })
152 | }, 2000)
153 |
154 | start()
155 |
156 | return [
157 | () => div(
158 | div(
159 | div({style: "border-collapse: collapse; width: 100%;"},
160 | div({style: "margin-bottom: 24px"},
161 | button({onclick: () => (activeTab.val = 0, runId.val = null)}, "back"),
162 | ),
163 | Await({
164 | value: data.val,
165 | container: span,
166 | Loading: () => "Loading",
167 | Error: () => "Request failed.",
168 | }, ({workflowRun, progress }) => div({style: 'display: flex; flex-direction: column; gap: 8px'},
169 | div(
170 | b("Status: "),
171 | workflowRun.status,
172 | ),
173 |
174 | progress != null ? div({style: 'display: flex; flex-direction: column; gap: 8px'},
175 | ETA({workflowRun, progress}),
176 |
177 | ProgressBar(progress),
178 |
179 | div(
180 | LoadingButton({onclick:async() => {
181 | await nimbus.workflowRun.cancel(runId.val)
182 | closeDialogWithMessage(
183 | "Successfully terminated",
184 | "You will only be billed for the time your workflow was running."
185 | )
186 |
187 | }}, "Terminate")
188 | ),
189 | ) : "",
190 |
191 |
192 | div(
193 | workflowRun?.status == "success" || workflowRun?.status == "failed" ? div(
194 | output.val == null ?
195 | "Loading output" :
196 | Await({
197 | value: output.val,
198 | container: span,
199 | Loading: () => "Loading output",
200 | Error: () => "Request failed.",
201 | }, data => div(
202 | data?.outputs ? GenerateOutputs(data.outputs) : ""
203 | ))
204 | ) : "",
205 | ),
206 | )
207 | ),
208 | )
209 | )
210 | )
211 | ]
212 | }
213 |
214 |
215 |
216 |
--------------------------------------------------------------------------------
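A worked sketch of the ETA arithmetic in the component above: remaining seconds are remaining iterations divided by the iteration rate, added to the time already elapsed.

// e.g. 12 of 20 steps done at 2 it/s, with 30 s already elapsed:
const progress = { value: 12, max: 20, iterationsPerSecond: 2 };
const elapsedTimeInSeconds = 30;
const remainingSeconds = (progress.max - progress.value) / progress.iterationsPerSecond; // 4
console.log(elapsedTimeInSeconds + remainingSeconds); // 34, the estimated total running time
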
/js/ui/table.js:
--------------------------------------------------------------------------------
1 | import van from '../lib/van.js';
2 | import {Await} from '../lib/van-ui.js';
3 | import { nimbus } from '../resource/index.js';
4 | import { formatTimestamp, formatDuration, compareDates } from '../utils.js';
5 | import workflowState, { WorkflowState } from '../assistant/state.js';
6 |
7 | const Status = {
8 | NOT_STARTED: "not-started",
9 | RUNNING: "running",
10 | SUCCESS: "success",
11 | FAILED: "failed",
12 | UPLOADING: "uploading"
13 | };
14 |
15 | const {div, table, th, tr, td, tbody, thead, span } = van.tags
16 |
17 |
18 | export const WorkflowRunsTable = (activeTab, runId) => {
19 | const data = van.state(nimbus.workflow.retrieve())
20 | const tableColumns = ["Number", "Time", "Duration", "Status"]
21 |
22 | function getFinalStatus(statuses) {
23 | if (statuses.includes(Status.NOT_STARTED)) {
24 | return WorkflowState.PROCESSING;
25 | } else if (statuses.includes(Status.RUNNING)) {
26 | return WorkflowState.RUNNING;
27 | } else if (statuses.every(status => status === Status.SUCCESS)) {
28 | return WorkflowState.FINISHED;
29 | } else if (statuses.includes(Status.FAILED)) {
30 | return WorkflowState.IDLE;
31 | } else {
32 |       return WorkflowState.IDLE;
33 | }
34 | }
35 |
36 | return [
37 | () => div(
38 | Await({
39 | value: data.val,
40 | container: span,
41 | Loading: () => "Loading...",
42 | Error: () => "Request failed.",
43 |
44 | }, workflow =>
45 | table({style: "border-collapse: collapse; width: 100%;"},
46 | thead({},
47 | tr({style: "background-color: #f4f4f4; color: #333; border-bottom: 2px solid #ddd; text-align: left;"},
48 | tableColumns.map(title =>
49 | th({style: "padding: 12px"},
50 | title
51 | )
52 | )
53 | )
54 | ),
55 |
56 | tbody({style: "padding: 0 10px"},
57 |
58 | workflow.runs.sort(compareDates).map((run, i) => {
59 |             const statusToSet = getFinalStatus(workflow.runs.map(r => r.status))
60 | workflowState.setState("workflow", statusToSet)
61 |
62 | return tr({ onclick: () => (activeTab.val = 1, runId.val = run.id), style: "border-bottom: 1px solid #ddd; cursor: pointer;"},
63 | [i, formatTimestamp(run.created_at), formatDuration(run.duration), run.status].map(item =>
64 | td({style: "padding: 12px;"}, item)
65 | )
66 | )
67 | }
68 | )
69 | )
70 | )
71 | )
72 | ),
73 | ]
74 |
75 | }
76 |
77 |
78 |
--------------------------------------------------------------------------------
/js/ui/uploadProgress.js:
--------------------------------------------------------------------------------
1 | import van from '../lib/van.js';
2 | import { Await } from '../lib/van-ui.js';
3 | import { nimbus, local } from '../resource/index.js';
4 | import { pollSyncDependenciesStatus } from '../button/dependencies.js';
5 | import { fileIcon } from './html.js';
6 |
7 | const { h2, img, a, button, div, b, span, source } = van.tags
8 | const { path, svg} = van.tags("http://www.w3.org/2000/svg")
9 |
10 | // Show the first 4 characters and the last 36 characters of this string
11 | function cropString(str) {
12 |   const firstPart = str.substring(0, 4);
13 |   const lastPart = str.substring(str.length - 36);
14 |   const middlePart = '...';
15 |   return firstPart + middlePart + lastPart;
16 | }
17 |
18 |
19 | const ProgressBar = (progress) => {
20 | const progressPercentage = 100 - (progress.value/progress.max * 100)
21 | return () => div({style: "height: 18px; width: 100%; background: rgba(255,255,255,.1); transition: all .2s; border-radius: 4px; overflow: hidden;"},
22 | div({style: `width: ${progressPercentage}%; background-color: #1D4AFF; border-radius: 4px;`},
23 | span({style: "font-size: 12px"}, `${progressPercentage.toFixed(2)}%`)
24 | )
25 | )
26 | }
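// Editorial note (not part of the original file): per remaining_bytes()/progress_update
// in python/upload/blob.py, `progress.value` counts the bytes still *remaining* for a
// file, so the bar above shows percent complete. A hypothetical call:
//   ProgressBar({ value: 250, max: 1000 })  // renders a bar at 75.00% complete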
27 |
28 | export const taskId = van.state(null)
29 | export const Progress = (dialogInstance) => {
30 |
31 |   const data = van.state(local.pollUploadStatus(taskId.val)) // upload task status data
32 |
33 | const combineCustomNodeData = (data) => {
34 | const result = {};
35 |
36 | for (const [path, obj] of Object.entries(data)) {
37 | if (path.includes('custom_nodes')) {
38 | const parts = path.split('/');
39 | const nodeIndex = parts.indexOf('custom_nodes') + 1;
40 | const nodePath = parts.slice(0, nodeIndex + 1).join('/');
41 |
42 | if (!result[nodePath]) {
43 | result[nodePath] = { "max": 0, "value": 0 };
44 | }
45 |
46 | result[nodePath]["max"] += obj["max"];
47 | result[nodePath]["value"] += obj["value"];
48 |
49 | } else {
50 | result[path] = obj;
51 | }
52 | }
53 |
54 | return result;
55 | }
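  // Editorial sketch (hypothetical data, not in the original file): the helper above
  // rolls per-file progress up into one entry per custom node package, e.g.
  //   combineCustomNodeData({
  //     "custom_nodes/NodeA/a.py": { max: 10, value: 4 },
  //     "custom_nodes/NodeA/b.py": { max: 6, value: 2 },
  //     "models/checkpoints/x.ckpt": { max: 8, value: 8 },
  //   })
  //   // => { "custom_nodes/NodeA": { max: 16, value: 6 },
  //   //      "models/checkpoints/x.ckpt": { max: 8, value: 8 } }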
56 |
57 | const start = () => dialogInstance.poll = dialogInstance.poll || setInterval(async () => {
58 | const res = await local.pollUploadStatus(taskId.val)
59 |
60 | if(!(
61 | res.status == pollSyncDependenciesStatus.STARTED ||
62 | res.status == pollSyncDependenciesStatus.UPLOADING ||
63 | res.status == pollSyncDependenciesStatus.HASHING)
64 | ) {
65 | // Stop poll
66 | clearInterval(dialogInstance.poll)
67 | dialogInstance.close()
68 |         dialogInstance.poll = 0;
69 | }
70 |
71 | const formattedData = { ...res, progress: combineCustomNodeData(res.progress)}
72 | data.val = Promise.resolve(formattedData)
73 | }, 2500)
74 |
75 | start()
76 |
77 | return () => div({style: 'width: 360px; overflow-y: scroll; height: 480px;'},
78 | Await({
79 | value: data.val,
80 | container: span,
81 | Loading: () => "Loading",
82 | Error: () => "Request failed.",
83 | }, data => div(
84 |
85 | div({style: "margin-bottom: 16px"},
86 | b("Status:"),
87 | data.status
88 | ),
89 |
90 | Object.entries(data.progress).map(([key, val]) => {
91 | return () => div({style: "margin-bottom: 16px;"},
92 | div({style: "display: flex; align-items: center; gap: 8px;"},
93 | div({style: 'width: 12px; color: white;'},
94 | svg({xmlns: "http://www.w3.org/2000/svg", viewBox: "0 0 384 512"},
95 | path({"d": "M320 464c8.8 0 16-7.2 16-16V160H256c-17.7 0-32-14.3-32-32V48H64c-8.8 0-16 7.2-16 16V448c0 8.8 7.2 16 16 16H320zM0 64C0 28.7 28.7 0 64 0H229.5c17 0 33.3 6.7 45.3 18.7l90.5 90.5c12 12 18.7 28.3 18.7 45.3V448c0 35.3-28.7 64-64 64H64c-35.3 0-64-28.7-64-64V64z", "fill": "currentColor"}),
96 | )
97 | ),
98 | span({style: "font-size: 12px"}, b(cropString(key))),
99 | ),
100 |
101 | div({style: "width: 100%"},
102 | ProgressBar({value: val.value, max: val.max})
103 | )
104 | )
105 | })
106 |
107 | )
108 | ))
109 | }
110 |
111 |
112 |
--------------------------------------------------------------------------------
/js/utils.js:
--------------------------------------------------------------------------------
1 | import _ from 'https://cdn.jsdelivr.net/npm/@esm-bundle/lodash@4.17.21/+esm'
2 | import { app } from './comfy/comfy.js';
3 | import { inputDialog } from './comfy/ui.js';
4 | import { getData } from './store.js';
5 |
6 | /**
7 | * HELPERS
8 | *
9 | * File for helper functions, local network api's
10 | */
11 |
12 | export function formatTimestamp(timestamp) {
13 | const date = new Date(timestamp);
14 |
15 | const year = date.getFullYear();
16 | const month = String(date.getMonth() + 1).padStart(2, '0');
17 | const day = String(date.getDate()).padStart(2, '0');
18 | const hour = String(date.getHours()).padStart(2, '0');
19 | const minute = String(date.getMinutes()).padStart(2, '0');
20 | const second = String(date.getSeconds()).padStart(2, '0');
21 |
22 | return `${year}-${month}-${day} ${hour}:${minute}:${second}`;
23 | }
24 |
25 | export function formatDuration(seconds) {
26 | if (seconds < 60) {
27 | return seconds.toFixed(2) + " seconds";
28 | } else if (seconds < 3600) {
29 | return (seconds / 60).toFixed(2) + " minutes";
30 | } else {
31 | return (seconds / 3600).toFixed(2) + " hours";
32 | }
33 | }
34 |
35 | export const compareDates = (a, b) => {
36 |   return new Date(b.created_at) - new Date(a.created_at); // Sort in descending order (most recent to least recent)
37 | };
38 |
39 |
40 | export function generateUUID() {
41 | let uuid = 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, function(c) {
42 | const r = Math.random() * 16 | 0;
43 | const v = c === 'x' ? r : (r & 0x3 | 0x8);
44 | return v.toString(16);
45 | });
46 | return uuid;
47 | }
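// Editorial note (not part of the original file): this is the common Math.random()-based
// UUID v4 pattern; it is fine for non-security identifiers. In modern browsers (secure
// contexts), crypto.randomUUID() could be used instead, e.g.:
//   const id = typeof crypto?.randomUUID === "function" ? crypto.randomUUID() : generateUUID();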
48 |
49 |
50 | export const createMetaNode = async () => {
51 | const text = await inputDialog.input(
52 | "Create your deployment",
53 | "Workflow name",
54 | );
55 | if (!text) throw new Error("Node not created");
56 | app.graph.beforeChange();
57 |
58 | var node = LiteGraph.createNode("ComfyCloud");
59 |
60 | node.configure({
61 | widgets_values: [text],
62 | properties: {
63 | workflow_name: text,
64 | }
65 | });
66 | node.pos = [0, 0];
67 |
68 | app.graph.add(node);
69 | app.graph.afterChange();
70 | }
71 |
72 | export const getUser = async () => {
73 | const { user } = await fetch(
74 | '/comfy-cloud/user',
75 | ).then((x) => x.json())
76 |
77 | return user;
78 | }
79 |
80 | export const getApiToken = () => {
81 | const { apiKey } = getData();
82 | return apiKey;
83 | }
84 |
85 | export const getWorkflowName = () => {
86 | let deployMeta = app.graph.findNodesByType("ComfyCloud");
87 | const deployMetaNode = deployMeta[0];
88 | // @todo
89 | // handle no deployMetaNode
90 |
91 | if(deployMetaNode) {
92 | const { workflow_name } = deployMetaNode.properties;
93 | //const name = deployMetaNode.widgets[0].value;
94 | return workflow_name;
95 | } else {
96 | return null
97 | }
98 | }
99 | export const getWorkflowId = () => {
100 | let deployMeta = app.graph.findNodesByType("ComfyCloud");
101 | const deployMetaNode = deployMeta[0];
102 |
103 | // @todo
104 | // handle no deployMetaNode
105 |
106 | if(deployMetaNode) {
107 | const { workflow_id } = deployMetaNode.properties;
108 | //const workflow_id = deployMetaNode.widgets[1].value;
109 | return workflow_id;
110 | } else {
111 | return null
112 | }
113 | }
114 | export const getVersion = () => {
115 | let deployMeta = app.graph.findNodesByType("ComfyCloud");
116 | const deployMetaNode = deployMeta[0];
117 | // @todo
118 | // handle no deployMetaNode
119 |
120 | const { version } = deployMetaNode.properties;
121 | //const version = deployMetaNode.widgets[2].value;
122 | return version;
123 | }
124 |
125 | export const setWorkflowId = (id) => {
126 | let deployMeta = app.graph.findNodesByType("ComfyCloud");
127 | const deployMetaNode = deployMeta[0];
128 | // @todo
129 | // handle no deployMetaNode
130 | //deployMetaNode.widgets[1].value = version;
131 | deployMetaNode.properties.workflow_id = id;
132 | }
133 | export const setVersion = (version) => {
134 | let deployMeta = app.graph.findNodesByType("ComfyCloud");
135 | const deployMetaNode = deployMeta[0];
136 | // @todo
137 | // handle no deployMetaNode
138 | //deployMetaNode.widgets[2].value = version;
139 | deployMetaNode.properties.version = version;
140 | }
141 |
142 | export const isCustomNode = (class_type) => {
143 | const defaultCustomNodes = ["KSampler","KSamplerAdvanced","CheckpointLoader","CheckpointLoaderSimple","VAELoader","LoraLoader","CLIPLoader","ControlNetLoader","DiffControlNetLoader","StyleModelLoader","CLIPVisionLoader","UpscaleModelLoader","CLIPVisionEncode","StyleModelApply","CLIPTextEncode","CLIPSetLastLayer","ConditioningCombine","ConditioningAverage ","ConditioningConcat","ConditioningSetArea","ConditioningSetAreaPercentage","ConditioningSetMask","ControlNetApply","ControlNetApplyAdvanced","VAEEncodeForInpaint","SetLatentNoiseMask","VAEDecode","VAEEncode","LatentRotate","LatentFlip","LatentCrop","EmptyLatentImage","LatentUpscale","LatentUpscaleBy","LatentComposite","LatentBlend","LatentFromBatch","RepeatLatentBatch","SaveImage","PreviewImage","LoadImage","LoadImageMask","ImageScale","ImageScaleBy","ImageUpscaleWithModel","ImageInvert","ImagePadForOutpaint","ImageBatch","VAEDecodeTiled","VAEEncodeTiled"]
144 |
145 | if (defaultCustomNodes.indexOf(class_type) !== -1) {
146 | return true
147 | } else {
148 | return false;
149 | }
150 | }
151 |
152 | export const getCustomNodesList = async () => {
153 | const custom_nodes_list = await fetch("/comfy-cloud/custom-nodes-list", {
154 | method: "get",
155 | }).then((x) => x.json())
156 |
157 | return custom_nodes_list.custom_nodes
158 | }
159 |
160 | export const getNodesInputsOutputs = async () => {
161 | const nodes = await fetch("/comfy-cloud/nodes-inputs-outputs", {
162 | method: "get",
163 | }).then((x) => x.json())
164 |
165 | return nodes.nodes
166 | }
167 |
168 |
169 | // Exec
170 | // returns new changes
171 | export const compareWorkflows = (local, cloud) => {
172 | const changes = _.differenceWith(_.toPairs(local), _.toPairs(cloud), _.isEqual)
173 | //const changes = _.difference(_.toPairs(cloud), _.toPairs(local), _.isEqual)
174 |
175 | const diff = changes.reduce((acc, [key, value]) => {
176 | acc[key] = value;
177 | return acc;
178 | }, {});
179 |
180 | //console.log(local, cloud, diff)
181 | return diff
182 | }
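// Editorial sketch (hypothetical objects, not in the original file): compareWorkflows
// returns the key/value pairs in `local` that are not deep-equal to any pair in `cloud`:
//   compareWorkflows({ a: 1, b: 2 }, { a: 1, b: 3 })  // => { b: 2 }
//   compareWorkflows({ a: 1 }, { a: 1 })              // => {}  (isWorkflowUpToDate(diff) === true)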
183 |
184 | export async function validatePrompt(workflow_api) {
185 | app.lastNodeErrors = null;
186 |
187 | const body = {
188 | workflow_api,
189 | }
190 |
191 | const data = await fetch("/comfy-cloud/validate-prompt", {
192 | method: "POST",
193 | body: JSON.stringify(body),
194 | }).then((x) => x.json())
195 |
196 | app.lastNodeErrors = data.node_errors;
197 |
198 | if (data.node_errors.length > 0) {
199 | app.canvas.draw(true, true);
200 | }
201 |
202 | if(data.is_valid){
203 | return true;
204 | } else {
205 | return false;
206 | }
207 | }
208 |
209 | export function extractAfterInput(inputString) {
210 | // Check if the string contains 'input'
211 | if (inputString.includes('input')) {
212 | // Find the index of 'input/'
213 | const index = inputString.indexOf('input/');
214 |
215 | // Check if 'input/' is found
216 | if (index !== -1) {
217 | // Return the substring after 'input/'
218 | return inputString.substring(index + 'input/'.length);
219 | } else {
220 | throw new Error("Path is not in input folder")
221 |
222 | }
223 | }
224 |
225 |   // Return null if 'input' is not found in the path
226 | return null;
227 | }
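// Editorial sketch (hypothetical paths, not in the original file):
//   extractAfterInput("ComfyUI/input/img.png")   // => "img.png"
//   extractAfterInput("ComfyUI/output/img.png")  // => null ('input' not in the path)
//   extractAfterInput("ComfyUI/inputs/img.png")  // throws: "Path is not in input folder"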
228 |
229 |
230 | export const isWorkflowUpToDate = diffDeps => _.isEmpty(diffDeps);
231 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "comfyui-clouds",
3 | "version": "1.0.0",
4 | "description": "",
5 | "main": "/js",
6 | "scripts": {
7 | "lint-staged": "lint-staged",
8 | "prepare": "husky"
9 | },
10 | "husky": {
11 | "hooks": {
12 | "pre-commit": "npm run lint-staged"
13 | }
14 | },
15 | "lint-staged": {
16 | "*.{js,jsx,ts,tsx}": [
17 | "./node_modules/.bin/eslint --fix"
18 | ]
19 | },
20 | "author": "",
21 | "license": "ISC",
22 | "devDependencies": {
23 | "eslint": "^8.57.0",
24 | "husky": "^9.0.11",
25 | "lint-staged": "^15.2.2"
26 | }
27 | }
28 |
--------------------------------------------------------------------------------
/prestartup_script.py:
--------------------------------------------------------------------------------
1 | import datetime
2 | import os
3 | import subprocess
4 | import sys
5 | import atexit
6 | import threading
7 | import logging
8 | import importlib.util
9 |
10 |
11 | from logging.handlers import RotatingFileHandler
12 |
13 | # Running with export CD_ENABLE_LOG=true; python main.py
14 |
15 | # Check for 'cd-enable-log' flag in input arguments
16 | # cd_enable_log = '--cd-enable-log' in sys.argv
17 | cd_enable_log = os.environ.get('CD_ENABLE_LOG', 'false').lower() == 'true'
18 |
19 | def setup():
20 | handler = RotatingFileHandler('comfy-cloud.log', maxBytes=500000, backupCount=5)
21 |
22 | original_stdout = sys.stdout
23 | original_stderr = sys.stderr
24 |
25 | class StreamToLogger():
26 | def __init__(self, log_level):
27 | self.log_level = log_level
28 |
29 | def write(self, buf):
30 | if (self.log_level == logging.INFO):
31 | original_stdout.write(buf)
32 | original_stdout.flush()
33 | elif (self.log_level == logging.ERROR):
34 | original_stderr.write(buf)
35 | original_stderr.flush()
36 |
37 | for line in buf.rstrip().splitlines():
38 | handler.handle(
39 | logging.LogRecord(
40 | name="comfy-cloud",
41 | level=self.log_level,
42 | pathname="prestartup_script.py",
43 | lineno=1,
44 | msg=line.rstrip(),
45 | args=None,
46 | exc_info=None
47 | )
48 | )
49 |
50 | def flush(self):
51 | if (self.log_level == logging.INFO):
52 | original_stdout.flush()
53 | elif (self.log_level == logging.ERROR):
54 | original_stderr.flush()
55 |
56 | # Redirect stdout and stderr to the logger
57 | sys.stdout = StreamToLogger(logging.INFO)
58 | sys.stderr = StreamToLogger(logging.ERROR)
59 |
60 | if cd_enable_log:
61 | print("** Comfy Cloud logging enabled")
62 | setup()
63 |
64 | import subprocess
65 | import os
66 |
67 | def is_git_up_to_date():
68 | try:
69 | # Run git fetch to update remote branches
70 | subprocess.run(["git", "fetch"])
71 |
72 | # Check if the local branch is behind the remote branch
73 | result = subprocess.run(["git", "status", "-uno"], capture_output=True, text=True)
74 | output = result.stdout
75 |
76 | # If "Your branch is up to date" is found in the output, the repository is up to date
77 | if "Your branch is up to date" in output:
78 | return True
79 | else:
80 | return False
81 | except Exception as e:
82 | print("Error:", e)
83 | return False
84 |
85 | def pull_latest():
86 | try:
87 | # Run git pull to fetch and merge changes from the remote repository
88 | subprocess.run(["git", "pull"])
89 | print("Comfy Cloud repository is up to date.")
90 | except Exception as e:
91 | print("Error:", e)
92 |
93 | def check_and_install_packages(package_names):
94 | for package_name in package_names:
95 | if not importlib.util.find_spec(package_name):
96 | print(f"[Comfy Cloud] {package_name} is not installed. Installing...")
97 | subprocess.check_call([sys.executable, "-m", "pip", "install", package_name])
98 |
99 | # Change the current working directory to the script's directory
100 | script_dir = os.path.dirname(os.path.abspath(__file__))
101 | os.chdir(script_dir)
102 | if is_git_up_to_date():
103 | print("Comfy Cloud is up to date.")
104 | else:
105 | print("Comfy Cloud is not up to date. Pulling latest changes...")
106 | pull_latest()
107 |
108 | # Check if requirements are fulfilled
109 | package_names = ["modal","synchronicity", "aiostream"]
110 | check_and_install_packages(package_names)
111 |
112 |
--------------------------------------------------------------------------------
/python/chat/classes.py:
--------------------------------------------------------------------------------
1 | from dataclasses import dataclass, field
2 | from typing import List, Dict, Any
3 | from nodes import NODE_CLASS_MAPPINGS
4 |
5 | @dataclass
6 | class LLMGeneratedGraph:
7 | nodes: dict
8 | edges: dict
9 |
10 | def get_last_link_id(self):
11 | return len(self.edges)
12 |
13 | def get_last_node_id(self):
14 |         return len(self.nodes)
15 |
16 |
17 | # TODO: move to inside ComfyUIWorkflowGraphNodeClass
18 | def _get_widget_values(node):
19 | class_type = node['class_type']
20 | registered_node = NODE_CLASS_MAPPINGS[class_type]()
21 |
22 | node_widget_values = []
23 | if 'widget_values' in node:
24 | node_widget_values = node['widget_values']
25 |
26 | registered_node_inputs_dict = registered_node.INPUT_TYPES()["required"] # dictionary
27 | final_widget_values = {}
28 |
29 | # ComfyUI represents a dropdown widget value with
30 | # index 0 of the tuple as a list
31 | def is_list_selection_widget_value(value):
32 | return type(value[0]) == list
33 |
34 | # Prepare the widget values dictionary for the registered custom node's input types
35 | registered_node_widget_values = {}
36 | for key, value in registered_node_inputs_dict.items():
37 | if len(value) > 1 or is_list_selection_widget_value(value):
38 | registered_node_widget_values[key] = value
39 |
40 | # Build widget values dictionary
41 | for key, value in registered_node_widget_values.items():
42 | if key in node_widget_values:
43 | # If the key exists in the LLM generated output, use that value
44 | final_widget_values[key] = node_widget_values[key]
45 |
46 | else:
47 | # Load final_widget_values with the default
48 | if is_list_selection_widget_value(value):
49 | # If it is a dropdown, just choose the first value of the list
50 | final_widget_values[key] = value[0][0]
51 | else:
52 | final_widget_values[key] = value[1]["default"]
53 |
54 | final_widget_values_list = list(final_widget_values.values())
55 |
56 | # edge case:
57 | # For some reason, KSampler widget values do not account for 'control_after_generate'.
58 | if node['class_type'] == "KSampler":
59 | final_widget_values_list.insert(1, "randomize")
60 |
61 |
62 | return final_widget_values_list
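# Editorial sketch (hypothetical node definition, not in the original file): given a
# registered node whose INPUT_TYPES()["required"] is
#   {"ckpt_name": (["sd15.ckpt", "sdxl.ckpt"],), "steps": ("INT", {"default": 20})}
# and an LLM node with no "widget_values", the function above returns
#   ["sd15.ckpt", 20]   # dropdowns fall back to their first option, others to "default"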
63 |
64 |
65 |
66 | @dataclass
67 | class ComfyUIWorkflowGraphNode:
68 | id: int
69 | type: str
70 | order: int
71 | mode: int = 0
72 | pos: List[int] = field(default_factory=list)
73 | flags: Dict[str, Any] = field(default_factory=dict)
74 | inputs: List[object] = field(default_factory=list)
75 | outputs: List[object] = field(default_factory=list)
76 | properties: Dict[str, Any] = field(default_factory=dict)
77 | widgets_values: List[Any] = field(default_factory=list)
78 |
79 | # This is the object from NODE_CLASS_MAPPINGS and the
80 | # internal object used by ComfyUI
81 | _registered_node: Any = field(init=False)
82 |
83 | def __post_init__(self):
84 | node = NODE_CLASS_MAPPINGS[self.type]()
85 | self._registered_node = node
86 |
87 | # Prepare input types
88 | registered_node_inputs_dict = self._registered_node.INPUT_TYPES()["required"] # dictionary
89 | registered_node_inputs_list = list(registered_node_inputs_dict.keys())
90 |
91 | for index, (key, val) in enumerate(registered_node_inputs_dict.items()):
92 | if len(val) > 1 or type(val[0]) == list:
93 | continue
94 |
95 | self.inputs.append({
96 | 'name': key,
97 | 'type': val[0],
98 | 'link': None,
99 | })
100 |
101 | # Prepare return types
102 | registered_node_outputs_tuple = self._registered_node.RETURN_TYPES
103 | for index, ret_typ in enumerate(registered_node_outputs_tuple):
104 | self.outputs.append({
105 | 'name': ret_typ,
106 | 'type': ret_typ,
107 | 'links': [],
108 | 'slot_index': index
109 | })
110 |
111 |
112 | def __pre_serialize__(self):
113 | # _registered_node is a class from nodes.py
114 | # and does not need to be sent to the front end
115 | self._registered_node = None
116 |
117 | def _get_output_type_from_slot_index(self, slot_index):
118 | registered_node = self._registered_node
119 | registered_node_outputs_tuple = registered_node.RETURN_TYPES
120 | print(registered_node_outputs_tuple, slot_index)
121 | return registered_node_outputs_tuple[slot_index]
122 |
123 | def _get_input_slot_index_from_name(self, key):
124 | registered_node = self._registered_node
125 | registered_node_inputs_dict = registered_node.INPUT_TYPES()["required"] # dictionary
126 | registered_node_inputs_list = list(registered_node_inputs_dict.keys())
127 |
128 | print(registered_node_inputs_list)
129 |
130 | # find the index of the key in the list
131 | return registered_node_inputs_list.index(key)
132 |
133 | def add_input(self, name: str, typ: str, link: int):
134 | # find by name
135 | for input in self.inputs:
136 | if input['name'] == name:
137 | input['link'] = link
138 |
139 |
140 | def add_output(self, link_id, slot_index):
141 | # Append link_id to existing slot index if it exists
142 | for output in self.outputs:
143 | if output['slot_index'] == slot_index:
144 | output['links'].append(link_id)
145 |
146 |
147 | @dataclass
148 | class ComfyUIWorkflowGraphLink:
149 | id: int
150 | from_node_id: int
151 | from_node_output_slot: int
152 | to_node_id: int
153 | to_node_input_slot: int
154 | from_node_output_type: str
155 |
156 | def to_list(self):
157 | return [
158 | self.id,
159 | self.from_node_id,
160 | self.from_node_output_slot,
161 | self.to_node_id,
162 | self.to_node_input_slot,
163 | self.from_node_output_type
164 | ]
165 |
166 | @dataclass
167 | class ComfyUIWorkflowGraph:
168 | """
169 | This is the structure accepted by ComfyUI's LoadGraph method.
170 | """
171 |
172 | last_node_id: int
173 | last_link_id: int
174 | nodes: List[ComfyUIWorkflowGraphNode] = field(default_factory=list)
175 | links: List[object] = field(default_factory=list)
176 | groups: List[object] = field(default_factory=list)
177 | config: Dict[str, object] = field(default_factory=dict)
178 | extra: Dict[str, object] = field(default_factory=dict)
179 | version: float = 0.4
180 |
181 | # For easy access to nodes by id
182 | _nodes_dict: Dict[str, int] = field(init=False, default_factory=dict)
183 |
184 | def add_node(self, node: ComfyUIWorkflowGraphNode):
185 | self.nodes.append(node)
186 | node_index = len(self.nodes) - 1
187 | self._nodes_dict[node.id] = node_index
188 |
189 | def add_link(self, link: ComfyUIWorkflowGraphLink):
190 | self.links.append(link.to_list())
191 |
192 | def get_node_by_id(self, node_id):
193 | index = self._nodes_dict.get(node_id, None)
194 | node = self.nodes[index]
195 | return node
196 |
197 |
198 |
--------------------------------------------------------------------------------
/python/chat/comfy_types.py:
--------------------------------------------------------------------------------
1 | from nodes import NODE_CLASS_MAPPINGS
2 |
3 | def get_inputs_outputs():
4 | node_inputs_outputs = {}
5 |
6 | for key in NODE_CLASS_MAPPINGS:
7 | data = NODE_CLASS_MAPPINGS[key]()
8 | input_types = data.INPUT_TYPES()
9 | output_types = data.RETURN_TYPES
10 |
11 | if "required" in input_types:
12 | node_inputs_outputs[key] = {
13 | "input_types": input_types["required"],
14 | "output_types": output_types
15 | }
16 |
17 | return node_inputs_outputs
18 |
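# Editorial sketch (illustrative shape only, not in the original file): the mapping built
# above looks roughly like
#   {"CheckpointLoaderSimple": {
#        "input_types": {"ckpt_name": ([...available checkpoints...],)},
#        "output_types": ("MODEL", "CLIP", "VAE")},
#    ...}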
19 | def prepare_input_output_types():
20 | all_input_output_types = {}
21 | for key, value in input_output_types.items():
22 | class_inputs = value["input_types"]
23 | class_outputs = value["output_types"]
24 |
25 |         for input_name, input_def in class_inputs.items():
26 |             arg = input_name
27 |             typ = input_def[0]
28 |             all_input_output_types[arg] = typ
29 | return all_input_output_types
30 |
31 | def get_type_from_input_name(input_name):
32 | return all_input_output_types[input_name]
33 |
34 |
35 | input_output_types = get_inputs_outputs()
36 | all_input_output_types = prepare_input_output_types()
37 |
--------------------------------------------------------------------------------
/python/chat/format.py:
--------------------------------------------------------------------------------
1 | import dataclasses
2 | import json
3 | from .classes import ComfyUIWorkflowGraph
4 |
5 | def preprocess_bot_response(bot_response):
6 | # Remove triple ticks
7 | bot_response = bot_response.replace("```json", "")
8 | bot_response = bot_response.replace("```", "")
9 | return json.loads(bot_response)
10 |
11 | def postprocess(graph: ComfyUIWorkflowGraph) -> dict:
12 | # Clean up intermediate values
13 | for node in graph.nodes:
14 | node.__pre_serialize__()
15 |
16 | serialized = dataclasses.asdict(graph)
17 | return serialized
18 |
19 |
20 |
--------------------------------------------------------------------------------
/python/chat/graph.py:
--------------------------------------------------------------------------------
1 | from .classes import ComfyUIWorkflowGraph, ComfyUIWorkflowGraphNode, ComfyUIWorkflowGraphLink, LLMGeneratedGraph, _get_widget_values
2 | from .comfy_types import input_output_types, get_type_from_input_name
3 | from nodes import NODE_CLASS_MAPPINGS
4 |
5 |
6 | def transform_input_to_graph(llm_graph) -> ComfyUIWorkflowGraph:
7 | llm_graph = LLMGeneratedGraph(
8 | nodes=llm_graph["nodes"],
9 | edges=llm_graph["edges"]
10 | )
11 | last_node_id = llm_graph.get_last_node_id()
12 | last_link_id = llm_graph.get_last_link_id()
13 |
14 | comfy_graph = ComfyUIWorkflowGraph(
15 | last_node_id=llm_graph.get_last_node_id(),
16 | last_link_id=llm_graph.get_last_link_id(),
17 | )
18 |
19 | # Add nodes
20 | for index, node in enumerate(llm_graph.nodes):
21 | widget_values = _get_widget_values(node)
22 | new_node = ComfyUIWorkflowGraphNode(
23 | id=node["id"],
24 | type=node["class_type"],
25 | order=index,
26 | widgets_values=widget_values
27 | )
28 |
29 | comfy_graph.add_node(new_node)
30 |
31 | for index, edge in enumerate(llm_graph.edges):
32 | link_id = index + 1
33 | to_node = comfy_graph.get_node_by_id(edge['to'])
34 | from_node = comfy_graph.get_node_by_id(edge['from'])
35 |
36 | to_node.add_input(
37 | edge['to_input'],
38 | get_type_from_input_name(edge['to_input']),
39 | link_id,
40 | )
41 | from_node.add_output(
42 | link_id,
43 | edge['from_output']
44 | )
45 |
46 | # Create links
47 | link = ComfyUIWorkflowGraphLink(
48 | id=link_id,
49 | from_node_id=edge['from'],
50 | from_node_output_slot=edge['from_output'],
51 | to_node_id=edge['to'],
52 | to_node_input_slot=to_node._get_input_slot_index_from_name(edge['to_input']),
53 | from_node_output_type=from_node._get_output_type_from_slot_index(edge['from_output'])
54 | )
55 |
56 | comfy_graph.add_link(link)
57 |
58 |
59 | return comfy_graph
60 |
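# Editorial sketch (hypothetical LLM output, not in the original file): judging from the
# key accesses above, the expected input shape is roughly
#   {"nodes": [{"id": 1, "class_type": "CheckpointLoaderSimple"},
#              {"id": 2, "class_type": "KSampler", "widget_values": {...}}],
#    "edges": [{"from": 1, "from_output": 0, "to": 2, "to_input": "model"}]}
# which transform_input_to_graph converts into a ComfyUIWorkflowGraph for ComfyUI's LoadGraph.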
--------------------------------------------------------------------------------
/python/custom_routes.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import base64
3 | import concurrent.futures
4 | import copy
5 | import os
6 | import uuid
7 | import time
8 | import random
9 | import requests
10 | import sys
11 | import datetime
12 | from aiohttp import web
13 | from .user import load_user_profile
14 |
15 | import server
16 | import execution
17 | import folder_paths
18 |
19 | from .upload import upload_file_specs
20 | from .upload.spec import FileSpecContextManager
21 | from .upload.progress import progress_dict, reset_progress
22 |
23 | from .utils.paths import build_paths
24 | from .utils.task import task_create, task_set_status, task_set_progress, task_get_by_id, task_serialize, TaskStatus
25 |
26 | from .utils.requirements import update_requirements
27 | from .utils.custom_nodes import get_custom_node_list_silent
28 |
29 | from .chat.graph import transform_input_to_graph
30 | from .chat.format import preprocess_bot_response, postprocess
31 |
32 |
33 | @server.PromptServer.instance.routes.post("/comfy-cloud/validate-input-path")
34 | async def validate_input_path(request):
35 | try:
36 | data = await request.json()
37 | input_paths = data.get("paths")
38 |
39 | paths_not_found = []
40 | base = folder_paths.base_path
41 | for path in input_paths:
42 | full_path = os.path.join(base, 'input', path)
43 |
44 | if not os.path.exists(full_path):
45 | paths_not_found.append(path)
46 |
47 | return web.json_response({ "invalid_paths": paths_not_found }, status=200)
48 | except Exception as e:
49 | print("Error:", e)
50 | return web.json_response({ "error": e }, status=400)
51 |
52 | @server.PromptServer.instance.routes.post("/comfy-cloud/validate-path")
53 | async def validate_path_directory(request):
54 | try:
55 | data = await request.json()
56 | input_path = data.get("path")
57 |
58 | exists = os.path.exists(input_path)
59 |
60 | return web.json_response({ "exists": exists }, status=200)
61 | except Exception as e:
62 | print("Error:", e)
63 | return web.json_response({ "error": e }, status=400)
64 |
65 | @server.PromptServer.instance.routes.post("/comfy-cloud/validate-prompt")
66 | async def comfy_cloud_validate_prompt(request):
67 | data = await request.json()
68 |
69 | workflow_api = data.get("workflow_api")
70 |
71 | def random_seed(num_digits=15):
72 | range_start = 10 ** (num_digits - 1)
73 | range_end = (10**num_digits) - 1
74 | return random.randint(range_start, range_end)
75 |
76 | for key in workflow_api:
77 | if 'inputs' in workflow_api[key] and 'seed' in workflow_api[key]['inputs']:
78 | workflow_api[key]['inputs']['seed'] = random_seed()
79 |
80 | try:
81 | valid = execution.validate_prompt(workflow_api)
82 |
83 | if valid[0]:
84 | return web.json_response({ "is_valid": True, "node_errors": valid[3] }, status=200)
85 | else:
86 | return web.json_response({ "is_valid": False, "node_errors": valid[3] }, status=200)
87 |
88 | except Exception as e:
89 | print("Error:", e)
90 | return web.json_response({ "error": e }, status=400)
91 |
92 |
93 | @server.PromptServer.instance.routes.get("/comfy-cloud/user")
94 | async def comfy_cloud_run(request):
95 | try:
96 | data = load_user_profile()
97 | return web.json_response({
98 | "user": data
99 | }, content_type='application/json')
100 | except Exception as e:
101 | print("Error:", e)
102 | return web.json_response({ "error": e }, status=400)
103 |
104 | @server.PromptServer.instance.routes.get("/comfy-cloud/custom-nodes-list")
105 | async def get_custom_nodes_list(request):
106 | custom_nodes = get_custom_node_list_silent()
107 | return web.json_response({'custom_nodes': custom_nodes}, content_type='application/json')
108 |
109 | @server.PromptServer.instance.routes.post("/comfy-cloud/send-message")
110 | async def chat(request):
111 | try:
112 | json_data = await request.json()
113 | auth_header = request.headers.get('Authorization')
114 |
115 | message = json_data["message"]
116 | origin = "Comfy Cloud"
117 | url = "https://comfyui-cloud-a0cf78bd8c3d.herokuapp.com/chat/send-message"
118 | payload = {
119 | "message": message,
120 | "origin": origin
121 | }
122 | headers = {
123 | "Content-Type": "application/json",
124 | "Authorization": auth_header
125 | }
126 |
127 | response_data = None
128 | response = requests.post(url, json=payload, headers=headers)
129 | if response.status_code == 200:
130 | response_data = response.json()
131 | else:
132 | raise Exception("Failed to send message to API")
133 |
134 | bot_response = response_data["responses"]["bot"]
135 |
136 | # Parse response
137 | bot_response = preprocess_bot_response(bot_response)
138 |
139 | # Construct comfyui node
140 | graph = transform_input_to_graph(bot_response)
141 | data = postprocess(graph)
143 |
144 | return web.json_response({'nodes': data}, content_type='application/json')
145 |
146 | except Exception as e:
147 | print("Error:", e)
148 | return web.json_response({ "error": e }, status=400)
149 |
150 |
151 | async def upload_task_execution(task_id, file_specs, workflow_id):
152 | try:
153 | task_set_status(task_id, TaskStatus.HASHING)
154 | await upload_file_specs(
155 | file_specs,
156 | workflow_id,
157 | hashing_complete_callback = lambda: task_set_status(task_id, TaskStatus.UPLOADING),
158 | )
159 |
160 | # cleanup temp
161 | task_set_status(task_id, TaskStatus.COMPLETED)
162 | except Exception as e:
163 | print("Upload task execution error:", e)
164 | task_set_status(task_id, TaskStatus.ERROR)
165 |
166 | finally:
167 | reset_progress(progress_dict)
168 |
169 |
170 | @server.PromptServer.instance.routes.post("/comfy-cloud/upload")
171 | async def upload_dependencies(request):
172 | # Make a request to localhost
173 | try:
174 | json_data = await request.json()
175 | workflow_id = json_data["workflow_id"]
176 | base = folder_paths.base_path
177 |
178 | # Paths
179 | paths_to_upload = {
180 | "models": os.path.join(base, "models"),
181 | "custom_nodes": os.path.join(base, "custom_nodes"),
182 | "input": os.path.join(base, "input")
183 | }
184 | dep_lists = {
185 | "models": json_data["modelsToUpload"],
186 | "custom_nodes": json_data["nodesToUpload"],
187 | "input": json_data["filesToUpload"]
188 | }
189 |
190 | # Create upload task
191 | task_id = task_create()
192 |
193 | # Our server uses a custom dependency manager
194 | # that requires a specific format for requirements.txt.
195 | # Loop through all dependent custom nodes and patch.
196 | update_requirements()
197 |
198 | # Get dependency paths
199 | paths = build_paths(paths_to_upload, dep_lists, workflow_id)
200 |
201 | # Generate file specs for upload
202 | file_specs = []
203 |
204 | with FileSpecContextManager(file_specs) as batch:
205 | for path in paths:
206 | local_path = path[0]
207 | remote_path = path[1]
208 | if os.path.isfile(local_path):
209 | batch.put_file(local_path, remote_path)
210 | elif os.path.isdir(local_path):
211 | batch.put_directory(local_path, remote_path)
212 | else:
213 | raise Exception("Something went wrong")
214 |
215 | print("Generating specs")
216 | file_specs = batch.generate_specs()
217 |
218 | # Finally, we queue upload task in background
219 | asyncio.ensure_future(upload_task_execution(task_id, file_specs, workflow_id))
220 |
221 | return web.json_response({'success': True, 'task_id': task_id}, content_type='application/json')
222 | except Exception as e:
223 | print("Error", e)
224 | return web.json_response({'success': False, 'message': str(e)}, status=500, content_type='application/json')
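# Editorial sketch (hypothetical request body, not in the original file): the JSON payload
# read by the /comfy-cloud/upload route above looks like
#   {"workflow_id": "...",
#    "modelsToUpload": ["checkpoints/sd15.ckpt"],
#    "nodesToUpload": ["ComfyUI-AnimateDiffEvolved"],
#    "filesToUpload": ["img.png"]}
# On success it returns {"success": true, "task_id": ...}, which the client polls via
# GET /comfy-cloud/upload-status/{task_id}.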
225 |
226 | @server.PromptServer.instance.routes.get("/comfy-cloud/upload-status/{task_id}")
227 | async def get_task_status(request):
228 | task_id = request.match_info['task_id']
229 |
230 | task_set_progress(task_id, progress_dict)
231 | task = task_get_by_id(task_id)
232 |
233 | return web.json_response(task_serialize(task))
234 |
235 |
236 |
--------------------------------------------------------------------------------
/python/test.py:
--------------------------------------------------------------------------------
1 | import os
2 | import shutil
3 | import subprocess
4 | from pathlib import Path
5 | from utils.paths import build_paths
6 | import pprint
7 |
8 | """
9 | For testing paths.py
10 | """
11 |
12 | current_directory = os.getcwd()
13 | _temp_dir = os.path.join(current_directory, "_temp")
14 |
15 | test_file = os.path.join(_temp_dir, "test_file")
16 | test_dir = os.path.join(_temp_dir, "test_dir")
17 |
18 | def create_test_files():
19 | if os.path.isdir(_temp_dir):
20 | cleanup_test_files()
21 |
22 | create_temp = ["mkdir", _temp_dir]
23 | subprocess.run(create_temp)
24 |
25 | # Make fake models dir
26 | directories = ["models", "input", "custom_nodes", "custom_nodes/ComfyUI-AnimateDiffEvolved", "models/checkpoints", "models/checkpoints/segm", "models/controlnet", "models/vae"]
27 | for directory in directories:
28 | os.makedirs(os.path.join(_temp_dir, directory), exist_ok=True)
29 |
30 | Path(os.path.join(_temp_dir, "models/checkpoints", "test_checkpoint")).touch()
31 | Path(os.path.join(_temp_dir, "models/checkpoints/segm", "segm_test")).touch()
32 | Path(os.path.join(_temp_dir, "models/controlnet", "test_controlnet")).touch()
33 | Path(os.path.join(_temp_dir, "custom_nodes/ComfyUI-AnimateDiffEvolved", "__init__.py")).touch()
34 | Path(os.path.join(_temp_dir, "input", "test_input1")).touch()
35 | Path(os.path.join(_temp_dir, "input", "test_input2")).touch()
36 |
37 |
38 | def cleanup_test_files():
39 | shutil.rmtree(_temp_dir)
40 |
41 | def can_find_paths():
42 | try:
43 |
44 | # Paths
45 | base = _temp_dir
46 | paths_to_upload = {
47 | "models": os.path.join(base, "models"),
48 | "custom_nodes": os.path.join(base, "custom_nodes"),
49 | "input": os.path.join(base, "input")
50 | }
51 | dep_lists = {
52 | "models": ["segm/segm_test", "test_controlnet"],
53 | "custom_nodes": ["ComfyUI-AnimateDiffEvolved"],
54 | "input": ["test_input1"]
55 | }
56 |
57 | paths = build_paths(paths_to_upload, dep_lists, "0")
58 |
59 | for path in paths:
60 | local_path = path[0]
61 | remote_path = path[1]
62 |
63 | if os.path.isfile(local_path):
64 | print(f"Putting file \n from: {local_path}\n to: {remote_path}")
65 | if os.path.isdir(local_path):
66 | print(f"Putting dir \n from: {local_path}\n to: {remote_path}")
67 |
68 | assert os.path.exists(local_path)
69 |
70 |
71 | print("TEST SUCCESSFUL")
72 |
73 | except Exception as e:
74 | print("Error:",e)
75 |
76 |
77 | create_test_files()
78 | can_find_paths()
79 | cleanup_test_files()
80 |
--------------------------------------------------------------------------------
/python/upload/__init__.py:
--------------------------------------------------------------------------------
1 | import io
2 | import os
3 | import asyncio
4 | import aiostream
5 | import json
6 | from typing import List, Callable
7 |
8 | from .spec import FileSpecContextManager, serialize_spec, FileUploadSpec
9 | from .blob import blob_upload
10 | from .hash import get_upload_hashes
11 | from .net import make_post_request
12 |
13 | base_url = "https://storage.comfyui-cloud.com"
14 |
15 | async def upload_file_specs(
16 | file_specs: List[FileUploadSpec],
17 | workflow_id: str,
18 | hashing_complete_callback: Callable = None
19 | ):
20 | """
21 |     Takes in a list of file specs and uploads them.
22 | """
23 |
24 | # Send dict version of file specs to client
25 | serialize_spec_stream = aiostream.stream.map(file_specs, serialize_spec, task_limit=20)
26 | serialized_specs = await aiostream.stream.list(serialize_spec_stream)
27 | serialized_specs_dict = {}
28 |
29 | # Adds id to list, as well as creates a dict version
30 | for idx, spec in enumerate(serialized_specs):
31 | spec["id"] = idx
32 | serialized_specs_dict[idx] = spec
33 |
34 | # Check serialized specs
35 | max_size = 50 * 1024 * 1024 # 50 MB in bytes
36 | json_data = json.dumps(serialized_specs)
37 | json_size = len(json_data.encode('utf-8'))
38 | if json_size > max_size:
39 |         raise Exception(f'Workflow too large error. File specs size exceeds the 50MB limit: {json_size} bytes')
40 |
41 | if hashing_complete_callback is not None:
42 | hashing_complete_callback()
43 |
44 | print("Uploading dependencies to cloud.")
45 |
46 | # Get a list of blob_ids from the client
47 | url = f'{base_url}/upload-urls'
48 | response_data = await make_post_request(url, { "specs": serialized_specs, "workflow_id": workflow_id })
49 | upload_data = response_data["data"]
50 |
51 | # Create the upload data
52 | # Mix the response data from the server and the file spec
53 | # and pass it to the generator
54 | for spec in upload_data:
55 | if spec["id"] in serialized_specs_dict:
56 | spec["data"] = {
57 | **serialized_specs_dict[spec["id"]],
58 | **spec["data"]
59 | }
60 |
61 | def gen_upload_providers():
62 | for gen in upload_data:
63 | yield gen
64 |
65 | async def _upload_and_commit(spec):
66 | await blob_upload(spec)
67 |
68 | # Blob upload has finished. Put the spec
69 | # on the queue to be committed
70 | url = f'{base_url}/commit'
71 | response_data = await make_post_request(url, { "spec": spec })
72 |
73 | files_stream = aiostream.stream.iterate(gen_upload_providers())
74 | uploads_stream = aiostream.stream.map(files_stream, _upload_and_commit, task_limit=20)
75 | files = await aiostream.stream.list(uploads_stream)
76 | print("Successfully uploaded dependent models and custom nodes to cloud.")
77 |
78 |
--------------------------------------------------------------------------------
/python/upload/blob.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import hashlib
3 | import io
4 | import os
5 | from contextlib import contextmanager
6 | from pathlib import Path, PurePosixPath
7 | from typing import Any, AsyncIterator, BinaryIO, Callable, Dict, List, Optional, Union
8 | from urllib.parse import urlparse
9 |
10 | from aiohttp import BytesIOPayload
11 | from aiohttp.abc import AbstractStreamWriter
12 |
13 | from modal.exception import ExecutionError
14 | from .sync import retry
15 | from .net import http_client_with_tls, retry_transient_errors
16 |
17 | from tqdm import tqdm
18 | import logging
19 | import uuid
20 |
21 | #progress = {}
22 | from .progress import progress_update
23 |
24 | logger = logging.getLogger()
25 |
26 |
27 | # Max size for function inputs and outputs.
28 | MAX_OBJECT_SIZE_BYTES = 2 * 1024 * 1024 # 2 MiB
29 |
30 | # If a file is LARGE_FILE_LIMIT bytes or larger, it's uploaded to blob store (s3) instead of going through grpc
31 | # It will also make sure to chunk the hash calculation to avoid reading the entire file into memory
32 | LARGE_FILE_LIMIT = 4 * 1024 * 1024 # 4 MiB
33 |
34 | # Max parallelism during map calls
35 | BLOB_MAX_PARALLELISM = 10
36 |
37 | # read ~16MiB chunks by default
38 | DEFAULT_SEGMENT_CHUNK_SIZE = 2**24
39 |
40 |
41 | class BytesIOSegmentPayload(BytesIOPayload):
42 | """Modified bytes payload for concurrent sends of chunks from the same file.
43 |
44 | Adds:
45 | * read limit using remaining_bytes, in order to split files across streams
46 | * larger read chunk (to prevent excessive read contention between parts)
47 | * calculates an md5 for the segment
48 |
49 | Feels like this should be in some standard lib...
50 | """
51 |
52 | def __init__(
53 | self,
54 | bytes_io: BinaryIO, # should *not* be shared as IO position modification is not locked
55 | segment_start: int,
56 | segment_length: int,
57 | filename: str,
58 | chunk_size: int = DEFAULT_SEGMENT_CHUNK_SIZE,
59 | ):
60 | # not thread safe constructor!
61 | super().__init__(bytes_io)
62 | self.initial_seek_pos = bytes_io.tell()
63 | self.segment_start = segment_start
64 | self.segment_length = segment_length
65 | # seek to start of file segment we are interested in, in order to make .size() evaluate correctly
66 | self._value.seek(self.initial_seek_pos + segment_start)
67 | assert self.segment_length <= super().size
68 | self.chunk_size = chunk_size
69 | self.reset_state()
70 | self.filename = filename
71 |
72 | def reset_state(self):
73 | self._md5_checksum = hashlib.md5()
74 | self.num_bytes_read = 0
75 | self._value.seek(self.initial_seek_pos)
76 |
77 | @contextmanager
78 | def reset_on_error(self):
79 | try:
80 | yield
81 | finally:
82 | self.reset_state()
83 |
84 | @property
85 | def filename(self):
86 | return self._filename
87 |
88 | @filename.setter
89 | def filename(self, value):
90 | self._filename = value
91 |
92 | @property
93 | def size(self) -> int:
94 | return self.segment_length
95 |
96 | def md5_checksum(self):
97 | return self._md5_checksum
98 |
99 | async def write(self, writer: AbstractStreamWriter):
100 | loop = asyncio.get_event_loop()
101 |
102 | async def safe_read():
103 | read_start = self.initial_seek_pos + self.segment_start + self.num_bytes_read
104 | self._value.seek(read_start)
105 | num_bytes = min(self.chunk_size, self.remaining_bytes())
106 | chunk = await loop.run_in_executor(None, self._value.read, num_bytes)
107 |
108 | await loop.run_in_executor(None, self._md5_checksum.update, chunk)
109 | self.num_bytes_read += len(chunk)
110 | return chunk
111 |
112 | chunk = await safe_read()
113 | while chunk and self.remaining_bytes() > 0:
114 | await writer.write(chunk)
115 | chunk = await safe_read()
116 | if chunk:
117 | await writer.write(chunk)
118 |
119 | def remaining_bytes(self):
120 | """
121 | print("Remaining bytes", self.filename, self.segment_length - self.num_bytes_read)
122 | if self.filename not in progress:
123 | progress[self.filename] = {}
124 |
125 | progress[self.filename]["value"] = self.segment_length - self.num_bytes_read
126 | if "max" not in progress[self.filename]:
127 | progress[self.filename]["max"] = self.segment_length - self.num_bytes_read
128 | """
129 |
130 | progress_update(filename=self.filename, value=self.segment_length - self.num_bytes_read, max=self.segment_length)
131 | return self.segment_length - self.num_bytes_read
132 |
133 |
134 | @retry(n_attempts=5, base_delay=0.5, timeout=None)
135 | async def _upload_to_s3_url(
136 | upload_url,
137 | payload: BytesIOSegmentPayload,
138 | content_md5_b64: Optional[str] = None,
139 | content_type: Optional[str] = "application/octet-stream", # set to None to force omission of ContentType header
140 | ) -> str:
141 | """Returns etag of s3 object which is a md5 hex checksum of the uploaded content"""
142 | with payload.reset_on_error(): # ensure retries read the same data
143 | async with http_client_with_tls(timeout=None) as session:
144 | headers = {}
145 | if content_md5_b64 and use_md5(upload_url):
146 | headers["Content-MD5"] = content_md5_b64
147 | if content_type:
148 | headers["Content-Type"] = content_type
149 |
150 |
151 | #print("Uploading to s3", upload_url, payload)
152 | async with session.put(
153 | upload_url,
154 | data=payload,
155 | headers=headers,
156 | skip_auto_headers=["content-type"] if content_type is None else [],
157 | ) as resp:
158 |
159 | # S3 signal to slow down request rate.
160 | if resp.status == 503:
161 | logger.warning("Received SlowDown signal from S3, sleeping for 1 second before retrying.")
162 | await asyncio.sleep(1)
163 |
164 | if resp.status != 200:
165 | try:
166 | text = await resp.text()
167 | except Exception:
168 | text = ""
169 | raise ExecutionError(f"Put to url {upload_url} failed with status {resp.status}: {text}")
170 |
171 | # client side ETag checksum verification
172 | # the s3 ETag of a single part upload is a quoted md5 hex of the uploaded content
173 | etag = resp.headers["ETag"].strip()
174 | if etag.startswith(("W/", "w/")): # see https://www.rfc-editor.org/rfc/rfc7232#section-2.3
175 | etag = etag[2:]
176 | if etag[0] == '"' and etag[-1] == '"':
177 | etag = etag[1:-1]
178 | remote_md5 = etag
179 |
180 | local_md5_hex = payload.md5_checksum().hexdigest()
181 | if local_md5_hex != remote_md5:
182 | raise ExecutionError(
183 | f"Local data and remote data checksum mismatch ({local_md5_hex} vs {remote_md5})"
184 | )
185 |
186 | return remote_md5
187 |
188 | #print("Done upload to s3")
189 |
190 |
191 | async def perform_multipart_upload(
192 | data_file: Union[BinaryIO, io.BytesIO, io.FileIO],
193 | *,
194 | content_length: int,
195 | max_part_size: int,
196 | part_urls: List[str],
197 | completion_url: str,
198 | filename: str,
199 | upload_chunk_size: int = DEFAULT_SEGMENT_CHUNK_SIZE,
200 | ):
201 | upload_coros = []
202 | file_offset = 0
203 | num_bytes_left = content_length
204 |
205 | # Give each part its own IO reader object to avoid needing to
206 | # lock access to the reader's position pointer.
207 | data_file_readers: List[BinaryIO]
208 | if isinstance(data_file, io.BytesIO):
209 | view = data_file.getbuffer() # does not copy data
210 | data_file_readers = [io.BytesIO(view) for _ in range(len(part_urls))]
211 | else:
212 | filename = data_file.name
213 | data_file_readers = [open(filename, "rb") for _ in range(len(part_urls))]
214 |
215 | for part_number, (data_file_rdr, part_url) in enumerate(zip(data_file_readers, part_urls), start=1):
216 | part_length_bytes = min(num_bytes_left, max_part_size)
217 | part_payload = BytesIOSegmentPayload(
218 | data_file_rdr,
219 | segment_start=file_offset,
220 | segment_length=part_length_bytes,
221 | chunk_size=upload_chunk_size,
222 | filename=filename,
223 | )
224 | upload_coros.append(_upload_to_s3_url(part_url, payload=part_payload, content_type=None))
225 | num_bytes_left -= part_length_bytes
226 | file_offset += part_length_bytes
227 |
228 | part_etags = await asyncio.gather(*upload_coros)
229 |
230 | # The body of the complete_multipart_upload command needs some data in xml format:
231 |     completion_body = "<CompleteMultipartUpload>\n"
232 |     for part_number, etag in enumerate(part_etags, 1):
233 |         completion_body += f"""<Part>\n<PartNumber>{part_number}</PartNumber>\n<ETag>"{etag}"</ETag>\n</Part>\n"""
234 |     completion_body += "</CompleteMultipartUpload>"
235 |
236 | # etag of combined object should be md5 hex of concatendated md5 *bytes* from parts + `-{num_parts}`
237 | bin_hash_parts = [bytes.fromhex(etag) for etag in part_etags]
238 |
239 | expected_multipart_etag = hashlib.md5(b"".join(bin_hash_parts)).hexdigest() + f"-{len(part_etags)}"
240 | async with http_client_with_tls(timeout=None) as session:
241 | resp = await session.post(
242 | completion_url, data=completion_body.encode("ascii"), skip_auto_headers=["content-type"]
243 | )
244 | if resp.status != 200:
245 | try:
246 | msg = await resp.text()
247 | except Exception:
248 | msg = ""
249 | raise ExecutionError(f"Error when completing multipart upload: {resp.status}\n{msg}")
250 | else:
251 | response_body = await resp.text()
252 | if expected_multipart_etag not in response_body:
253 | raise ExecutionError(
254 | f"Hash mismatch on multipart upload assembly: {expected_multipart_etag} not in {response_body}"
255 | )
256 |
257 | async def blob_upload(spec):
258 | r"""
259 | This spec is not the same as FileUploadSpec.
260 | It is an altered version provided from the server
261 | that includes upload data on top of the existing
262 | FileUploadSpec type
263 | ---
264 | type: str,
265 | id: str,
266 | blob_id: str,
267 | data: {
268 | # byte upload only
269 | upload_url: str,
270 |
271 | # multipart upload only
272 | "max_part_size": str
273 | "part_urls": dict
274 | "completion_url": str
275 |
276 | # Fields from FileUploadSpec
277 | **FileUploadSpec
278 | }
279 | """
280 | is_multipart = spec["type"] == "multipart"
281 | resp = spec["data"]
282 |
283 | # Find file
284 | filename = resp["source_description"]
285 | data = open(filename, "rb")
286 |
287 | if is_multipart:
288 | await perform_multipart_upload(
289 | data,
290 | content_length=resp["content_length"],
291 | max_part_size=resp["max_part_size"],
292 | part_urls=resp["part_urls"],
293 | completion_url=resp["completion_url"],
294 | filename=filename,
295 | )
296 |
297 | else:
298 | content_length = resp["content_length"]
299 | upload_hashes = resp["upload_hashes"]
300 |
301 | payload = BytesIOSegmentPayload(data, segment_start=0, segment_length=content_length, filename = filename)
302 | await _upload_to_s3_url(
303 | resp["upload_url"],
304 | payload,
305 | # for single part uploads, we use server side md5 checksums
306 | content_md5_b64=upload_hashes["md5_base64"],
307 | )
308 |
309 | return spec
310 |
311 |
312 |
313 | def use_md5(url: str) -> bool:
314 | """This takes an upload URL in S3 and returns whether we should attach a checksum.
315 |
316 | It's only a workaround for missing functionality in moto.
317 | https://github.com/spulec/moto/issues/816
318 | """
319 | host = urlparse(url).netloc.split(":")[0]
320 | if host.endswith(".amazonaws.com"):
321 | return True
322 | elif host in ["127.0.0.1", "localhost", "172.21.0.1"]:
323 | return False
324 | else:
325 | raise Exception(f"Unknown S3 host: {host}")
326 |
327 |
328 |
--------------------------------------------------------------------------------
/python/upload/hash.py:
--------------------------------------------------------------------------------
1 | import os
2 | import base64
3 | import dataclasses
4 | import hashlib
5 | from typing import IO, Union, Tuple
6 | from .progress import progress_dict, progress_update
7 |
8 | HASH_CHUNK_SIZE = 4096
9 |
10 | def _update(hashers, data: Union[bytes, IO[bytes]], filename: str = None):
11 | if isinstance(data, bytes):
12 | for hasher in hashers:
13 | hasher.update(data)
14 | else:
15 | assert not isinstance(data, (bytearray, memoryview)) # https://github.com/microsoft/pyright/issues/5697
16 |
17 | total_size = os.fstat(data.fileno()).st_size
18 | processed_size = 0
19 |
20 | pos = data.tell()
21 | while 1:
22 | chunk = data.read(HASH_CHUNK_SIZE)
23 | if not isinstance(chunk, bytes):
24 | raise ValueError(f"Only accepts bytes or byte buffer objects, not {type(chunk)} buffers")
25 | if not chunk:
26 | break
27 | for hasher in hashers:
28 | hasher.update(chunk)
29 |
30 | # Calculate and display the progress
31 | processed_size += len(chunk)
32 | #progress = processed_size / total_size * 100
33 | #print(f'\rProgress: {progress_dict}', end='', flush=True)
34 | progress_update(filename=filename, value=total_size - processed_size, max=total_size)
35 |
36 | #print("")
37 | data.seek(pos)
38 |
39 |
40 | def get_sha256_hex(data: Union[bytes, IO[bytes]]) -> str:
41 | hasher = hashlib.sha256()
42 | _update([hasher], data)
43 | return hasher.hexdigest()
44 |
45 |
46 | def get_sha256_base64(data: Union[bytes, IO[bytes]]) -> str:
47 | hasher = hashlib.sha256()
48 | _update([hasher], data)
49 | return base64.b64encode(hasher.digest()).decode("ascii")
50 |
51 |
52 | def get_md5_base64(data: Union[bytes, IO[bytes]]) -> str:
53 | hasher = hashlib.md5()
54 | _update([hasher], data)
55 | return base64.b64encode(hasher.digest()).decode("utf-8")
56 |
57 |
58 | @dataclasses.dataclass
59 | class UploadHashes:
60 | md5_base64: str
61 | sha256_base64: str
62 |
63 |
64 | def get_upload_hashes(data: Union[bytes, IO[bytes]], filename: str = None) -> Tuple[UploadHashes, str]:
65 | md5 = hashlib.md5()
66 | sha256 = hashlib.sha256()
67 | _update([md5, sha256], data, filename)
68 |
69 | return UploadHashes(
70 | md5_base64=base64.b64encode(md5.digest()).decode("ascii"),
71 | sha256_base64=base64.b64encode(sha256.digest()).decode("ascii"),
72 | ), sha256.hexdigest()
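# Editorial sketch (hypothetical path, not in the original file): typical usage hashes an
# open binary file and reports hashing progress under its filename:
#   with open("models/checkpoints/sd15.ckpt", "rb") as f:
#       hashes, sha256_hex = get_upload_hashes(f, filename=f.name)
#   # hashes.md5_base64 / hashes.sha256_base64 accompany the upload request;
#   # sha256_hex uniquely identifies the file contents.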
73 |
--------------------------------------------------------------------------------
/python/upload/net.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import contextlib
3 | import certifi
4 | import socket
5 | import ssl
6 | import time
7 | import uuid
8 |
9 | import aiohttp
10 | from aiohttp import ClientSession, ClientTimeout, TCPConnector
11 | from aiohttp.web import Application
12 | from aiohttp.web_runner import AppRunner, SockSite
13 | from grpclib import GRPCError, Status
15 | from grpclib.exceptions import StreamTerminatedError
16 | from typing import (
17 | Any,
18 | AsyncIterator,
19 | Dict,
20 | List,
21 | Optional,
22 | Type,
23 | TypeVar,
24 | )
25 |
26 |
27 | RETRYABLE_GRPC_STATUS_CODES = [
28 | Status.DEADLINE_EXCEEDED,
29 | Status.UNAVAILABLE,
30 | Status.CANCELLED,
31 | Status.INTERNAL,
32 | ]
33 |
34 | def http_client_with_tls(timeout: Optional[float]) -> ClientSession:
35 | """Create a new HTTP client session with standard, bundled TLS certificates.
36 |
37 |     This is necessary to prevent client issues on some systems where Python does
38 |     not come pre-installed with the specific TLS certificates that are needed to
39 |     connect to AWS S3 bucket URLs.
40 | 
41 |     Specifically, it avoids the error "unable to get local issuer certificate" when
42 |     making an aiohttp request.
43 | """
44 | ssl_context = ssl.create_default_context(cafile=certifi.where())
45 | connector = TCPConnector(ssl=ssl_context)
46 | return ClientSession(connector=connector, timeout=ClientTimeout(total=timeout))
47 |
48 |
49 | @contextlib.asynccontextmanager
50 | async def run_temporary_http_server(app: Application):
51 | # Allocates a random port, runs a server in a context manager
52 | # This is used in various tests
53 | sock = socket.socket()
54 | sock.bind(("", 0))
55 | port = sock.getsockname()[1]
56 | host = f"http://127.0.0.1:{port}"
57 |
58 | runner = AppRunner(app)
59 | await runner.setup()
60 | site = SockSite(runner, sock=sock)
61 | await site.start()
62 | try:
63 | yield host
64 | finally:
65 | await runner.cleanup()
66 |
67 |
68 | async def make_post_request(url, data):
69 | async with aiohttp.ClientSession() as session:
70 | async with session.post(url, json=data) as response:
71 | return await response.json()
72 |
73 |
74 |
75 | async def retry_transient_errors(
76 | fn,
77 | *args,
78 | base_delay: float = 0.1,
79 | max_delay: float = 1,
80 | delay_factor: float = 2,
81 | max_retries: Optional[int] = 3,
82 |     additional_status_codes: Optional[list] = None,
83 | attempt_timeout: Optional[float] = None, # timeout for each attempt
84 | total_timeout: Optional[float] = None, # timeout for the entire function call
85 | attempt_timeout_floor=2.0, # always have at least this much timeout (only for total_timeout)
86 | ):
87 | """Retry on transient gRPC failures with back-off until max_retries is reached.
88 | If max_retries is None, retry forever."""
89 |
90 | delay = base_delay
91 | n_retries = 0
92 |
93 |     status_codes = [*RETRYABLE_GRPC_STATUS_CODES, *(additional_status_codes or [])]
94 |
95 | idempotency_key = str(uuid.uuid4())
96 |
97 | t0 = time.time()
98 | if total_timeout is not None:
99 | total_deadline = t0 + total_timeout
100 | else:
101 | total_deadline = None
102 |
103 | while True:
104 |
105 | metadata = [("x-idempotency-key", idempotency_key), ("x-retry-attempt", str(n_retries))]
106 | if n_retries > 0:
107 | metadata.append(("x-retry-delay", str(time.time() - t0)))
108 | timeouts = []
109 |
110 | if attempt_timeout is not None:
111 | timeouts.append(attempt_timeout)
112 | if total_timeout is not None:
113 | timeouts.append(max(total_deadline - time.time(), attempt_timeout_floor))
114 | if timeouts:
115 | timeout = min(timeouts) # In case the function provided both types of timeouts
116 | else:
117 | timeout = None
118 | try:
119 | return await fn(*args, metadata=metadata, timeout=timeout)
120 |
121 | except (StreamTerminatedError, GRPCError, socket.gaierror, asyncio.TimeoutError) as exc:
122 | if isinstance(exc, GRPCError) and exc.status not in status_codes:
123 | raise exc
124 |
125 | if max_retries is not None and n_retries >= max_retries:
126 | raise exc
127 |
128 | if total_deadline and time.time() + delay + attempt_timeout_floor >= total_deadline:
129 | # no point sleeping if that's going to push us past the deadline
130 | raise exc
131 |
132 | n_retries += 1
133 |
134 | await asyncio.sleep(delay)
135 | delay = min(delay * delay_factor, max_delay)
136 |
137 |
--------------------------------------------------------------------------------
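A hedged sketch of how `retry_transient_errors` above is meant to be driven. `call_stub` is a hypothetical coroutine; whatever function you pass must accept the `metadata` and `timeout` keyword arguments the helper injects on every attempt:

```python
import asyncio
from grpclib import GRPCError, Status
from net import retry_transient_errors  # adjust import to your layout

attempts = {"n": 0}

# Hypothetical stand-in for a gRPC stub call; fails twice, then succeeds.
async def call_stub(payload, *, metadata=None, timeout=None):
    attempts["n"] += 1
    if attempts["n"] < 3:
        raise GRPCError(Status.UNAVAILABLE, "transient failure")  # retryable
    return {"ok": True, "attempts": attempts["n"]}

async def main():
    return await retry_transient_errors(
        call_stub,
        {"workflow_id": "1234"},  # hypothetical payload
        max_retries=5,
        total_timeout=30.0,
    )

print(asyncio.run(main()))  # -> {'ok': True, 'attempts': 3}
```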
/python/upload/progress.py:
--------------------------------------------------------------------------------
1 | import dataclasses
2 | from typing import Dict
3 |
4 |
5 | @dataclasses.dataclass
6 | class FileProgress:
7 | max: int
8 | value: int
9 |
10 | progress_dict: Dict[str, FileProgress] = {}
11 |
12 | def progress_update(filename, value, max):
13 | # filename can be PosixPath
14 | filename = str(filename)
15 |
16 | progress = progress_dict.get(filename, FileProgress(0, 0))
17 | progress.value = value
18 | progress.max = max
19 |
20 | progress_dict[filename] = progress
21 |
22 |
23 | def reset_progress(progress_dict):
24 |     # Clear in place; rebinding the parameter would not reset the shared dict
25 |     progress_dict.clear()
26 |
27 |
28 |
--------------------------------------------------------------------------------
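The progress registry above is just a module-level dict keyed by filename; `hash.py` feeds it a value that counts down to zero as bytes are hashed. A small illustrative sketch (filename and sizes are hypothetical):

```python
from progress import progress_dict, progress_update, reset_progress  # adjust import

# 2 of 10 MB of "model.safetensors" still remain to be hashed
progress_update("model.safetensors", value=2_000_000, max=10_000_000)

for name, p in progress_dict.items():
    remaining = p.value / p.max * 100 if p.max else 0
    print(f"{name}: {remaining:.0f}% remaining")

reset_progress(progress_dict)  # clears the shared registry in place
assert progress_dict == {}
```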
/python/upload/spec.py:
--------------------------------------------------------------------------------
1 | import io
2 | import copy
3 | import os
4 | import base64
5 | import platform
6 | from aiohttp import web
7 | import asyncio
8 | import dataclasses
9 |
10 | import concurrent.futures
11 | import time
12 | from pathlib import Path, PurePosixPath
13 | from typing import (
14 | IO,
15 | AsyncGenerator,
16 | AsyncIterator,
17 | BinaryIO,
18 | Dict,
19 | Callable,
20 | Generator,
21 | Optional,
22 | Sequence,
23 | Type,
24 | Union,
25 | Any,
26 | )
27 | import aiostream
28 | import zlib
29 | from .sync import synchronize_api
30 | from .blob import LARGE_FILE_LIMIT
31 | from .hash import get_sha256_hex, get_upload_hashes, UploadHashes
32 |
33 | def get_content_length(data: BinaryIO):
34 | # *Remaining* length of file from current seek position
35 | pos = data.tell()
36 | data.seek(0, os.SEEK_END)
37 | content_length = data.tell()
38 | data.seek(pos)
39 | return content_length - pos
40 |
41 |
42 | @dataclasses.dataclass
43 | class FileUploadSpec:
44 | source: Callable[[], BinaryIO]
45 | source_description: Any
46 | mount_filename: str
47 | use_blob: bool
48 | content: Optional[bytes] # typically None if using blob, required otherwise
49 | sha256_hex: str
50 | mode: int # file permission bits (last 12 bits of st_mode)
51 | size: int
52 | upload_hashes: UploadHashes
53 |
54 | 
55 |
56 | def _get_file_upload_spec(
57 | source: Callable[[], BinaryIO], source_description: Any, mount_filename: PurePosixPath, mode: int
58 | ) -> FileUploadSpec:
59 |
60 | with source() as fp:
61 | # Current position is ignored - we always upload from position 0
62 | fp.seek(0, os.SEEK_END)
63 | size = fp.tell()
64 | fp.seek(0)
65 |
66 | # Hotfix - cannot send bytes over http
67 | use_blob = True
68 | content = None
69 |
70 | #sha256_hex = get_sha256_hex(fp)
71 | upload_hashes, sha256_hex = get_upload_hashes(fp, source_description)
72 |
73 | """
74 | if size >= LARGE_FILE_LIMIT:
75 | use_blob = True
76 | content = None
77 | sha256_hex = get_sha256_hex(fp)
78 | else:
79 | use_blob = False
80 | content = fp.read()
81 | sha256_hex = get_sha256_hex(content)
82 | """
83 |
84 | return FileUploadSpec(
85 | source=source,
86 | source_description=source_description,
87 | mount_filename=mount_filename.as_posix(),
88 | use_blob=use_blob,
89 | content=content,
90 | sha256_hex=sha256_hex,
91 | upload_hashes=upload_hashes,
92 | mode=mode & 0o7777,
93 | size=size,
94 | )
95 |
96 |
97 | def get_file_upload_spec_from_path(
98 | filename: Path, mount_filename: PurePosixPath, mode: Optional[int] = None
99 | ) -> FileUploadSpec:
100 |     # Python appears to give files 0o666 bits on Windows (identical for user, group, and others),
101 |     # so on Windows we mask with 0o7755 to drop the group/other write bits for POSIX compatibility.
102 | mode = mode or os.stat(filename).st_mode & (0o7777 if platform.system() != "Windows" else 0o7755)
103 | return _get_file_upload_spec(
104 | lambda: open(filename, "rb"),
105 | filename,
106 | mount_filename,
107 | mode,
108 | )
109 |
110 |
111 | def get_file_upload_spec_from_fileobj(fp: BinaryIO, mount_filename: PurePosixPath, mode: int) -> FileUploadSpec:
112 | def source():
113 | # We ignore position in stream and always upload from position 0
114 | fp.seek(0)
115 | return fp
116 |
117 | return _get_file_upload_spec(
118 | source,
119 | str(fp),
120 | mount_filename,
121 | mode,
122 | )
123 |
124 |
125 | def serialize_spec(spec: FileUploadSpec) -> Dict[Any, Any]:
126 | """
127 |     Turns the spec into an HTTP-transferable
128 |     dictionary.
129 | """
130 |     obj = dict(spec.__dict__)  # shallow copy so the spec itself is not mutated below
131 |
132 | obj["source_description"] = str(spec.source_description)
133 |
134 |
135 | with spec.source() as data:
136 | upload_hashes = obj["upload_hashes"].__dict__
137 |
138 | if isinstance(data, bytes):
139 | data = io.BytesIO(data)
140 |
141 | content_length = get_content_length(data)
142 |
143 | obj["upload_hashes"] = upload_hashes
144 | obj["content_length"] = content_length
145 | del obj["source"]
146 |
147 | # Serialize bytes
148 | # At the moment this is too large to
149 | # send over http
150 | if obj["content"] is not None:
151 | encoded_content = base64.b64encode(obj["content"]).decode('utf-8')
152 | obj["content"] = encoded_content
153 |
154 | return obj
155 |
156 | class _FileSpecContextManager:
157 | """Context manager for batch-uploading files to a Volume."""
158 |
159 | def __init__(self, file_specs):
160 | """mdmd:hidden"""
161 | self._upload_generators = []
162 |
163 | self.files_stream = file_specs
164 |
165 | async def __aenter__(self):
166 | return self
167 |
168 | async def __aexit__(self, exc_type, exc_val, exc_tb):
169 | if not exc_val:
170 | return self
171 | # Flatten all the uploads yielded by the upload generators in the batch
172 | """
173 | def gen_upload_providers():
174 | for gen in self._upload_generators:
175 | yield from gen
176 |
177 | async def gen_file_upload_specs(): # -> AsyncGenerator[FileUploadSpec, None]:
178 | loop = asyncio.get_event_loop()
179 | with concurrent.futures.ThreadPoolExecutor() as exe:
180 | # TODO: avoid eagerly expanding
181 | futs = [loop.run_in_executor(exe, f) for f in gen_upload_providers()]
182 | #print(f"Computing checksums for {len(futs)} files using {exe._max_workers} workers")
183 | for fut in asyncio.as_completed(futs):
184 | yield await fut
185 | """
186 |
187 | async def generate_specs(self):
188 | # Flatten all the uploads yielded by the upload generators in the batch
189 | def gen_upload_providers():
190 | for gen in self._upload_generators:
191 | yield from gen
192 |
193 | async def gen_file_upload_specs(): # -> AsyncGenerator[FileUploadSpec, None]:
194 | loop = asyncio.get_event_loop()
195 | with concurrent.futures.ThreadPoolExecutor() as exe:
196 | # TODO: avoid eagerly expanding
197 | futs = [loop.run_in_executor(exe, f) for f in gen_upload_providers()]
198 | #print(f"Computing checksums for {len(futs)} files using {exe._max_workers} workers")
199 | for fut in asyncio.as_completed(futs):
200 | yield await fut
201 |
202 | files_stream = aiostream.stream.iterate(gen_file_upload_specs())
203 | #print(files_stream)
204 | self.files_stream = files_stream
205 | return files_stream
206 |
207 |
208 | def put_file(
209 | self,
210 | local_file: Union[Path, str, BinaryIO],
211 | remote_path: Union[PurePosixPath, str],
212 | mode: Optional[int] = None,
213 | ):
214 | """Upload a file from a local file or file-like object.
215 |
216 | Will create any needed parent directories automatically.
217 |
218 | If `local_file` is a file-like object it must remain readable for the lifetime of the batch.
219 | """
220 | remote_path = PurePosixPath(remote_path).as_posix()
221 | if remote_path.endswith("/"):
222 | raise ValueError(f"remote_path ({remote_path}) must refer to a file - cannot end with /")
223 |
224 | def gen():
225 | if isinstance(local_file, str) or isinstance(local_file, Path):
226 | yield lambda: get_file_upload_spec_from_path(local_file, PurePosixPath(remote_path), mode)
227 | else:
228 | yield lambda: get_file_upload_spec_from_fileobj(local_file, PurePosixPath(remote_path), mode or 0o644)
229 |
230 |
231 | self._upload_generators.append(gen())
232 |
233 |
234 | def put_model(
235 | self,
236 | local_file: Union[Path, str, BinaryIO],
237 | model_type: str,
238 | remote_path: Union[PurePosixPath, str],
239 | mode: Optional[int] = None,
240 | ):
241 |         """Upload a model file from a local file or file-like object.
242 |
243 | Will create any needed parent directories automatically.
244 |
245 | If `local_file` is a file-like object it must remain readable for the lifetime of the batch.
246 | """
247 | remote_path = PurePosixPath(remote_path).as_posix()
248 | if remote_path.endswith("/"):
249 | raise ValueError(f"remote_path ({remote_path}) must refer to a file - cannot end with /")
250 |
251 | def gen():
252 | if isinstance(local_file, str) or isinstance(local_file, Path):
253 | yield lambda: get_file_upload_spec_from_path(local_file, PurePosixPath(remote_path), mode)
254 | else:
255 | yield lambda: get_file_upload_spec_from_fileobj(local_file, PurePosixPath(remote_path), mode or 0o644)
256 |
257 |
258 | self._upload_generators.append(gen())
259 |
260 | def get_files_stream(self):
261 | return self.files_stream
262 |
263 | def put_directory(
264 | self,
265 | local_path: Union[Path, str],
266 | remote_path: Union[PurePosixPath, str],
267 | recursive: bool = True,
268 | ):
269 | """
270 | Upload all files in a local directory.
271 |
272 | Will create any needed parent directories automatically.
273 | """
274 | local_path = Path(local_path)
275 | assert local_path.is_dir()
276 | remote_path = PurePosixPath(remote_path)
277 |
278 | def create_file_spec_provider(subpath):
279 | relpath_str = subpath.relative_to(local_path)
280 | return lambda: get_file_upload_spec_from_path(subpath, remote_path / relpath_str)
281 |
282 | def gen():
283 | glob = local_path.rglob("*") if recursive else local_path.glob("*")
284 | for subpath in glob:
285 | # skip if subpath contains .git or __pycache__
286 | if ".git" in subpath.parts or "__pycache__" in subpath.parts:
287 | continue
288 |
289 | # Skip directories and unsupported file types (e.g. block devices)
290 | if subpath.is_file():
291 | yield create_file_spec_provider(subpath)
292 |
293 | self._upload_generators.append(gen())
294 |
295 | FileSpecContextManager = synchronize_api(_FileSpecContextManager)
296 |
--------------------------------------------------------------------------------
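A hedged sketch of driving the batch uploader defined above from async code, using the private `_FileSpecContextManager` directly. The local paths and the `1234` workflow id are hypothetical, and the iteration follows aiostream's streamer-context pattern:

```python
import asyncio
from spec import _FileSpecContextManager, serialize_spec  # adjust import to your layout

async def collect_specs():
    async with _FileSpecContextManager([]) as batch:
        # Queue a directory and a single file for upload (hypothetical paths)
        batch.put_directory("./models", "vol/1234/comfyui/models")
        batch.put_file("./extra_model_paths.yaml", "vol/1234/comfyui/extra_model_paths.yaml")

        specs = []
        files_stream = await batch.generate_specs()
        async with files_stream.stream() as streamer:
            async for spec in streamer:
                specs.append(serialize_spec(spec))  # ready to send over HTTP
        return specs

print(len(asyncio.run(collect_specs())), "file specs collected")
```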
/python/upload/sync.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import concurrent.futures
3 | import functools
4 | import inspect
5 | import sys
6 | import time
7 | import typing
8 | from contextlib import asynccontextmanager
9 | from typing import Any, AsyncGenerator, Callable, Iterator, List, Optional, Set, TypeVar, cast
10 |
11 | import synchronicity
12 | from typing_extensions import ParamSpec
13 |
14 |
15 | synchronizer = synchronicity.Synchronizer()
16 |
17 | def synchronize_api(obj, target_module=None):
18 | if inspect.isclass(obj):
19 | blocking_name = obj.__name__.lstrip("_")
20 |     elif inspect.isfunction(obj):
21 | blocking_name = obj.__name__.lstrip("_")
22 | elif isinstance(obj, TypeVar):
23 | blocking_name = "_BLOCKING_" + obj.__name__
24 | else:
25 | blocking_name = None
26 | if target_module is None:
27 | target_module = obj.__module__
28 | return synchronizer.create_blocking(obj, blocking_name, target_module=target_module)
29 |
30 | def retry(direct_fn=None, *, n_attempts=3, base_delay=0, delay_factor=2, timeout=90):
31 | """Decorator that calls an async function multiple times, with a given timeout.
32 |
33 | If a `base_delay` is provided, the function is given an exponentially
34 | increasing delay on each run, up until the maximum number of attempts.
35 |
36 | Usage:
37 |
38 | ```
39 | @retry
40 | async def may_fail_default():
41 | # ...
42 | pass
43 |
44 | @retry(n_attempts=5, base_delay=1)
45 | async def may_fail_delay():
46 | # ...
47 | pass
48 | ```
49 | """
50 |
51 | def decorator(fn):
52 | @functools.wraps(fn)
53 | async def f_wrapped(*args, **kwargs):
54 | delay = base_delay
55 | for i in range(n_attempts):
56 | t0 = time.time()
57 | try:
58 | return await asyncio.wait_for(fn(*args, **kwargs), timeout=timeout)
59 | except asyncio.CancelledError:
60 | #logger.debug(f"Function {fn} was cancelled")
61 | raise
62 | except Exception as e:
63 | if i >= n_attempts - 1:
64 | raise
65 | """
66 | logger.debug(
67 | f"Failed invoking function {fn}: {e}"
68 | f" (took {time.time() - t0}s, sleeping {delay}s"
69 | f" and trying {n_attempts - i - 1} more times)"
70 | )
71 | """
72 | await asyncio.sleep(delay)
73 | delay *= delay_factor
74 |
75 | return f_wrapped
76 |
77 | if direct_fn is not None:
78 | # It's invoked like @retry
79 | return decorator(direct_fn)
80 | else:
81 | # It's invoked like @retry(n_attempts=...)
82 | return decorator
83 |
84 |
--------------------------------------------------------------------------------
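`synchronize_api` above is how the blocking `FileSpecContextManager` is produced from the async `_FileSpecContextManager`. A hedged sketch with a hypothetical class, based on how synchronicity's blocking wrappers behave:

```python
import asyncio
from sync import synchronize_api  # adjust import to your layout

class _Pinger:
    """Async implementation; the leading underscore is stripped for the blocking name."""

    async def ping(self, host: str) -> str:
        await asyncio.sleep(0.1)  # stand-in for real async work
        return f"pong from {host}"

# The wrapper class can be used from plain synchronous code;
# synchronicity drives the event loop behind the scenes.
Pinger = synchronize_api(_Pinger)
print(Pinger().ping("example.com"))
```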
/python/user.py:
--------------------------------------------------------------------------------
1 | import os
2 | import base64
3 | import uuid
4 | import json
5 |
6 | file_path = "../.comfycloud_profile"
7 |
8 | def load_user_profile():
9 | if os.path.exists(file_path):
10 | try:
11 | with open(file_path, 'r') as file:
12 | data = json.load(file)
13 | return data
14 | except json.JSONDecodeError:
15 |             # The profile file is corrupted - recreate it with default values
16 |             data = {"id": str(uuid.uuid4())}
17 |             save_user_profile(data)
18 | else:
19 | # If the file doesn't exist, create a new one with default values
20 | data = {"id": str(uuid.uuid4())}
21 | save_user_profile(data)
22 |
23 | return data
24 |
25 | def save_user_profile(data):
26 | with open(file_path, 'w') as file:
27 | json.dump(data, file)
28 |
29 |
30 |
31 |
--------------------------------------------------------------------------------
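The profile helpers above persist a small JSON blob; note that `file_path` is resolved relative to the process's working directory. A minimal sketch (the extra field is hypothetical):

```python
from user import load_user_profile, save_user_profile  # adjust import to your layout

profile = load_user_profile()  # creates the file with a fresh UUID on first run
print(profile["id"])

profile["plan"] = "free"       # hypothetical extra field
save_user_profile(profile)
```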
/python/utils/custom_nodes.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import contextlib
4 | import importlib.util
5 | import traceback
6 | import folder_paths
7 |
8 | @contextlib.contextmanager
9 | def suppress_console():
10 | with open(os.devnull, 'w') as devnull:
11 | old_stdout = sys.stdout
12 | old_stderr = sys.stderr
13 | sys.stdout = devnull
14 | sys.stderr = devnull
15 | try:
16 | yield
17 | finally:
18 | sys.stdout = old_stdout
19 | sys.stderr = old_stderr
20 |
21 | def get_custom_node_mappings(module_path, ignore=frozenset()):
22 | mappings = []
23 |
24 | module_name = os.path.basename(module_path)
25 | if os.path.isfile(module_path):
26 | sp = os.path.splitext(module_path)
27 | module_name = sp[0]
28 | try:
29 | if os.path.isfile(module_path):
30 | module_spec = importlib.util.spec_from_file_location(module_name, module_path)
31 | module_dir = os.path.split(module_path)[0]
32 | else:
33 | module_spec = importlib.util.spec_from_file_location(module_name, os.path.join(module_path, "__init__.py"))
34 | module_dir = module_path
35 |
36 | module = importlib.util.module_from_spec(module_spec)
37 | module_spec.loader.exec_module(module)
38 |
39 | if hasattr(module, "NODE_CLASS_MAPPINGS") and getattr(module, "NODE_CLASS_MAPPINGS") is not None:
40 | for name in module.NODE_CLASS_MAPPINGS:
41 | if name not in ignore:
42 | mappings.append(name)
43 |
44 | return (module_name, mappings)
45 | else:
46 |             # Skip this module: it does not define NODE_CLASS_MAPPINGS.
47 | return (module_name, [])
48 |     except Exception:
49 | return (module_name, [])
50 |
51 | def get_custom_node_list():
52 | """
53 |     Returns a dict that maps each custom node module name to its node class names.
54 | """
55 | custom_nodes = {}
56 | node_paths = folder_paths.get_folder_paths("custom_nodes")
57 | for custom_node_path in node_paths:
58 | possible_modules = os.listdir(os.path.realpath(custom_node_path))
59 | if "__pycache__" in possible_modules:
60 | possible_modules.remove("__pycache__")
61 |
62 | for possible_module in possible_modules:
63 | module_path = os.path.join(custom_node_path, possible_module)
64 | if os.path.isfile(module_path) and os.path.splitext(module_path)[1] != ".py": continue
65 | if module_path.endswith(".disabled"): continue
66 | mappings = get_custom_node_mappings(module_path)
67 | custom_nodes[mappings[0]] = mappings[1]
68 |
69 | return custom_nodes
70 |
71 | def get_custom_node_list_silent():
72 | with suppress_console():
73 | return get_custom_node_list()
74 |
--------------------------------------------------------------------------------
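These helpers only work inside a running ComfyUI process, since `folder_paths` is provided by ComfyUI itself. A minimal sketch of listing the installed custom nodes:

```python
from custom_nodes import get_custom_node_list_silent  # adjust import; requires ComfyUI

# Maps each custom node module name to the node class names it registers
nodes = get_custom_node_list_silent()
for module_name, node_names in nodes.items():
    print(f"{module_name}: {len(node_names)} node classes")
```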
/python/utils/paths.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | def _search_dependency_paths(root_dir, name):
4 | """
5 |     Returns the path of a file/folder relative to root_dir
6 |     when given its name.
7 |     - root_dir is the directory to search
8 |     - name is a single file or folder name
9 | """
10 | for dirpath, dirnames, filenames in os.walk(root_dir):
11 |
12 | relative_path = os.path.relpath(dirpath, root_dir)
13 | potential_path = os.path.normpath(os.path.join(root_dir, relative_path, name))
14 |
15 | if os.path.exists(potential_path):
16 | return os.path.normpath(os.path.join(relative_path, name))
17 |
18 |     raise Exception(f"Required {name} was not found in the {root_dir} folder. Make sure you do not have extra_paths.yaml enabled.")
19 |
20 | def _search_dependency_paths_from_list(root_dir, names):
21 | results = []
22 | for name in names:
23 | try:
24 | result = _search_dependency_paths(root_dir, name)
25 | results.append(result)
26 | except Exception as e:
27 | raise
28 | return results
29 |
30 |
31 | def to_linux_path(path):
32 | return path.replace("\\", "/")
33 |
34 |
35 | def build_paths(paths_to_upload, dep_lists, workflow_id=""):
36 | """
37 | Handles finding, building, verifying path
38 | """
39 | paths = []
40 | for dirname, base_path in paths_to_upload.items():
41 | to_upload = _search_dependency_paths_from_list(base_path, dep_lists[dirname])
42 |
43 | for path in to_upload:
44 | local_path = os.path.join(base_path, path)
45 |             # normpath converts the path to the system-specific form
46 |             norm_local_path = os.path.normpath(local_path)
47 | 
48 |             # remote paths always use forward slashes, hence to_linux_path below
49 | remote_path = to_linux_path(os.path.normpath(os.path.join("vol", workflow_id, "comfyui", dirname, path)))
50 |
51 | paths.append((norm_local_path, remote_path))
52 |
53 | return paths
54 |
55 |
--------------------------------------------------------------------------------
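A hedged sketch of `build_paths` above. The directory and dependency names are hypothetical and have to exist on disk for the search to succeed:

```python
from paths import build_paths  # adjust import to your layout

# Hypothetical inputs: one "models" root containing one checkpoint to upload
paths_to_upload = {"models": "/home/user/ComfyUI/models"}
dep_lists = {"models": ["checkpoints/sd_xl_base_1.0.safetensors"]}

for local_path, remote_path in build_paths(paths_to_upload, dep_lists, workflow_id="1234"):
    print(local_path, "->", remote_path)
# /home/user/ComfyUI/models/checkpoints/sd_xl_base_1.0.safetensors
#   -> vol/1234/comfyui/models/checkpoints/sd_xl_base_1.0.safetensors
```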
/python/utils/requirements.py:
--------------------------------------------------------------------------------
1 | import os
2 | import re
3 | import folder_paths
4 | from packaging.requirements import Requirement, InvalidRequirement
5 | from packaging.specifiers import SpecifierSet, InvalidSpecifier
6 |
7 | def update_requirements():
8 | """
9 |     Normalizes each custom node's requirements.txt so that it can be parsed by UV.
10 | """
11 |
12 | base = folder_paths.base_path
13 | custom_nodes_dir = os.path.join(base, "custom_nodes")
14 |
15 | # Loop over the files in the specified directory
16 | for custom_node in os.listdir(custom_nodes_dir):
17 | filepath = os.path.join(custom_nodes_dir, custom_node, "requirements.txt")
18 |
19 | # Check if the file is a requirements.txt file
20 | if os.path.exists(filepath):
21 |
22 | # Read the content of the requirements.txt file
23 | with open(filepath, 'r') as file:
24 | print("Checking requirements", filepath)
25 | lines = file.readlines()
26 |
27 | # Update dependencies in the file content
28 | updated_lines = []
29 | for line in lines:
30 | # Check if the line contains a git dependency without the specified format
31 | updated_line = remove_comments(line)
32 | if updated_line is None:
33 | # Skip if the line is a comment
34 | continue
35 |
36 | # Remove newlines, empty spaces, and tabs
37 | if updated_line == "" or updated_line == "\n":
38 | # Skip if the line is empty
39 | continue
40 |
41 | updated_line = patch_git_urls(updated_line)
42 | validate_requirements(updated_line, filepath)
43 |
44 | updated_lines.append(updated_line)
45 |
46 | # Write the updated content back to the requirements.txt file
47 | with open(filepath, 'w') as file:
48 | file.writelines(updated_lines)
49 |
50 | def patch_git_urls(line: str):
51 | """
52 |     UV doesn't support requirements formatted as a bare git+https:// URL with
53 |     no package-name prefix, so we prepend the package name to the git URL.
54 |
55 | For example, WAS nodes has a requirements formatted as:
56 | - git+https://github.com/WASasquatch/img2texture.git
57 | - git+https://github.com/WASasquatch/cstr
58 |
59 |     This function transforms them into:
60 | - img2texture @ git+https://github.com/WASasquatch/img2texture.git
61 | - cstr @ git+https://github.com/WASasquatch/cstr
62 | """
63 | # Check if the line contains a git dependency without the specified format
64 | match_git = re.match(r'^\s*git\+https://.*?/([^/]+)\.git', line)
65 | match_git_plus = re.match(r'^\s*git\+https://.*?/([^/]+)$', line)
66 |
67 | updated_line = line
68 | if match_git:
69 | package_name = match_git.group(1)
70 | updated_line = f"{package_name} @ {line.strip()}\n"
71 | elif match_git_plus:
72 | package_name = match_git_plus.group(1).strip()
73 | updated_line = f"{package_name} @ {line.strip()}\n"
74 |
75 | return updated_line
76 |
77 | def remove_comments(line: str):
78 | """
79 | Removes comments from the line
80 |
81 | For example, facerestore_cf has a requirement formatted like:
82 | - gdown # supports downloading the large file from Google Drive
83 | """
84 |     # If the whole line is a comment, return None
85 | if re.match(r'^\s*#', line):
86 | return None
87 |
88 | return re.sub(r'#.*', '', line)
89 |
90 | def validate_requirements(line: str, filepath: str):
91 | """
92 | Check if the line is a valid requirement
93 |
94 | For example, comfyui-dream-project custom node causes an error because
95 | it has a requirement formatted like:
96 | - numpy<1.24>=1.18
97 |
98 | UV doesn't support this format
99 | (https://github.com/astral-sh/uv/blob/bb61c2d5343b8b0645178e1c4b74f1493834b771/crates/requirements-txt/src/lib.rs#L203)
100 | """
101 | try:
102 | req = Requirement(line)
103 |
104 | # Validate the specifiers
105 | SpecifierSet(str(req.specifier))
106 | except Exception as e:
107 | raise Exception(
108 | f"error: Couldn't parse requirement in `requirements.txt`\n"
109 | f" Caused by: {str(e)}.\n\n"
110 | f" File: {filepath}\n"
111 | )
112 |
113 |
114 |
115 |
--------------------------------------------------------------------------------
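The two line-level helpers above are easiest to understand from concrete inputs; this sketch simply exercises them with the WAS-suite examples from the docstrings:

```python
from requirements import patch_git_urls, remove_comments  # adjust import to your layout

print(patch_git_urls("git+https://github.com/WASasquatch/img2texture.git\n"))
# -> "img2texture @ git+https://github.com/WASasquatch/img2texture.git\n"

print(patch_git_urls("git+https://github.com/WASasquatch/cstr\n"))
# -> "cstr @ git+https://github.com/WASasquatch/cstr\n"

print(remove_comments("gdown # supports downloading the large file from Google Drive\n"))
# -> "gdown \n"
```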
/python/utils/task.py:
--------------------------------------------------------------------------------
1 | import dataclasses
2 | import uuid
3 | from enum import Enum
4 | from typing import Any, Dict, Optional
5 |
6 | class TaskStatus(Enum):
7 | STARTED = "Started"
8 | COMPLETED = "Completed"
9 | HASHING = "Hashing"
10 | UPLOADING = "Uploading"
11 | ERROR = "Failed"
12 |
13 | @dataclasses.dataclass
14 | class Task:
15 | status: TaskStatus
16 |     message: Optional[str]
17 | progress: Any
18 |
19 | task_dict: Dict[str, Task] = {}
20 |
21 | def task_create() -> str:
22 | task_id = str(uuid.uuid4())
23 |
24 | new_task = Task(
25 | status=TaskStatus.STARTED,
26 | message=None,
27 | progress=None
28 | )
29 |
30 | task_dict[task_id] = new_task
31 |
32 | return task_id
33 |
34 | def task_get_by_id(task_id: str) -> Task:
35 | default_task = Task(TaskStatus.ERROR, "Task not found", None)
36 | return task_dict.get(task_id, default_task)
37 |
38 | def task_set_progress(task_id: str, progress: str) -> Task:
39 | task = task_get_by_id(task_id)
40 | if task.status != TaskStatus.ERROR:
41 | task.progress = progress
42 |
43 | return task
44 |
45 | def task_set_message(task_id: str, message: str) -> Task:
46 | task = task_get_by_id(task_id)
47 | if task.status != TaskStatus.ERROR:
48 | task.message = message
49 |
50 | return task
51 |
52 | def task_set_status(task_id: str, status: TaskStatus) -> Task:
53 | task = task_get_by_id(task_id)
54 | if task.status != TaskStatus.ERROR:
55 |
56 | if status == TaskStatus.HASHING and task.status == TaskStatus.UPLOADING:
57 | task.progress = None
58 |
59 | task.status = status
60 |
61 | return task
62 |
63 | def custom_asdict_factory(data):
64 |
65 | # serialize the enum
66 | def convert_value(obj):
67 | if isinstance(obj, Enum):
68 | return obj.value
69 | return obj
70 |
71 | return dict((k, convert_value(v)) for k, v in data)
72 |
73 | def task_serialize(task: Task) -> Dict[str, Any]:
74 | return dataclasses.asdict(task, dict_factory=custom_asdict_factory)
75 |
76 |
--------------------------------------------------------------------------------
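A minimal lifecycle sketch for the in-memory task registry above; the status sequence and messages are illustrative:

```python
from task import (TaskStatus, task_create, task_get_by_id, task_serialize,
                  task_set_message, task_set_progress, task_set_status)  # adjust import

task_id = task_create()                        # status starts as STARTED
task_set_status(task_id, TaskStatus.HASHING)
task_set_message(task_id, "Hashing input files")
task_set_progress(task_id, "3/10")

print(task_serialize(task_get_by_id(task_id)))
# -> {'status': 'Hashing', 'message': 'Hashing input files', 'progress': '3/10'}
```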
/requirements.txt:
--------------------------------------------------------------------------------
1 | modal
2 | synchronicity
3 | aiostream
4 |
--------------------------------------------------------------------------------