├── README.md
├── loopback_workflow.json
├── PromptEdit_Workflow.json
├── WordAsImage_workflow.json
└── SyrianFalconNodes.py
/README.md:
--------------------------------------------------------------------------------
1 | # Comfy_custom_nodes
2 | My custom nodes for ComfyUI. Just download the Python script file and put it inside the ComfyUI/custom_nodes folder.
3 |
4 | Prompt Editing
5 |
6 | [a: b :step] --> replace a with b at step
7 |
8 | [a:step] --> add a at step
9 |
10 | [a::step] --> remove a at step
11 |
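   | For example, with 20 steps, `a [sketch:photorealistic:0.3] portrait` is sampled as "a sketch portrait" for the first 6 steps (0.3 of 20) and as "a photorealistic portrait" afterwards; a step value below 1 is read as a fraction of the total steps.
   |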
12 | Alternating Tokens
13 |
14 | `<a|b>` --> alternate between a and b for all steps (more tokens can be added, e.g. `<a|b|c>`)
15 |
16 | note: to stop alternating at a certain step, combine with prompt editing, e.g. `an [<a|b>: apple :10] on a table` alternates a/b until step 10, then switches to "apple"
17 |
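   | For example, `a <cat|dog> photo` is encoded as "a cat photo" on one step and "a dog photo" on the next, switching every sampling step.
   |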
18 | 
19 |
20 |
21 |
22 |
23 |
24 | Word as Image
25 |
26 | This node converts user text input into an image of white text on a black background, for use with depth ControlNet or T2I-Adapter models.
27 |
28 | 
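   | Under the hood this is plain PIL text drawing. A minimal sketch of the core idea (not the node's exact code; the helper name, margins, and font path here are placeholders):
   |
   | ```python
   | from PIL import Image, ImageDraw, ImageFont
   |
   | def word_image(text, font_path="LiberationSans-Regular.ttf", size=60):
   |     font = ImageFont.truetype(font_path, size)
   |     # measure the text, then draw it in white on a black canvas with a 10px margin
   |     left, top, right, bottom = font.getbbox(text)
   |     canvas = Image.new("L", (right - left + 20, bottom - top + 20), "black")
   |     ImageDraw.Draw(canvas).text((10 - left, 10 - top), text, fill=255, font=font)
   |     return canvas
   |
   | word_image("ComfyUI").save("word.png")
   | ```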
29 |
--------------------------------------------------------------------------------
/loopback_workflow.json:
--------------------------------------------------------------------------------
1 | {
2 | "last_node_id": 11,
3 | "last_link_id": 17,
4 | "nodes": [
5 | {
6 | "id": 7,
7 | "type": "CLIPTextEncode",
8 | "pos": [
9 | 413,
10 | 389
11 | ],
12 | "size": {
13 | "0": 425.27801513671875,
14 | "1": 180.6060791015625
15 | },
16 | "flags": {},
17 | "order": 3,
18 | "mode": 0,
19 | "inputs": [
20 | {
21 | "name": "clip",
22 | "type": "CLIP",
23 | "link": 5
24 | }
25 | ],
26 | "outputs": [
27 | {
28 | "name": "CONDITIONING",
29 | "type": "CONDITIONING",
30 | "links": [
31 | 12
32 | ],
33 | "slot_index": 0
34 | }
35 | ],
36 | "properties": {
37 | "Node name for S&R": "CLIPTextEncode"
38 | },
39 | "widgets_values": [
40 | "text, watermark"
41 | ]
42 | },
43 | {
44 | "id": 6,
45 | "type": "CLIPTextEncode",
46 | "pos": [
47 | 415,
48 | 186
49 | ],
50 | "size": {
51 | "0": 422.84503173828125,
52 | "1": 164.31304931640625
53 | },
54 | "flags": {},
55 | "order": 2,
56 | "mode": 0,
57 | "inputs": [
58 | {
59 | "name": "clip",
60 | "type": "CLIP",
61 | "link": 3
62 | }
63 | ],
64 | "outputs": [
65 | {
66 | "name": "CONDITIONING",
67 | "type": "CONDITIONING",
68 | "links": [
69 | 11
70 | ],
71 | "slot_index": 0
72 | }
73 | ],
74 | "properties": {
75 | "Node name for S&R": "CLIPTextEncode"
76 | },
77 | "widgets_values": [
78 | "beautiful scenery nature glass bottle landscape, , purple galaxy bottle,"
79 | ]
80 | },
81 | {
82 | "id": 5,
83 | "type": "EmptyLatentImage",
84 | "pos": [
85 | 473,
86 | 609
87 | ],
88 | "size": {
89 | "0": 315,
90 | "1": 106
91 | },
92 | "flags": {},
93 | "order": 0,
94 | "mode": 0,
95 | "outputs": [
96 | {
97 | "name": "LATENT",
98 | "type": "LATENT",
99 | "links": [
100 | 13
101 | ],
102 | "slot_index": 0
103 | }
104 | ],
105 | "properties": {
106 | "Node name for S&R": "EmptyLatentImage"
107 | },
108 | "widgets_values": [
109 | 512,
110 | 512,
111 | 1
112 | ]
113 | },
114 | {
115 | "id": 9,
116 | "type": "SaveImage",
117 | "pos": [
118 | 1781,
119 | 275
120 | ],
121 | "size": [
122 | 210,
123 | 270
124 | ],
125 | "flags": {},
126 | "order": 6,
127 | "mode": 0,
128 | "inputs": [
129 | {
130 | "name": "images",
131 | "type": "IMAGE",
132 | "link": 17
133 | }
134 | ],
135 | "properties": {},
136 | "widgets_values": [
137 | "ComfyUI"
138 | ]
139 | },
140 | {
141 | "id": 4,
142 | "type": "CheckpointLoaderSimple",
143 | "pos": [
144 | 26,
145 | 474
146 | ],
147 | "size": {
148 | "0": 315,
149 | "1": 98
150 | },
151 | "flags": {},
152 | "order": 1,
153 | "mode": 0,
154 | "outputs": [
155 | {
156 | "name": "MODEL",
157 | "type": "MODEL",
158 | "links": [
159 | 10
160 | ],
161 | "slot_index": 0
162 | },
163 | {
164 | "name": "CLIP",
165 | "type": "CLIP",
166 | "links": [
167 | 3,
168 | 5
169 | ],
170 | "slot_index": 1
171 | },
172 | {
173 | "name": "VAE",
174 | "type": "VAE",
175 | "links": [
176 | 16
177 | ],
178 | "slot_index": 2
179 | }
180 | ],
181 | "properties": {
182 | "Node name for S&R": "CheckpointLoaderSimple"
183 | },
184 | "widgets_values": [
185 | "deliberate_v2.ckpt"
186 | ]
187 | },
188 | {
189 | "id": 11,
190 | "type": "VAEDecode",
191 | "pos": [
192 | 1370,
193 | 322
194 | ],
195 | "size": {
196 | "0": 210,
197 | "1": 46
198 | },
199 | "flags": {},
200 | "order": 5,
201 | "mode": 0,
202 | "inputs": [
203 | {
204 | "name": "samples",
205 | "type": "LATENT",
206 | "link": 15
207 | },
208 | {
209 | "name": "vae",
210 | "type": "VAE",
211 | "link": 16
212 | }
213 | ],
214 | "outputs": [
215 | {
216 | "name": "IMAGE",
217 | "type": "IMAGE",
218 | "links": [
219 | 17
220 | ],
221 | "shape": 3,
222 | "slot_index": 0
223 | }
224 | ],
225 | "properties": {
226 | "Node name for S&R": "VAEDecode"
227 | }
228 | },
229 | {
230 | "id": 10,
231 | "type": "LoopBack",
232 | "pos": [
233 | 946,
234 | 313
235 | ],
236 | "size": [
237 | 315,
238 | 498
239 | ],
240 | "flags": {},
241 | "order": 4,
242 | "mode": 0,
243 | "inputs": [
244 | {
245 | "name": "model",
246 | "type": "MODEL",
247 | "link": 10
248 | },
249 | {
250 | "name": "positive",
251 | "type": "CONDITIONING",
252 | "link": 11
253 | },
254 | {
255 | "name": "negative",
256 | "type": "CONDITIONING",
257 | "link": 12
258 | },
259 | {
260 | "name": "latent_image",
261 | "type": "LATENT",
262 | "link": 13
263 | }
264 | ],
265 | "outputs": [
266 | {
267 | "name": "LATENT",
268 | "type": "LATENT",
269 | "links": [
270 | 15
271 | ],
272 | "shape": 3,
273 | "slot_index": 0
274 | }
275 | ],
276 | "properties": {
277 | "Node name for S&R": "LoopBack"
278 | },
279 | "widgets_values": [
280 | 2,
281 | 575095007790149,
282 | "randomize",
283 | 5,
284 | 8,
285 | "euler_ancestral",
286 | "normal",
287 | 0.8799999999999999
288 | ]
289 | }
290 | ],
291 | "links": [
292 | [
293 | 3,
294 | 4,
295 | 1,
296 | 6,
297 | 0,
298 | "CLIP"
299 | ],
300 | [
301 | 5,
302 | 4,
303 | 1,
304 | 7,
305 | 0,
306 | "CLIP"
307 | ],
308 | [
309 | 10,
310 | 4,
311 | 0,
312 | 10,
313 | 0,
314 | "MODEL"
315 | ],
316 | [
317 | 11,
318 | 6,
319 | 0,
320 | 10,
321 | 1,
322 | "CONDITIONING"
323 | ],
324 | [
325 | 12,
326 | 7,
327 | 0,
328 | 10,
329 | 2,
330 | "CONDITIONING"
331 | ],
332 | [
333 | 13,
334 | 5,
335 | 0,
336 | 10,
337 | 3,
338 | "LATENT"
339 | ],
340 | [
341 | 15,
342 | 10,
343 | 0,
344 | 11,
345 | 0,
346 | "LATENT"
347 | ],
348 | [
349 | 16,
350 | 4,
351 | 2,
352 | 11,
353 | 1,
354 | "VAE"
355 | ],
356 | [
357 | 17,
358 | 11,
359 | 0,
360 | 9,
361 | 0,
362 | "IMAGE"
363 | ]
364 | ],
365 | "groups": [],
366 | "config": {},
367 | "extra": {},
368 | "version": 0.4
369 | }
--------------------------------------------------------------------------------
/PromptEdit_Workflow.json:
--------------------------------------------------------------------------------
1 | {
2 | "last_node_id": 26,
3 | "last_link_id": 52,
4 | "nodes": [
5 | {
6 | "id": 7,
7 | "type": "CLIPTextEncode",
8 | "pos": [
9 | 413,
10 | 389
11 | ],
12 | "size": {
13 | "0": 425.27801513671875,
14 | "1": 180.6060791015625
15 | },
16 | "flags": {},
17 | "order": 6,
18 | "mode": 0,
19 | "inputs": [
20 | {
21 | "name": "clip",
22 | "type": "CLIP",
23 | "link": 5
24 | }
25 | ],
26 | "outputs": [
27 | {
28 | "name": "CONDITIONING",
29 | "type": "CONDITIONING",
30 | "links": [
31 | 45
32 | ],
33 | "slot_index": 0
34 | }
35 | ],
36 | "properties": {
37 | "Node name for S&R": "CLIPTextEncode"
38 | },
39 | "widgets_values": [
40 | ""
41 | ]
42 | },
43 | {
44 | "id": 5,
45 | "type": "EmptyLatentImage",
46 | "pos": [
47 | 473,
48 | 609
49 | ],
50 | "size": {
51 | "0": 315,
52 | "1": 106
53 | },
54 | "flags": {},
55 | "order": 0,
56 | "mode": 0,
57 | "outputs": [
58 | {
59 | "name": "LATENT",
60 | "type": "LATENT",
61 | "links": [
62 | 46
63 | ],
64 | "slot_index": 0
65 | }
66 | ],
67 | "properties": {
68 | "Node name for S&R": "EmptyLatentImage"
69 | },
70 | "widgets_values": [
71 | 512,
72 | 512,
73 | 1
74 | ]
75 | },
76 | {
77 | "id": 6,
78 | "type": "CLIPTextEncode",
79 | "pos": [
80 | 415,
81 | 186
82 | ],
83 | "size": {
84 | "0": 422.84503173828125,
85 | "1": 164.31304931640625
86 | },
87 | "flags": {},
88 | "order": 5,
89 | "mode": 0,
90 | "inputs": [
91 | {
92 | "name": "clip",
93 | "type": "CLIP",
94 | "link": 3
95 | }
96 | ],
97 | "outputs": [
98 | {
99 | "name": "CONDITIONING",
100 | "type": "CONDITIONING",
101 | "links": [
102 | 44
103 | ],
104 | "slot_index": 0
105 | }
106 | ],
107 | "properties": {
108 | "Node name for S&R": "CLIPTextEncode"
109 | },
110 | "widgets_values": [
111 | ""
112 | ]
113 | },
114 | {
115 | "id": 4,
116 | "type": "CheckpointLoaderSimple",
117 | "pos": [
118 | 26,
119 | 474
120 | ],
121 | "size": {
122 | "0": 315,
123 | "1": 98
124 | },
125 | "flags": {},
126 | "order": 1,
127 | "mode": 0,
128 | "outputs": [
129 | {
130 | "name": "MODEL",
131 | "type": "MODEL",
132 | "links": [
133 | 43
134 | ],
135 | "slot_index": 0
136 | },
137 | {
138 | "name": "CLIP",
139 | "type": "CLIP",
140 | "links": [
141 | 3,
142 | 5,
143 | 49
144 | ],
145 | "slot_index": 1
146 | },
147 | {
148 | "name": "VAE",
149 | "type": "VAE",
150 | "links": [
151 | 8
152 | ],
153 | "slot_index": 2
154 | }
155 | ],
156 | "properties": {
157 | "Node name for S&R": "CheckpointLoaderSimple"
158 | },
159 | "widgets_values": [
160 | "deliberate_v2.ckpt"
161 | ]
162 | },
163 | {
164 | "id": 15,
165 | "type": "ControlNetLoader",
166 | "pos": [
167 | 64,
168 | 785
169 | ],
170 | "size": {
171 | "0": 315,
172 | "1": 58
173 | },
174 | "flags": {},
175 | "order": 2,
176 | "mode": 0,
177 | "outputs": [
178 | {
179 | "name": "CONTROL_NET",
180 | "type": "CONTROL_NET",
181 | "links": [
182 | 47
183 | ],
184 | "shape": 3,
185 | "slot_index": 0
186 | }
187 | ],
188 | "properties": {
189 | "Node name for S&R": "ControlNetLoader"
190 | },
191 | "widgets_values": [
192 | "t2iadapter_depth_sd15v2.pth"
193 | ]
194 | },
195 | {
196 | "id": 18,
197 | "type": "Text box",
198 | "pos": [
199 | 591,
200 | 928
201 | ],
202 | "size": {
203 | "0": 400,
204 | "1": 200
205 | },
206 | "flags": {},
207 | "order": 3,
208 | "mode": 0,
209 | "outputs": [
210 | {
211 | "name": "STRING",
212 | "type": "STRING",
213 | "links": [
214 | 50
215 | ],
216 | "shape": 3,
217 | "slot_index": 0
218 | }
219 | ],
220 | "properties": {
221 | "Node name for S&R": "Text box"
222 | },
223 | "widgets_values": [
224 | "an placed on a table in a kitchen, [sketch:photorealistic:0.3]"
225 | ]
226 | },
227 | {
228 | "id": 8,
229 | "type": "VAEDecode",
230 | "pos": [
231 | 1305,
232 | 196
233 | ],
234 | "size": {
235 | "0": 210,
236 | "1": 46
237 | },
238 | "flags": {},
239 | "order": 8,
240 | "mode": 0,
241 | "inputs": [
242 | {
243 | "name": "samples",
244 | "type": "LATENT",
245 | "link": 51
246 | },
247 | {
248 | "name": "vae",
249 | "type": "VAE",
250 | "link": 8
251 | }
252 | ],
253 | "outputs": [
254 | {
255 | "name": "IMAGE",
256 | "type": "IMAGE",
257 | "links": [
258 | 9
259 | ],
260 | "slot_index": 0
261 | }
262 | ],
263 | "properties": {
264 | "Node name for S&R": "VAEDecode"
265 | }
266 | },
267 | {
268 | "id": 9,
269 | "type": "SaveImage",
270 | "pos": [
271 | 1590,
272 | 200
273 | ],
274 | "size": {
275 | "0": 210,
276 | "1": 270
277 | },
278 | "flags": {},
279 | "order": 9,
280 | "mode": 0,
281 | "inputs": [
282 | {
283 | "name": "images",
284 | "type": "IMAGE",
285 | "link": 9
286 | }
287 | ],
288 | "properties": {},
289 | "widgets_values": [
290 | "ComfyUI"
291 | ]
292 | },
293 | {
294 | "id": 26,
295 | "type": "LoadImage",
296 | "pos": [
297 | 61,
298 | 899
299 | ],
300 | "size": {
301 | "0": 315,
302 | "1": 314.0000305175781
303 | },
304 | "flags": {},
305 | "order": 4,
306 | "mode": 0,
307 | "outputs": [
308 | {
309 | "name": "IMAGE",
310 | "type": "IMAGE",
311 | "links": [
312 | 52
313 | ],
314 | "shape": 3,
315 | "slot_index": 0
316 | },
317 | {
318 | "name": "MASK",
319 | "type": "MASK",
320 | "links": null,
321 | "shape": 3
322 | }
323 | ],
324 | "properties": {
325 | "Node name for S&R": "LoadImage"
326 | },
327 | "widgets_values": [
328 | "depth5 (2).png",
329 | "image"
330 | ]
331 | },
332 | {
333 | "id": 23,
334 | "type": "KSamplerPromptEditAndAlternate",
335 | "pos": [
336 | 1118,
337 | 297
338 | ],
339 | "size": {
340 | "0": 400,
341 | "1": 602
342 | },
343 | "flags": {},
344 | "order": 7,
345 | "mode": 0,
346 | "inputs": [
347 | {
348 | "name": "model",
349 | "type": "MODEL",
350 | "link": 43
351 | },
352 | {
353 | "name": "positive",
354 | "type": "CONDITIONING",
355 | "link": 44
356 | },
357 | {
358 | "name": "negative",
359 | "type": "CONDITIONING",
360 | "link": 45
361 | },
362 | {
363 | "name": "latent_image",
364 | "type": "LATENT",
365 | "link": 46
366 | },
367 | {
368 | "name": "control_net",
369 | "type": "CONTROL_NET",
370 | "link": 47
371 | },
372 | {
373 | "name": "image",
374 | "type": "IMAGE",
375 | "link": 52
376 | },
377 | {
378 | "name": "clip",
379 | "type": "CLIP",
380 | "link": 49
381 | },
382 | {
383 | "name": "text",
384 | "type": "STRING",
385 | "link": 50,
386 | "widget": {
387 | "name": "text",
388 | "config": [
389 | "STRING",
390 | {
391 | "multiline": true
392 | }
393 | ]
394 | }
395 | }
396 | ],
397 | "outputs": [
398 | {
399 | "name": "LATENT",
400 | "type": "LATENT",
401 | "links": [
402 | 51
403 | ],
404 | "shape": 3,
405 | "slot_index": 0
406 | }
407 | ],
408 | "properties": {
409 | "Node name for S&R": "KSamplerPromptEditAndAlternate"
410 | },
411 | "widgets_values": [
412 | "enable",
413 | 312919440842634,
414 | "randomize",
415 | 20,
416 | 7,
417 | "euler_ancestral",
418 | "normal",
419 | 0,
420 | "disable",
421 | ""
422 | ]
423 | }
424 | ],
425 | "links": [
426 | [
427 | 3,
428 | 4,
429 | 1,
430 | 6,
431 | 0,
432 | "CLIP"
433 | ],
434 | [
435 | 5,
436 | 4,
437 | 1,
438 | 7,
439 | 0,
440 | "CLIP"
441 | ],
442 | [
443 | 8,
444 | 4,
445 | 2,
446 | 8,
447 | 1,
448 | "VAE"
449 | ],
450 | [
451 | 9,
452 | 8,
453 | 0,
454 | 9,
455 | 0,
456 | "IMAGE"
457 | ],
458 | [
459 | 43,
460 | 4,
461 | 0,
462 | 23,
463 | 0,
464 | "MODEL"
465 | ],
466 | [
467 | 44,
468 | 6,
469 | 0,
470 | 23,
471 | 1,
472 | "CONDITIONING"
473 | ],
474 | [
475 | 45,
476 | 7,
477 | 0,
478 | 23,
479 | 2,
480 | "CONDITIONING"
481 | ],
482 | [
483 | 46,
484 | 5,
485 | 0,
486 | 23,
487 | 3,
488 | "LATENT"
489 | ],
490 | [
491 | 47,
492 | 15,
493 | 0,
494 | 23,
495 | 4,
496 | "CONTROL_NET"
497 | ],
498 | [
499 | 49,
500 | 4,
501 | 1,
502 | 23,
503 | 6,
504 | "CLIP"
505 | ],
506 | [
507 | 50,
508 | 18,
509 | 0,
510 | 23,
511 | 7,
512 | "STRING"
513 | ],
514 | [
515 | 51,
516 | 23,
517 | 0,
518 | 8,
519 | 0,
520 | "LATENT"
521 | ],
522 | [
523 | 52,
524 | 26,
525 | 0,
526 | 23,
527 | 5,
528 | "IMAGE"
529 | ]
530 | ],
531 | "groups": [],
532 | "config": {},
533 | "extra": {},
534 | "version": 0.4
535 | }
--------------------------------------------------------------------------------
/WordAsImage_workflow.json:
--------------------------------------------------------------------------------
1 | {
2 | "last_node_id": 33,
3 | "last_link_id": 62,
4 | "nodes": [
5 | {
6 | "id": 7,
7 | "type": "CLIPTextEncode",
8 | "pos": [
9 | 413,
10 | 389
11 | ],
12 | "size": {
13 | "0": 425.27801513671875,
14 | "1": 180.6060791015625
15 | },
16 | "flags": {},
17 | "order": 6,
18 | "mode": 0,
19 | "inputs": [
20 | {
21 | "name": "clip",
22 | "type": "CLIP",
23 | "link": 5
24 | }
25 | ],
26 | "outputs": [
27 | {
28 | "name": "CONDITIONING",
29 | "type": "CONDITIONING",
30 | "links": [
31 | 6
32 | ],
33 | "slot_index": 0
34 | }
35 | ],
36 | "properties": {
37 | "Node name for S&R": "CLIPTextEncode"
38 | },
39 | "widgets_values": [
40 | ""
41 | ]
42 | },
43 | {
44 | "id": 8,
45 | "type": "VAEDecode",
46 | "pos": [
47 | 1209,
48 | 188
49 | ],
50 | "size": {
51 | "0": 210,
52 | "1": 46
53 | },
54 | "flags": {},
55 | "order": 12,
56 | "mode": 0,
57 | "inputs": [
58 | {
59 | "name": "samples",
60 | "type": "LATENT",
61 | "link": 7
62 | },
63 | {
64 | "name": "vae",
65 | "type": "VAE",
66 | "link": 8
67 | }
68 | ],
69 | "outputs": [
70 | {
71 | "name": "IMAGE",
72 | "type": "IMAGE",
73 | "links": [
74 | 9
75 | ],
76 | "slot_index": 0
77 | }
78 | ],
79 | "properties": {
80 | "Node name for S&R": "VAEDecode"
81 | }
82 | },
83 | {
84 | "id": 9,
85 | "type": "SaveImage",
86 | "pos": [
87 | 1451,
88 | 189
89 | ],
90 | "size": {
91 | "0": 210,
92 | "1": 270
93 | },
94 | "flags": {},
95 | "order": 13,
96 | "mode": 0,
97 | "inputs": [
98 | {
99 | "name": "images",
100 | "type": "IMAGE",
101 | "link": 9
102 | }
103 | ],
104 | "properties": {},
105 | "widgets_values": [
106 | "ComfyUI"
107 | ]
108 | },
109 | {
110 | "id": 12,
111 | "type": "PreviewImage",
112 | "pos": [
113 | 764,
114 | 828
115 | ],
116 | "size": {
117 | "0": 210,
118 | "1": 246
119 | },
120 | "flags": {},
121 | "order": 9,
122 | "mode": 0,
123 | "inputs": [
124 | {
125 | "name": "images",
126 | "type": "IMAGE",
127 | "link": 57
128 | }
129 | ],
130 | "properties": {
131 | "Node name for S&R": "PreviewImage"
132 | }
133 | },
134 | {
135 | "id": 6,
136 | "type": "CLIPTextEncode",
137 | "pos": [
138 | 415,
139 | 186
140 | ],
141 | "size": {
142 | "0": 422.84503173828125,
143 | "1": 164.31304931640625
144 | },
145 | "flags": {},
146 | "order": 5,
147 | "mode": 0,
148 | "inputs": [
149 | {
150 | "name": "clip",
151 | "type": "CLIP",
152 | "link": 3
153 | }
154 | ],
155 | "outputs": [
156 | {
157 | "name": "CONDITIONING",
158 | "type": "CONDITIONING",
159 | "links": [
160 | 23
161 | ],
162 | "slot_index": 0
163 | }
164 | ],
165 | "properties": {
166 | "Node name for S&R": "CLIPTextEncode"
167 | },
168 | "widgets_values": [
169 | "cloud shaped stylized font letters"
170 | ]
171 | },
172 | {
173 | "id": 20,
174 | "type": "ControlNetLoader",
175 | "pos": [
176 | 991,
177 | 720
178 | ],
179 | "size": {
180 | "0": 315,
181 | "1": 58
182 | },
183 | "flags": {},
184 | "order": 0,
185 | "mode": 0,
186 | "outputs": [
187 | {
188 | "name": "CONTROL_NET",
189 | "type": "CONTROL_NET",
190 | "links": [
191 | 21
192 | ],
193 | "shape": 3,
194 | "slot_index": 0
195 | }
196 | ],
197 | "properties": {
198 | "Node name for S&R": "ControlNetLoader"
199 | },
200 | "widgets_values": [
201 | "t2iadapter_depth_sd15v2.pth"
202 | ]
203 | },
204 | {
205 | "id": 4,
206 | "type": "CheckpointLoaderSimple",
207 | "pos": [
208 | 26,
209 | 474
210 | ],
211 | "size": {
212 | "0": 315,
213 | "1": 98
214 | },
215 | "flags": {},
216 | "order": 1,
217 | "mode": 0,
218 | "outputs": [
219 | {
220 | "name": "MODEL",
221 | "type": "MODEL",
222 | "links": [
223 | 1
224 | ],
225 | "slot_index": 0
226 | },
227 | {
228 | "name": "CLIP",
229 | "type": "CLIP",
230 | "links": [
231 | 3,
232 | 5
233 | ],
234 | "slot_index": 1
235 | },
236 | {
237 | "name": "VAE",
238 | "type": "VAE",
239 | "links": [
240 | 8
241 | ],
242 | "slot_index": 2
243 | }
244 | ],
245 | "properties": {
246 | "Node name for S&R": "CheckpointLoaderSimple"
247 | },
248 | "widgets_values": [
249 | "deliberate_v2.ckpt"
250 | ]
251 | },
252 | {
253 | "id": 32,
254 | "type": "Integer",
255 | "pos": [
256 | -528,
257 | 1196
258 | ],
259 | "size": {
260 | "0": 315,
261 | "1": 58
262 | },
263 | "flags": {},
264 | "order": 2,
265 | "mode": 0,
266 | "outputs": [
267 | {
268 | "name": "INT",
269 | "type": "INT",
270 | "links": [
271 | 51,
272 | 52,
273 | 53
274 | ],
275 | "shape": 3,
276 | "slot_index": 0
277 | }
278 | ],
279 | "properties": {
280 | "Node name for S&R": "Integer"
281 | },
282 | "widgets_values": [
283 | 143
284 | ]
285 | },
286 | {
287 | "id": 11,
288 | "type": "CompositeImage",
289 | "pos": [
290 | 371,
291 | 824
292 | ],
293 | "size": {
294 | "0": 315,
295 | "1": 102
296 | },
297 | "flags": {},
298 | "order": 8,
299 | "mode": 0,
300 | "inputs": [
301 | {
302 | "name": "bg_image",
303 | "type": "IMAGE",
304 | "link": 60
305 | },
306 | {
307 | "name": "inp_img",
308 | "type": "IMAGE",
309 | "link": 11
310 | }
311 | ],
312 | "outputs": [
313 | {
314 | "name": "IMAGE",
315 | "type": "IMAGE",
316 | "links": [
317 | 57,
318 | 58
319 | ],
320 | "shape": 3,
321 | "slot_index": 0
322 | }
323 | ],
324 | "properties": {
325 | "Node name for S&R": "CompositeImage"
326 | },
327 | "widgets_values": [
328 | 10,
329 | 166
330 | ]
331 | },
332 | {
333 | "id": 13,
334 | "type": "Image Blank",
335 | "pos": [
336 | -90,
337 | 1166
338 | ],
339 | "size": {
340 | "0": 315,
341 | "1": 154
342 | },
343 | "flags": {},
344 | "order": 7,
345 | "mode": 0,
346 | "inputs": [
347 | {
348 | "name": "red",
349 | "type": "INT",
350 | "link": 51,
351 | "widget": {
352 | "name": "red",
353 | "config": [
354 | "INT",
355 | {
356 | "default": 255,
357 | "min": 0,
358 | "max": 255,
359 | "step": 1
360 | }
361 | ]
362 | }
363 | },
364 | {
365 | "name": "green",
366 | "type": "INT",
367 | "link": 52,
368 | "widget": {
369 | "name": "green",
370 | "config": [
371 | "INT",
372 | {
373 | "default": 255,
374 | "min": 0,
375 | "max": 255,
376 | "step": 1
377 | }
378 | ]
379 | }
380 | },
381 | {
382 | "name": "blue",
383 | "type": "INT",
384 | "link": 53,
385 | "widget": {
386 | "name": "blue",
387 | "config": [
388 | "INT",
389 | {
390 | "default": 255,
391 | "min": 0,
392 | "max": 255,
393 | "step": 1
394 | }
395 | ]
396 | }
397 | }
398 | ],
399 | "outputs": [
400 | {
401 | "name": "IMAGE",
402 | "type": "IMAGE",
403 | "links": [
404 | 60
405 | ],
406 | "shape": 3,
407 | "slot_index": 0
408 | }
409 | ],
410 | "properties": {
411 | "Node name for S&R": "Image Blank"
412 | },
413 | "widgets_values": [
414 | 768,
415 | 512,
416 | 141,
417 | 141,
418 | 141
419 | ]
420 | },
421 | {
422 | "id": 10,
423 | "type": "WordAsImage",
424 | "pos": [
425 | -105,
426 | 713
427 | ],
428 | "size": {
429 | "0": 315,
430 | "1": 394
431 | },
432 | "flags": {},
433 | "order": 3,
434 | "mode": 0,
435 | "outputs": [
436 | {
437 | "name": "IMAGE",
438 | "type": "IMAGE",
439 | "links": [
440 | 11
441 | ],
442 | "shape": 3,
443 | "slot_index": 0
444 | }
445 | ],
446 | "properties": {
447 | "Node name for S&R": "WordAsImage"
448 | },
449 | "widgets_values": [
450 | "ComfyUI",
451 | "disable",
452 | "q4",
453 | 45,
454 | 5,
455 | "LiberationSans-Regular.ttf",
456 | 0,
457 | 182,
458 | "disable",
459 | 0.35000000000000014,
460 | 0.41000000000000014,
461 | 255,
462 | 5,
463 | "disable",
464 | 10
465 | ]
466 | },
467 | {
468 | "id": 3,
469 | "type": "KSampler",
470 | "pos": [
471 | 863,
472 | 186
473 | ],
474 | "size": {
475 | "0": 315,
476 | "1": 474
477 | },
478 | "flags": {},
479 | "order": 11,
480 | "mode": 0,
481 | "inputs": [
482 | {
483 | "name": "model",
484 | "type": "MODEL",
485 | "link": 1
486 | },
487 | {
488 | "name": "positive",
489 | "type": "CONDITIONING",
490 | "link": 24
491 | },
492 | {
493 | "name": "negative",
494 | "type": "CONDITIONING",
495 | "link": 6
496 | },
497 | {
498 | "name": "latent_image",
499 | "type": "LATENT",
500 | "link": 62
501 | }
502 | ],
503 | "outputs": [
504 | {
505 | "name": "LATENT",
506 | "type": "LATENT",
507 | "links": [
508 | 7
509 | ],
510 | "slot_index": 0
511 | }
512 | ],
513 | "properties": {
514 | "Node name for S&R": "KSampler"
515 | },
516 | "widgets_values": [
517 | 550305172962533,
518 | "randomize",
519 | 53,
520 | 8,
521 | "euler_ancestral",
522 | "normal",
523 | 0.6199999999999999
524 | ]
525 | },
526 | {
527 | "id": 19,
528 | "type": "ControlNetApply",
529 | "pos": [
530 | 1341.4100422363274,
531 | 693.3911348571775
532 | ],
533 | "size": {
534 | "0": 317.4000244140625,
535 | "1": 98
536 | },
537 | "flags": {},
538 | "order": 10,
539 | "mode": 0,
540 | "inputs": [
541 | {
542 | "name": "conditioning",
543 | "type": "CONDITIONING",
544 | "link": 23
545 | },
546 | {
547 | "name": "control_net",
548 | "type": "CONTROL_NET",
549 | "link": 21
550 | },
551 | {
552 | "name": "image",
553 | "type": "IMAGE",
554 | "link": 58
555 | }
556 | ],
557 | "outputs": [
558 | {
559 | "name": "CONDITIONING",
560 | "type": "CONDITIONING",
561 | "links": [
562 | 24
563 | ],
564 | "shape": 3,
565 | "slot_index": 0
566 | }
567 | ],
568 | "properties": {
569 | "Node name for S&R": "ControlNetApply"
570 | },
571 | "widgets_values": [
572 | 0.43000000000000005
573 | ]
574 | },
575 | {
576 | "id": 5,
577 | "type": "EmptyLatentImage",
578 | "pos": [
579 | 473,
580 | 609
581 | ],
582 | "size": {
583 | "0": 315,
584 | "1": 106
585 | },
586 | "flags": {},
587 | "order": 4,
588 | "mode": 0,
589 | "outputs": [
590 | {
591 | "name": "LATENT",
592 | "type": "LATENT",
593 | "links": [
594 | 62
595 | ],
596 | "slot_index": 0
597 | }
598 | ],
599 | "properties": {
600 | "Node name for S&R": "EmptyLatentImage"
601 | },
602 | "widgets_values": [
603 | 768,
604 | 512,
605 | 1
606 | ]
607 | }
608 | ],
609 | "links": [
610 | [
611 | 1,
612 | 4,
613 | 0,
614 | 3,
615 | 0,
616 | "MODEL"
617 | ],
618 | [
619 | 3,
620 | 4,
621 | 1,
622 | 6,
623 | 0,
624 | "CLIP"
625 | ],
626 | [
627 | 5,
628 | 4,
629 | 1,
630 | 7,
631 | 0,
632 | "CLIP"
633 | ],
634 | [
635 | 6,
636 | 7,
637 | 0,
638 | 3,
639 | 2,
640 | "CONDITIONING"
641 | ],
642 | [
643 | 7,
644 | 3,
645 | 0,
646 | 8,
647 | 0,
648 | "LATENT"
649 | ],
650 | [
651 | 8,
652 | 4,
653 | 2,
654 | 8,
655 | 1,
656 | "VAE"
657 | ],
658 | [
659 | 9,
660 | 8,
661 | 0,
662 | 9,
663 | 0,
664 | "IMAGE"
665 | ],
666 | [
667 | 11,
668 | 10,
669 | 0,
670 | 11,
671 | 1,
672 | "IMAGE"
673 | ],
674 | [
675 | 21,
676 | 20,
677 | 0,
678 | 19,
679 | 1,
680 | "CONTROL_NET"
681 | ],
682 | [
683 | 23,
684 | 6,
685 | 0,
686 | 19,
687 | 0,
688 | "CONDITIONING"
689 | ],
690 | [
691 | 24,
692 | 19,
693 | 0,
694 | 3,
695 | 1,
696 | "CONDITIONING"
697 | ],
698 | [
699 | 51,
700 | 32,
701 | 0,
702 | 13,
703 | 0,
704 | "INT"
705 | ],
706 | [
707 | 52,
708 | 32,
709 | 0,
710 | 13,
711 | 1,
712 | "INT"
713 | ],
714 | [
715 | 53,
716 | 32,
717 | 0,
718 | 13,
719 | 2,
720 | "INT"
721 | ],
722 | [
723 | 57,
724 | 11,
725 | 0,
726 | 12,
727 | 0,
728 | "IMAGE"
729 | ],
730 | [
731 | 58,
732 | 11,
733 | 0,
734 | 19,
735 | 2,
736 | "IMAGE"
737 | ],
738 | [
739 | 60,
740 | 13,
741 | 0,
742 | 11,
743 | 0,
744 | "IMAGE"
745 | ],
746 | [
747 | 62,
748 | 5,
749 | 0,
750 | 3,
751 | 3,
752 | "LATENT"
753 | ]
754 | ],
755 | "groups": [],
756 | "config": {},
757 | "extra": {},
758 | "version": 0.4
759 | }
--------------------------------------------------------------------------------
/SyrianFalconNodes.py:
--------------------------------------------------------------------------------
2 | import torch
3 | from PIL import Image, ImageDraw, ImageFont, ImageEnhance, ImageOps
4 | import PIL
5 | import torch.nn.functional as F
6 | import cv2
7 | import os
9 | import sys
10 | import json
11 | import hashlib
12 | import traceback
13 | import time
14 | import re
15 | import glob
16 | from PIL.PngImagePlugin import PngInfo
17 | import numpy as np
18 | import safetensors.torch
19 | import random
20 | sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy"))
21 |
22 |
23 | import comfy.diffusers_load
24 | import comfy.samplers
25 | import comfy.sample
26 | import comfy.sd
27 | import comfy.utils
28 |
29 | import comfy.clip_vision
30 |
31 | import comfy.model_management
32 | import importlib
33 |
34 | import folder_paths
35 | import latent_preview
36 |
37 | import math
38 | import struct
39 |
40 | dir = ""
41 | fntslist = []
42 | fntsdir = os.listdir("/usr/share/fonts/truetype/")
43 | for f in fntsdir:
44 | try:
45 | if os.path.isdir("/usr/share/fonts/truetype/"+f):
46 | os.chdir("/usr/share/fonts/truetype/"+f)
47 | for file in glob.glob("*.ttf"):
48 | fntslist.append(file)
49 | except:
50 | dir = "/usr/share/fonts/truetype/freefont/"
51 | os.chdir("/usr/share/fonts/truetype/"+f)
52 | for file in glob.glob("*.ttf"):
53 | fntslist.append(file)
54 |
55 |
56 |
57 |
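   | # Renders a text string as a grayscale white-on-black image, with optional fake
   | # depth (stacked offset copies), border, and 3D perspective warp, intended as a
   | # hint image for depth ControlNet / T2I-Adapter models.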
58 | class WordAsImage:
59 | def __init__(self):
60 | pass
61 |
62 | @classmethod
63 | def INPUT_TYPES(s):
64 | return {
65 | "required": {
66 | "text": ("STRING", {"default": '', "multiline": False}),
67 | "add_depth": (["enable", "disable"],),
68 | "depth_direction": (["q1", "q2","q3","q4"],),
69 | "depth_factor": ("INT", {
70 | "default": 10,
71 | "min": 0,
72 | "max": 360,
73 | "step": 1,
74 | }),
75 | "letter_thickness": ("INT", {
76 | "default": 10,
77 | "min": 0,
78 | "max": 360,
79 | "step": 1,
80 | }),
81 | "font": (fntslist,),
82 | "rotate": ("INT", {
83 | "default": 0,
84 | "min": -360,
85 | "max": 360,
86 | "step": 1,
87 | }),
88 | "size": ("INT", {
89 | "default": 60,
90 | "min": 0,
91 | "max": 1000,
92 | "step": 1,
93 | }),
94 | "rotate_3d": (["enable", "disable"],),
95 | "w": ("FLOAT", {
96 | "default": 0,
97 | "min": 0,
98 | "max": 1,
99 | "step": 0.01,
100 | }),
101 | "h": ("FLOAT", {
102 | "default": 0,
103 | "min": 0,
104 | "max": 1,
105 | "step": 0.01,
106 | }),
107 | "distance": ("INT", {
108 | "default": 255,
109 | "min": 0,
110 | "max": 255,
111 | "step": 1,
112 | }),
113 | "fade": ("INT", {
114 | "default": 5,
115 | "min": 0,
116 | "max": 255,
117 | "step": 1,
118 | }),
119 | "add_border":(["enable","disable"],),
120 | "border_thickness": ("INT", {
121 | "default": 10,
122 | "min": 0,
123 | "max": 360,
124 | "step": 1,
125 | }) }
126 | }
127 |
128 | RETURN_TYPES = ("IMAGE",)
129 | FUNCTION = "word_as_image"
130 |
131 | CATEGORY = "SyrianFalcon/nodes"
132 |
133 | def word_as_image(self, text: str, add_depth: str,font: str,rotate: int,add_border:str,size: int,depth_factor:int,border_thickness:int,letter_thickness:int,rotate_3d:str,w:float,h:float,distance:int,fade:int,depth_direction:str):
134 | unicode_text = text
135 | tp = font
136 | try:
137 |     font = ImageFont.truetype("C:\\Windows\\Fonts\\"+tp, size, encoding="unic")
138 | except OSError:
139 |     # the font list above is gathered across every truetype subdirectory,
    |     # so search them all instead of assuming freefont
    |     matches = glob.glob(os.path.join("/usr/share/fonts/truetype/", "**", tp), recursive=True)
    |     font = ImageFont.truetype(matches[0] if matches else tp, size, encoding="unic")
140 |
141 | # get the line size (ImageFont.getsize was removed in Pillow 10; fall back to getbbox)
142 | try:
    |     text_width, text_height = font.getsize(unicode_text)
    | except AttributeError:
    |     bbox = font.getbbox(unicode_text)
    |     text_width, text_height = bbox[2] - bbox[0], bbox[3] - bbox[1]
143 | # create a blank canvas with extra space between lines
144 | newcanvas = Image.new('RGBA', (text_width+20 +letter_thickness+ border_thickness+depth_factor,text_height+20 + border_thickness+depth_factor+letter_thickness),(0,0,0,0))
145 | canvas = Image.new('L', (text_width+20 + border_thickness+depth_factor+letter_thickness, text_height+20 + border_thickness+depth_factor+letter_thickness),"black")
146 | # draw the text onto the text canvas, and use blue as the text color
147 | mask_img = Image.new('L', canvas.size, 0)
148 | draw = ImageDraw.Draw(canvas)
149 | if add_border == "enable":
150 | draw.rectangle([0, 0, text_width+30, text_height+30],fill="white")
151 | draw.rectangle([border_thickness, border_thickness, text_width+30-border_thickness, text_height+30-border_thickness],fill="black")
152 |
153 | if add_depth == "enable":
154 | for i in range(0,depth_factor):
155 | clr = int(distance/depth_factor)
156 | x = distance - int(distance/depth_factor)*depth_factor
157 | if depth_direction == "q1":
158 | draw.text((1*i, -i*1),unicode_text ,(clr*i+x),font=font,stroke_width = letter_thickness,stroke_fill=(clr*i+x))
159 | elif depth_direction =="q2":
160 | draw.text((-1*i, -i*1),unicode_text ,(clr*i+x),font=font,stroke_width = letter_thickness,stroke_fill=(clr*i+x))
161 | elif depth_direction == "q3":
162 | draw.text((-1*i, i*1),unicode_text ,(clr*i+x),font=font,stroke_width = letter_thickness,stroke_fill=(clr*i+x))
163 | elif depth_direction == "q4":
164 | draw.text((1*i, i*1),unicode_text ,(clr*i+x),font=font,stroke_width = letter_thickness,stroke_fill=(clr*i+x))
165 |
166 | else:
167 | draw.text((0+letter_thickness, 0),text,(distance),font=font,stroke_width = letter_thickness,stroke_fill=(distance))
168 | newcanvas.paste(canvas,(0,0),canvas)
169 | canvas = newcanvas
170 | if rotate > 0:
171 | canvas = canvas.rotate(rotate, PIL.Image.NEAREST, expand = 1)
172 |
173 | if rotate_3d == "enable":
174 | newcanvas = Image.new('RGBA', (text_width*2,text_height*2),(0,0,0,0))
175 | clrs = []
176 | for idx,i in enumerate(unicode_text):
177 | clr = int(255/(len(unicode_text)+fade))
178 | x = 255 - int(255/(len(unicode_text)+fade))*(len(unicode_text)+fade)
179 | newclr = clr*(len(unicode_text)+fade-idx)+x
180 | clrs.append(newclr)
181 | print(clrs)
182 | for i in range(0,depth_factor):
183 | canvas = Image.new('L', (text_width*2,text_height*2),"black")
184 | def find_coeffs(pa, pb):
185 | matrix = []
186 | for p1, p2 in zip(pa, pb):
187 | matrix.append([p1[0], p1[1], 1, 0, 0, 0, -p2[0]*p1[0], -p2[0]*p1[1]])
188 | matrix.append([0, 0, 0, p1[0], p1[1], 1, -p2[1]*p1[0], -p2[1]*p1[1]])
189 |
190 | A = np.matrix(matrix, dtype=float)
191 | B = np.array(pb).reshape(8)
192 |
193 | res = np.dot(np.linalg.inv(A.T * A) * A.T, B)
194 | return np.array(res).reshape(8)
195 | width,height = canvas.size
196 | h1 = height*h
197 | l = 1-h1
198 | w1 = width*w
199 | coeffs = find_coeffs(
200 | [(0+w1, 0), (width-w1, 0+h1), (width-w1, height-h1), (0+w1, height)],
201 | [(0, 0), (width, 0), (width, height), (0, height)])
202 | mask_img = Image.new('L', canvas.size, 0)
203 | draw = ImageDraw.Draw(canvas)
204 | f = 0
205 |
206 | for idx,j in enumerate(unicode_text):
207 | clr = int(clrs[idx]/depth_factor)
208 | x = clrs[idx] - int(clrs[idx]/depth_factor)*depth_factor
209 | draw.text((f, 0),j ,(clr*(i+1)+x),font=font,stroke_width = letter_thickness,stroke_fill=(clr*i+x))
210 | f = font.getsize(unicode_text[0:idx+1])[0] if hasattr(font, "getsize") else font.getbbox(unicode_text[0:idx+1])[2]
211 | if rotate > 0:
212 | canvas = canvas.rotate(rotate, PIL.Image.NEAREST, expand = 1)
213 | canvas = canvas.transform((width, height), Image.PERSPECTIVE, coeffs,
214 | Image.BICUBIC)
215 | if depth_direction == "q1":
216 | newcanvas.paste(canvas,(0+i,0-i+depth_factor),canvas)
217 | elif depth_direction == "q2":
218 | newcanvas.paste(canvas,(0-i+depth_factor,0-i+depth_factor),canvas)
219 | elif depth_direction == "q3":
220 | newcanvas.paste(canvas,(0-i+depth_factor,0+i),canvas)
221 | elif depth_direction == "q4":
222 | newcanvas.paste(canvas,(0+i,0+i),canvas)
223 | canvas = newcanvas
224 | image = np.array(canvas).astype(np.float32) / 255.0
225 | image = torch.from_numpy(image)[None,]
226 | return (image,)
227 |
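   | # Builds a QR code image for data_url using the qrcode package (installed via
   | # pip on first use), with optional fake depth from stacked, shifted copies.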
228 | class QRGenerate:
229 | def __init__(self):
230 | pass
231 |
232 | @classmethod
233 | def INPUT_TYPES(s):
234 | return {
235 | "required": {
236 | "data_url": ("STRING", {"default": '', "multiline": False}),
237 | "add_depth": (["enable", "disable"],),
238 | "depth_factor": ("INT", {
239 | "default": 10,
240 | "min": 0,
241 | "max": 360,
242 | "step": 1,
243 | }),
244 | "version": ("INT", {
245 | "default": 10,
246 | "min": 0,
247 | "max": 360,
248 | "step": 1,
249 | }),
250 | "rotate": ("INT", {
251 | "default": 0,
252 | "min": -360,
253 | "max": 360,
254 | "step": 1,
255 | }) }
256 | }
257 |
258 | RETURN_TYPES = ("IMAGE",)
259 | FUNCTION = "word_as_image"
260 |
261 | CATEGORY = "SyrianFalcon/nodes"
262 |
263 | def word_as_image(self, data_url: str, add_depth: str,rotate: int,version:int,depth_factor:int):
264 | try:
265 | import qrcode,sys,subprocess
266 |
267 | except ImportError:
268 | import pip
269 |
270 | def install(package):
271 | if hasattr(pip, 'main'):
272 | pip.main(['install', package])
273 | else:
274 | pip._internal.main(['install', package])
275 | install("qrcode")
276 | import qrcode,sys,subprocess
277 | from PIL import Image, ImageDraw, ImageFont, ImageEnhance, ImageOps
278 | import time
279 | data = data_url
280 |
281 | # Creating an instance of QRCode class
282 | qr = qrcode.QRCode(version = version,
283 | box_size = 20,
284 | border = 5)
285 |
286 | # Adding data to the instance 'qr'
287 | qr.add_data(data)
288 |
289 | qr.make(fit = True)
290 | img = qr.make_image(fill_color = (125,125,125),
291 | back_color = 'transparent')
292 | img = img.resize((512,512))
293 | newbckgrnd = Image.new("RGBA",(512,512),(0,0,0,0))
294 | bckgrnd = Image.new("L",(512,512),"black")
295 | clr = int(-255/depth_factor)
296 | x = 1 - int(-255/depth_factor)*depth_factor
297 | data = img.getdata()
298 | if add_depth == "enable":
299 | for i in range(0,depth_factor):
300 | new_image_data = []
301 | for item in data:
302 | if item[0] in range(1,255):
303 | new_image_data.append((255, 255, 255,199))
304 | else:
305 | new_image_data.append(item)
306 | img.putdata(new_image_data)
307 | bckgrnd.paste(img,(0+i*1,0+i*1),img)
308 | #bckgrnd.paste(img,(0,0),img)
309 | if rotate >0:
310 | bckgrnd = bckgrnd.rotate(rotate, Image.NEAREST, expand = 1)
311 | data = bckgrnd.getdata()
312 | new_image_data = []
313 | for item in data:
314 | if item==0:
315 | new_image_data.append(0)
316 | else:
317 | new_image_data.append(item)
318 | bckgrnd.putdata(new_image_data)
319 | newbckgrnd.paste(bckgrnd,(0,0),bckgrnd)
320 | image = np.array(newbckgrnd).astype(np.float32) / 255.0
321 | image = torch.from_numpy(image)[None,]
322 | return (image,)
323 |
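   | # Parses <a|b|...> alternation tokens from the prompt. Each entry of the
   | # returned list is [token_text, span_in_prompt, option_list, rotation_counter].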
324 | def alternatingtokenfunc(prompt):
325 | newprompt = prompt
326 | occurslist = []
327 | while len(newprompt)>0:
328 | occurs = re.search(r"<[^<]+\|[^<]+>", newprompt)
329 | if occurs:
330 | oldoccurs = re.search(re.escape(newprompt[occurs.span()[0]:occurs.span()[1]]),prompt)
331 | occurslist.append([newprompt[occurs.span()[0]:occurs.span()[1]],oldoccurs.span()])
332 | newprompt = newprompt[occurs.span()[1]+1:]
333 | else:
334 | newprompt = ""
335 | for i in occurslist:
336 | i.append(i[0][1:-1].split("|"))
337 | i.append(0)
338 | # build a filtered copy; removing items from a list while iterating over it skips elements
339 | occurslist = [i for i in occurslist if len(i[0]) > 1]
341 | return occurslist
342 |
343 |
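   | # Expands the [a:b:step], [a:step] and [a::step] prompt-editing syntax; a step
   | # below 1 is scaled by the total step count. Returns the sequence of prompts
   | # plus the sorted step numbers (numloop) at which the active prompt switches.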
344 | def promptscreator(text,steps):
345 | prompt = text
346 | checkpoints = []
347 | replaces = {}
348 | additions = {}
349 | removes = {}
350 | initials = []
351 | numloop = []
352 | total = steps
353 | prompts = []
354 | occs = re.findall(r"\[[^\[]*]", prompt)
355 | #for i in occs:
356 | #checkpoints.append(re.search(occs[1][1:-1],prompt).span())
357 | for idx,i in enumerate(occs):
358 | #c = int(re.findall("[\d][^\]]*",i)[0][:]) if float(re.findall("[\d][^\]]*",i)[0][:])>=1 else int(float(re.findall("[\d][^\]]*",i)[0][:])*total)
359 | if len(re.findall(":",i)) > 1:
360 | if re.search("::",i):
361 | c = int(i[re.search("::",i).span()[1]:-1]) if float(i[re.search("::",i).span()[1]:-1])>=1 else int(float(i[re.search("::",i).span()[1]:-1])*total)
362 | if c not in numloop:
363 | numloop.append(c)
364 | removes[c] = [i,re.findall(".*:",i)[0][1:-2],re.findall(".*:",i)[0][1:-2]]
365 | initials.append([i,re.findall(".*:",i)[0][1:-2]])
366 | else:
367 | i2 = i[re.search(":",i).span()[0]+1:]
368 |
369 | c = int(i2[re.search(":",i2).span()[0]+1:-1]) if float(i2[re.search(":",i2).span()[0]+1:-1])>=1 else int(float(i2[re.search(":",i2).span()[0]+1:-1])*total)
370 | if c not in numloop:
371 | numloop.append(c)
372 |
373 | replaces[c] = [i,re.findall(":.*:",i)[0][1:-1],re.findall("[^:]*:",i)[0][1:-1]]
374 | initials.append([i,re.findall("[^:]*:",i)[0][1:-1]])
375 | elif len(re.findall(r"\|", i)) >= 1:
376 | continue
377 | else:
378 | c = int(i[re.search(":",i).span()[0]+1:-1]) if float(i[re.search(":",i).span()[0]+1:-1])>=1 else int(float(i[re.search(":",i).span()[0]+1:-1])*total)
379 | if c not in numloop:
380 | numloop.append(c)
381 |
382 | additions[c] = [i,re.findall(".*:",i)[0][1:-1],re.findall(".*:",i)[0][1:-1],re.search(i[1:-1],prompt).span()[0]]
383 | #initials.append([i,""])
384 |
385 |
386 |
387 | numloop.sort()
388 |
389 | for i in initials:
390 | prompt = re.sub("."+re.escape(i[0][1:-1])+".",i[1],prompt,1)
391 |
392 | prompts.append(prompt)
393 | for x in numloop:
394 | if x in replaces:
395 | prompt = re.sub(re.escape(replaces[x][2]),replaces[x][1],prompt,1)
396 |
397 | if x in removes:
398 | prompt = re.sub(removes[x][2],"",prompt,1)
399 | prompts.append(prompt)
400 | if len(prompts)==0:
401 | prompts.append(prompt)
402 | #print(prompts)
403 | numloop.append(steps-1)
404 | keys = []
405 | for j in additions:
406 | keys.append(j)
407 | keys.sort()
408 | print(keys)
409 | for a in keys:
410 | prompts[0] = re.sub(re.escape(additions[a][0]),"",prompts[0])
411 | for idx,j in enumerate(numloop):
412 | if a>j:
413 | prompts[idx+1] = re.sub(re.escape(additions[a][0]),"",prompts[idx+1])
414 | else:
415 | prompts[idx+1] = re.sub(re.escape(additions[a][0]),additions[a][2],prompts[idx+1])
416 | return prompts,numloop
417 |
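   | # Mirrors ComfyUI's built-in sampler helper; the extra `flag` argument selects
   | # between returning the latent dict directly (for chaining sampling segments)
   | # or as a 1-tuple (the node-output convention).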
418 | def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False,flag=False):
419 |
420 | device = comfy.model_management.get_torch_device()
421 | latent_image = latent["samples"]
422 |
423 | if disable_noise:
424 | noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
425 | else:
426 | batch_inds = latent["batch_index"] if "batch_index" in latent else None
427 | noise = comfy.sample.prepare_noise(latent_image, seed, batch_inds)
428 |
429 | noise_mask = None
430 | if "noise_mask" in latent:
431 | noise_mask = latent["noise_mask"]
432 |
433 | preview_format = "JPEG"
434 | if preview_format not in ["JPEG", "PNG"]:
435 | preview_format = "JPEG"
436 |
437 | previewer = latent_preview.get_previewer(device)
438 |
439 | pbar = comfy.utils.ProgressBar(steps)
440 | def callback(step, x0, x, total_steps):
441 | preview_bytes = None
442 | if previewer:
443 | preview_bytes = previewer.decode_latent_to_preview_image(preview_format, x0)
444 | pbar.update_absolute(step + 1, total_steps, preview_bytes)
445 |
446 | samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
447 | denoise=denoise, disable_noise=disable_noise, start_step=start_step, last_step=last_step,
448 | force_full_denoise=force_full_denoise, noise_mask=noise_mask, callback=callback)
449 | out = latent.copy()
450 | out["samples"] = samples
451 | if not flag:
452 | return out
453 | else:
454 | return (out, )
455 |
456 |
457 |
458 |
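   | # KSampler with prompt editing: encodes each prompt from promptscreator and
   | # samples the step range up to the next switch point, chaining the latent
   | # between segments; the ControlNet hint is re-applied to the positive
   | # conditioning for every segment.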
459 | class KSamplerPromptEdit:
460 | @classmethod
461 | def INPUT_TYPES(s):
462 | return {"required":
463 | {"model": ("MODEL",),
464 | "add_noise": (["enable", "disable"], ),
465 | "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
466 | "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
467 | "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
468 | "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
469 | "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
470 | "positive": ("CONDITIONING", ),
471 | "negative": ("CONDITIONING", ),
472 | "latent_image": ("LATENT", ),
473 | "control_net": ("CONTROL_NET", ),
474 | "image": ("IMAGE", ),
475 | "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
476 | "return_with_leftover_noise": (["disable", "enable"], ),"clip": ("CLIP", ),"text": ("STRING", {"multiline": True})
477 |
478 | }
479 | }
480 |
481 | RETURN_TYPES = ("LATENT",)
482 | FUNCTION = "sample"
483 |
484 | CATEGORY = "SyrianFalcon/nodes"
485 | def sample(self, clip,model, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, text, return_with_leftover_noise,control_net,image,strength, denoise=1.0):
486 | force_full_denoise = True
487 | if return_with_leftover_noise == "enable":
488 | force_full_denoise = False
489 | disable_noise = False
490 | if add_noise == "disable":
491 | disable_noise = True
492 |
493 | prompts,numloop = promptscreator(text,steps)
494 | if len(prompts) ==0:
495 | prompts.append(text)
496 | if len(numloop) < 1:
497 | numloop.append(steps-1)
498 | positive = ([[clip.encode(prompts[0]), {}]] )
499 | try:
500 | if strength == 0:
501 | pass
502 | else:
503 | c = []
504 | control_hint = image.movedim(-1,1)
505 | for t in positive:
506 | n = [t[0], t[1].copy()]
507 | c_net = control_net.copy().set_cond_hint(control_hint, strength)
508 | if 'control' in t[1]:
509 | c_net.set_previous_controlnet(t[1]['control'])
510 | n[1]['control'] = c_net
511 | c.append(n)
512 | positive = c
513 | except:
514 | pass
515 | c1 = common_ksampler(model, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise, disable_noise=disable_noise, start_step=0, last_step=numloop[0], force_full_denoise=force_full_denoise)
516 |
517 | for i in range(0,len(numloop)-1):
518 | positive = ([[clip.encode(prompts[i]), {}]] )
519 | try:
520 | if strength == 0:
521 | pass
522 | else:
523 | c = []
524 | control_hint = image.movedim(-1,1)
525 | for t in positive:
526 | n = [t[0], t[1].copy()]
527 | c_net = control_net.copy().set_cond_hint(control_hint, strength)
528 | if 'control' in t[1]:
529 | c_net.set_previous_controlnet(t[1]['control'])
530 | n[1]['control'] = c_net
531 | c.append(n)
532 | positive = c
533 | except:
534 | pass
535 | c1 = common_ksampler(model, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, c1, denoise=denoise, disable_noise=disable_noise, start_step=numloop[i], last_step=numloop[i+1], force_full_denoise=force_full_denoise)
536 | positive = ([[clip.encode(prompts[len(prompts)-1]), {}]] )
537 | try:
538 | if strength == 0:
539 | pass
540 | else:
541 | c = []
542 | control_hint = image.movedim(-1,1)
543 | for t in positive:
544 | n = [t[0], t[1].copy()]
545 | c_net = control_net.copy().set_cond_hint(control_hint, strength)
546 | if 'control' in t[1]:
547 | c_net.set_previous_controlnet(t[1]['control'])
548 | n[1]['control'] = c_net
549 | c.append(n)
550 | positive = c
551 | except:
552 | pass
553 | return common_ksampler(model, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, c1, denoise=denoise, disable_noise=disable_noise, start_step=numloop[len(numloop)-1], last_step=steps, force_full_denoise=force_full_denoise,flag=True)
554 |
555 |
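   | # KSampler with <a|b> token alternation: the prompt is re-encoded with the next
   | # option every step, and sampling proceeds one step at a time over the same latent.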
556 | class KSamplerAlternate:
557 | @classmethod
558 | def INPUT_TYPES(s):
559 | return {"required":
560 | {"model": ("MODEL",),
561 | "add_noise": (["enable", "disable"], ),
562 | "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
563 | "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
564 | "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
565 | "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
566 | "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
567 | "positive": ("CONDITIONING", ),
568 | "negative": ("CONDITIONING", ),
569 | "latent_image": ("LATENT", ),
570 | "control_net": ("CONTROL_NET", ),
571 | "image": ("IMAGE", ),
572 | "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
573 | "return_with_leftover_noise": (["disable", "enable"], ),"clip": ("CLIP", ),"text": ("STRING", {"multiline": True})
574 |
575 | }
576 | }
577 |
578 | RETURN_TYPES = ("LATENT",)
579 | FUNCTION = "sample"
580 |
581 | CATEGORY = "SyrianFalcon/nodes"
582 | def sample(self, clip,model, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, text, return_with_leftover_noise,control_net,image,strength, denoise=1.0):
583 | force_full_denoise = True
584 | if return_with_leftover_noise == "enable":
585 | force_full_denoise = False
586 | disable_noise = False
587 | if add_noise == "disable":
588 | disable_noise = True
589 | arr = alternatingtokenfunc(text)
590 | newprompt = text
    | newerprompt = newprompt  # fall back to the plain prompt when there are no alternation tokens
591 | for j in arr:
592 | if j[3]>=len(j[2]):
593 | j[3] = 0
594 | newerprompt = re.sub(re.escape(j[0]),j[2][j[3]],newprompt)
595 | newprompt = re.sub(re.escape(j[0]),j[2][j[3]],newprompt)
596 | j[3]+=1
597 | positive = ([[clip.encode(newerprompt), {}]] )
598 | c1 = common_ksampler(model, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise, disable_noise=disable_noise, start_step=0, last_step=1, force_full_denoise=force_full_denoise)
599 | for i in range(0,steps-1):
600 | newprompt = text
601 | for j in arr:
602 | if j[3]>=len(j[2]):
603 | j[3] = 0
604 | newerprompt = re.sub(re.escape(j[0]),j[2][j[3]],newprompt)
605 | newprompt = re.sub(re.escape(j[0]),j[2][j[3]],newprompt)
606 | j[3]+=1
607 | positive = ([[clip.encode(newerprompt), {}]] )
608 | c1 = common_ksampler(model, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, c1, denoise=denoise, disable_noise=disable_noise, start_step=i, last_step=i+1, force_full_denoise=force_full_denoise)
609 | newprompt = text
610 | for j in arr:
611 | if j[3]>=len(j[2]):
612 | j[3] = 0
613 | newerprompt = re.sub(re.escape(j[0]),j[2][j[3]],newprompt)
614 | newprompt = re.sub(re.escape(j[0]),j[2][j[3]],newprompt)
615 | j[3]+=1
616 | positive = ([[clip.encode(newerprompt), {}]] )
617 | return common_ksampler(model, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, c1, denoise=denoise, disable_noise=disable_noise, start_step=steps-1, last_step=steps, force_full_denoise=force_full_denoise,flag=True)
618 |
619 |
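   | # Combines both samplers above: prompt-editing switch points from promptscreator
   | # plus per-step token alternation, with an optional ControlNet hint (strength 0
   | # skips it) applied to the positive conditioning at every step.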
620 | class KSamplerPromptEditAndAlternate:
621 | @classmethod
622 | def INPUT_TYPES(s):
623 | return {"required":
624 | {"model": ("MODEL",),
625 | "add_noise": (["enable", "disable"], ),
626 | "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
627 | "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
628 | "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
629 | "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
630 | "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
631 | "positive": ("CONDITIONING", ),
632 | "negative": ("CONDITIONING", ),
633 | "latent_image": ("LATENT", ),
634 | "control_net": ("CONTROL_NET", ),
635 | "image": ("IMAGE", ),
636 | "ControlNet_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
637 | "return_with_leftover_noise": (["disable", "enable"], ),"clip": ("CLIP", ),"text": ("STRING", {"multiline": True})
638 |
639 | }
640 | }
641 |
642 | RETURN_TYPES = ("LATENT",)
643 | FUNCTION = "sample"
644 |
645 | CATEGORY = "SyrianFalcon/nodes"
646 | def sample(self, clip,model, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, text, return_with_leftover_noise,control_net,image,ControlNet_strength, denoise=1.0):
647 | force_full_denoise = True
648 | if return_with_leftover_noise == "enable":
649 | force_full_denoise = False
650 | disable_noise = False
651 | if add_noise == "disable":
652 | disable_noise = True
653 | prevseed = noise_seed
654 | strength = ControlNet_strength
655 | try:
656 | prompts,numloop = promptscreator(text,steps)
657 |
658 | if len(prompts) ==0:
659 | prompts.append(text)
660 | if len(numloop) < 1:
661 | numloop.append(steps-1)
662 | except:
663 | prompts = [text,0]
664 | numloop = []
665 | print(prompts)
666 | arr = alternatingtokenfunc(prompts[0])
667 | newprompt = prompts[0]
668 | newerprompt = newprompt
669 | for j in arr:
670 | if j[3]>=len(j[2]):
671 | j[3] = 0
672 | newerprompt = re.sub(re.escape(j[0]),j[2][j[3]],newprompt)
673 | newprompt = re.sub(re.escape(j[0]),j[2][j[3]],newprompt)
674 | j[3]+=1
675 | positive = ([[clip.encode(newerprompt), {}]] )
676 | try:
677 | if strength == 0:
678 | pass
679 | else:
680 | c = []
681 | control_hint = image.movedim(-1,1)
682 | for t in positive:
683 | n = [t[0], t[1].copy()]
684 | c_net = control_net.copy().set_cond_hint(control_hint, strength)
685 | if 'control' in t[1]:
686 | c_net.set_previous_controlnet(t[1]['control'])
687 | n[1]['control'] = c_net
688 | c.append(n)
689 | positive = c
690 | except:
691 | pass
692 | print(newerprompt)
693 | c1 = common_ksampler(model, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise, disable_noise=disable_noise, start_step=0, last_step=1, force_full_denoise=force_full_denoise)
694 | noise_seed = prevseed
695 | lastprompt = prompts[0]
696 | for i in range(0,steps-1):
697 | path = False
698 | for idx0,el in enumerate(numloop):
699 | if el== i+1:
700 | path = True
701 | idxx = idx0
702 | if path:
703 | if len(prompts)==1:
704 | print("no: "+str(prompts[idxx]))
705 | lastprompt = prompts[idxx]
706 | newarr = alternatingtokenfunc(prompts[idxx])
707 | newprompt = prompts[idxx]
708 | else:
709 | print("yes: "+str(prompts[idxx+1]))
710 | lastprompt = prompts[idxx+1]
711 | newarr = alternatingtokenfunc(prompts[idxx+1])
712 | newprompt = prompts[idxx+1]
713 | if len(newarr)>=1:
714 | for idx,j in enumerate(newarr):
715 | if arr[idx][3]>=len(j[2]):
716 | arr[idx][3] = 0
717 | newerprompt = re.sub(re.escape(j[0]),j[2][j[3]],newprompt)
718 | newprompt = re.sub(re.escape(j[0]),j[2][j[3]],newprompt)
719 | arr[idx][3]+=1
720 | else:
721 | newerprompt = newprompt
722 | positive = ([[clip.encode(newerprompt), {}]] )
723 | else:
724 | newprompt = lastprompt
725 | for j in arr:
726 | if j[3]>=len(j[2]):
727 | j[3] = 0
728 | newerprompt = re.sub(re.escape(j[0]),j[2][j[3]],newprompt)
729 | newprompt = re.sub(re.escape(j[0]),j[2][j[3]],newprompt)
730 | j[3]+=1
731 | print(newerprompt)
732 | positive = ([[clip.encode(newerprompt), {}]] )
733 | try:
734 | if strength == 0:
735 | pass
736 | else:
737 | c = []
738 | control_hint = image.movedim(-1,1)
739 | for t in positive:
740 | n = [t[0], t[1].copy()]
741 | c_net = control_net.copy().set_cond_hint(control_hint, strength)
742 | if 'control' in t[1]:
743 | c_net.set_previous_controlnet(t[1]['control'])
744 | n[1]['control'] = c_net
745 | c.append(n)
746 | positive = c
747 | except:
748 | pass
749 | print(newerprompt)
750 | prevseed = random.randint(0,10000000000)
751 | c1 = common_ksampler(model, prevseed, steps, cfg, sampler_name, scheduler, positive, negative, c1, denoise=denoise, disable_noise=disable_noise, start_step=i, last_step=i+1, force_full_denoise=force_full_denoise)
752 | newarr = alternatingtokenfunc(prompts[-1])
753 | for idx,j in enumerate(newarr):
754 | if arr[idx][3]>=len(j[2]):
755 | arr[idx][3] = 0
756 | newerprompt = re.sub(re.escape(j[0]),j[2][j[3]],newprompt)
757 | newprompt = re.sub(re.escape(j[0]),j[2][j[3]],newprompt)
758 | arr[idx][3]+=1
759 | positive = ([[clip.encode(newerprompt), {}]] )
760 | try:
761 | if strength == 0:
762 | pass
763 | else:
764 | c = []
765 | control_hint = image.movedim(-1,1)
766 | for t in positive:
767 | n = [t[0], t[1].copy()]
768 | c_net = control_net.copy().set_cond_hint(control_hint, strength)
769 | if 'control' in t[1]:
770 | c_net.set_previous_controlnet(t[1]['control'])
771 | n[1]['control'] = c_net
772 | c.append(n)
773 | positive = c
774 | except:
775 | pass
776 | print(newerprompt)
777 | return common_ksampler(model, prevseed, steps, cfg, sampler_name, scheduler, positive, negative, c1, denoise=denoise, disable_noise=disable_noise, start_step=steps-1, last_step=steps, force_full_denoise=force_full_denoise,flag=True)
778 |
779 |
780 |
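   | # Trimmed-down version of ComfyUI's SaveImage logic (same output folder and PNG
   | # metadata handling) so LoopBack can write intermediate frames to disk.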
781 | def save_images(images, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
782 | full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, folder_paths.get_output_directory(), images[0].shape[1], images[0].shape[0])
783 | for image in images:
784 | i = 255. * image.cpu().numpy()
785 | img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
786 | metadata = PngInfo()
787 | if prompt is not None:
788 | metadata.add_text("prompt", json.dumps(prompt))
789 | if extra_pnginfo is not None:
790 | for x in extra_pnginfo:
791 | metadata.add_text(x, json.dumps(extra_pnginfo[x]))
792 |
793 | file = f"{filename}_{counter:05}_.png"
794 | img.save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=4)
795 |
796 |
797 |
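   | # Img2img feedback loop: a first full-denoise pass from the input latent, then
   | # the result is fed back for the remaining passes at the chosen denoise;
   | # save_all writes the decoded image of every intermediate pass.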
798 | class LoopBack:
799 | @classmethod
800 | def INPUT_TYPES(s):
801 | return {"required":
802 | {"model": ("MODEL",),
803 | "loops": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
804 | "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
805 | "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
806 | "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
807 | "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
808 | "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
809 | "positive": ("CONDITIONING", ),
810 | "negative": ("CONDITIONING", ),
811 | "latent_image": ("LATENT", ),
812 | "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
813 | "vae": ("VAE", ),
814 | "save_all": (["enable", "disable"], ),
815 | }
816 | }
817 |
818 | RETURN_TYPES = ("LATENT",)
819 | FUNCTION = "sample"
820 |
821 | CATEGORY = "SyrianFalcon/nodes"
822 |
823 | def sample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,loops: int,vae,save_all, denoise=1.0):
824 | c1 = common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0)
825 | if save_all == "enable":
826 | im = vae.decode(c1["samples"])
827 | save_images(im,filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None)
828 | for i in range(0,loops-2 if loops>1 else 0):
829 | c1 = common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, c1, denoise=denoise)
830 | if save_all == "enable":
831 | im = vae.decode(c1["samples"])
832 | save_images(im,filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None)
833 | return common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, c1, denoise=denoise,flag=True)
834 |
835 |
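   | # Pastes inp_img onto bg_image at (x, y), using inp_img itself as the paste
   | # mask (its alpha channel), and returns the flattened RGB result as a tensor.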
836 | class CompositeImage:
837 | @classmethod
838 | def INPUT_TYPES(s):
839 | return {"required":
840 | {"bg_image":("IMAGE",),
841 | "inp_img":("IMAGE",),
842 | "x": ("INT", {"default": 0, "min": -10000, "max": 10000}),
843 | "y": ("INT", {"default": 0, "min": -10000, "max": 10000}),
844 |
845 | }
846 | }
847 |
848 | RETURN_TYPES = ("IMAGE",)
849 | FUNCTION = "sample"
850 |
851 | CATEGORY = "SyrianFalcon/nodes"
852 | def sample(self, bg_image:torch.Tensor,inp_img:torch.Tensor,x:int,y:int):
853 | img = bg_image[0].numpy()
854 | img = img*255.0
855 | inp = inp_img[0].numpy()
856 | inp = inp*255.0
857 | inp = Image.fromarray(np.uint8(inp))
858 | bgimg = Image.fromarray(np.uint8(img)).convert("RGBA")
859 | bgimg.paste(inp,(x,y),inp)
860 | bgimg = bgimg.convert("RGB")
861 | img = np.array(bgimg).astype(np.float32) / 255.0
862 | img = torch.from_numpy(img)[None,]
863 | return (img,)
864 |
865 |
866 | NODE_CLASS_MAPPINGS = {
867 | "WordAsImage": WordAsImage,
868 | "QRGenerate":QRGenerate,
869 | "KSamplerPromptEdit":KSamplerPromptEdit,
870 | "KSamplerAlternate":KSamplerAlternate,
871 | "KSamplerPromptEditAndAlternate":KSamplerPromptEditAndAlternate,
872 | "LoopBack":LoopBack,
873 | "CompositeImage": CompositeImage
874 | }
875 |
--------------------------------------------------------------------------------