${currBatchSize}/${totalBatchSize} Running...
`
218 | }
219 | await executeAndWaitForTargetNode(app, node);
220 | log('Queue finished')
221 | await new Promise(re => setTimeout(re, 500));
222 | }
223 | }
224 | } catch (error) {
225 | console.error(`Error while running queue: ${error}`)
226 |
227 | } finally {
228 | preview.value = `
`
229 | }
230 |
231 | })();
232 | }
233 | )
234 |
235 | const preview = node.addCustomWidget(DEBUG_STRING('Preview', ''))
236 | preview.parent = node
237 |
238 | const disableToggleWidget = node.addWidget("toggle", "Disable Unrelated Nodes", false, "", { "on": 'yes', "off": 'no' });
239 |
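// "Disable Unrelated Nodes": when the toggle is switched on, mute every node that is not
// reachable from this node via enableOnlyRelatedNodes(); when switched off, restore the
// nodes that call muted (or, if that list is missing, re-enable every node in the graph).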
240 | disableToggleWidget.doModeChange = (forceValue, skipOtherNodeCheck) => {
241 | console.log(`toggle changed`)
242 |
243 | const toggleValue = disableToggleWidget.value;
244 |
245 | if (toggleValue) {
246 | disableToggleWidget.notAlreadyMutedBlacklist = enableOnlyRelatedNodes(node)
247 | } else if (disableToggleWidget.notAlreadyMutedBlacklist) {
248 | for (const node of disableToggleWidget.notAlreadyMutedBlacklist) node.mode = 0;
249 | } else {
250 | app.graph._nodes.forEach((node) => {
251 | node.mode = 0;
252 | })
253 | }
254 | }
255 | disableToggleWidget.callback = () => {
256 | disableToggleWidget.doModeChange();
257 | };
258 |
259 | const autoQueueToggleWidget = node.addWidget("toggle", "Auto Queue", false, "", { "on": 'yes', "off": 'no' });
260 |
261 |
262 | node.setSize(node.computeSize());
263 |
264 |
265 | }
266 |
267 | let has_custom = false;
268 | if (nodeData.input && nodeData.input.required) {
269 | for (const i of Object.keys(nodeData.input.required)) {
270 | const input_type = nodeData.input.required[i][0];
271 |
272 | if (newTypes.includes(input_type)) {
273 | has_custom = true
274 | break
275 | }
276 | }
277 | }
278 | if (has_custom) {
279 | const onNodeCreated = nodeType.prototype.onNodeCreated
280 | nodeType.prototype.onNodeCreated = function() {
281 | const r = onNodeCreated ? onNodeCreated.apply(this, arguments) : undefined;
282 | this.serialize_widgets = true;
283 | this.setSize?.(this.computeSize());
284 |
285 | this.onRemoved = function() {
286 | shared.cleanupNode(this);
287 | }
288 | return r;
289 | }
290 |
291 | //- Extra menus
292 | const origGetExtraMenuOptions = nodeType.prototype.getExtraMenuOptions
293 | nodeType.prototype.getExtraMenuOptions = function (_, options) {
294 | const r = origGetExtraMenuOptions
295 | ? origGetExtraMenuOptions.apply(this, arguments)
296 | : undefined
297 | if (this.widgets) {
298 | let toInput = []
299 | let toWidget = []
300 | for (const w of this.widgets) {
301 | if (w.type === shared.CONVERTED_TYPE) {
302 | //- This is already handled by widgetinputs.js
303 | // toWidget.push({
304 | // content: `Convert ${w.name} to widget`,
305 | // callback: () => shared.convertToWidget(this, w),
306 | // });
307 | } else if (newTypes.includes(w.type)) {
308 | const config = nodeData?.input?.required[w.name] ||
309 | nodeData?.input?.optional?.[w.name] || [w.type, w.options || {}]
310 |
311 | toInput.push({
312 | content: `Convert ${w.name} to input`,
313 | callback: () => shared.convertToInput(this, w, config),
314 | })
315 | }
316 | }
317 | if (toInput.length) {
318 | options.push(...toInput, null)
319 | }
320 |
321 | if (toWidget.length) {
322 | options.push(...toWidget, null)
323 | }
324 | }
325 |
326 | return r
327 | }
328 | }
329 |
330 | log("Start setting komojini extension", nodeData.name)
331 |
332 | // Extending Python Nodes
333 | if (nodeData.name.endsWith("Getter")) {
334 | const onNodeCreated = nodeType.prototype.onNodeCreated
335 | nodeType.prototype.onNodeCreated = function () {
336 | const r = onNodeCreated
337 | ? onNodeCreated.apply(this, arguments)
338 | : undefined;
339 |
340 |
341 | var nameSuffix = "";
342 | if (nodeData.name.includes("Cache")) {
343 | nameSuffix = " (cached)";
344 | }
345 |
346 | this.widgets = [];
347 |
348 | this.addWidget(
349 | "combo",
350 | "key",
351 | "",
352 | (e) => {
353 | this.onRename();
354 | },
355 | {
356 | values: () => {
357 | const setterNodes = this.graph._nodes.filter((otherNode) => isSetter(otherNode));
358 | return setterNodes.map((otherNode) => otherNode.widgets[0].value).sort();
359 | }
360 | }
361 | );
362 |
363 | this.findSetter = function(graph) {
364 | const name = this.widgets[0].value;
365 | return graph._nodes.find(otherNode => isSetter(otherNode) && otherNode.widgets[0].value === name && name !== '');
366 | }
367 |
368 | this.setName = function(name) {
369 | this.widgets[0].value = name;
370 | this.onRename();
371 | this.serialize();
372 | }
373 |
374 | this.setType = function(type) {
375 | this.outputs[0].name = type;
376 | this.outputs[0].type = type;
377 | // this.validateLinks();
378 | }
379 |
380 | this.onRename = function() {
381 | const setter = this.findSetter(this.graph);
382 | if (setter) {
383 | let linkType = (setter.inputs[0].type);
384 |
385 | this.setType(linkType);
386 | this.title = "Get_" + setter.widgets[0].value + nameSuffix;
387 |
388 | if (app.ui.settings.getSettingValue("komojini.NodeAutoColor")){
389 | setColorAndBgColor.call(this, linkType);
390 | }
391 |
392 | } else {
393 | this.setType('*');
394 | }
395 | }
396 |
397 | this.size = this.computeSize();
398 |
399 | return r;
400 |
401 | }
402 |
403 |
404 | }
405 | else if (nodeData.name.endsWith("Setter")) {
406 | const onNodeCreated = nodeType.prototype.onNodeCreated
407 | nodeType.prototype.onNodeCreated = function () {
408 | const r = onNodeCreated
409 | ? onNodeCreated.apply(this, arguments)
410 | : undefined;
411 | const keyValue = findWidgetByName(this, "key").value ?? '';
412 | const node = this;
413 |
414 | if (!this.properties) {
415 | this.properties = {
416 | "previousName": "",
417 | };
418 | }
419 |
420 |
421 | this.defaultVisibility = true;
422 | this.serialize_widgets = true;
423 | this.properties.showOutputText = true;
424 |
425 | this.widgets = [];
426 | this.inputs = [];
427 |
428 | this.addInput("value", "*");
429 |
430 | this.addWidget(
431 | "text",
432 | "key",
433 | keyValue,
434 | (s, t, u, v, x) => {
435 | // node.validateName(node.graph);
436 | if(this.widgets[0].value !== ''){
437 | var preFix = ""
438 | if (nodeData.name.includes("adv")) {
439 | preFix = "🔥(adv) "
440 | }
441 | else if (nodeData.name.includes("Flow")) {
442 | preFix = "🔥 "
443 | }
444 | this.title = preFix + "Set_" + this.widgets[0].value;
445 | }
446 | this.update();
447 | this.properties.previousName = this.widgets[0].value;
448 | },
449 | {}
450 | )
451 |
452 | if (nodeData.name.includes("FlowBuilder")) {
453 |
454 | if ( nodeData.name.includes("adv") ) {
455 | addAdvancedFlowWidgets(this);
456 | } else {
457 | addFlowRunButton(this);
458 | }
459 | }
460 |
461 | this.findGetters = function(graph, checkForPreviousName) {
462 | const name = checkForPreviousName ? this.properties.previousName : this.widgets[0].value;
463 | return graph._nodes.filter(otherNode => isGetter(otherNode) && otherNode.widgets[0].value === name && name !== '' );
464 | }
465 |
466 | this.update = function() {
467 | if (!node.graph) {
468 | return;
469 | }
470 |
471 | try {
472 | const getters = this.findGetters(node.graph);
473 | getters.forEach(getter => {
474 | if (getter.setType) {
475 | getter.setType?.(this.inputs[0].type);
476 | } else {
477 | setTypeOtherNode(getter, this.inputs[0].type);
478 | }
479 | });
480 |
481 | if (this.widgets[0].value) {
482 | const gettersWithPreviousName = this.findGetters(node.graph, true);
483 | gettersWithPreviousName.forEach(getter => {
484 |
485 | if (getter.setName ) {
486 | getter.setName(this.widgets[0].value);
487 | } else {
488 | getter.widgets[0].value = this.widgets[0].value;
489 | }
490 | });
491 | }
492 |
493 | const allGetters = node.graph._nodes.filter(otherNode => otherNode.type === "GetNode");
494 | allGetters.forEach(otherNode => {
495 | if (otherNode.setComboValues) {
496 | otherNode.setComboValues();
497 | }
498 | });
499 | } catch (error) {
500 | console.error(`Failed to update Setter: ${error}`)
501 | }
502 | }
503 |
504 | this.validateName = function(graph) {
505 | let widgetValue = node.widgets[0].value;
506 |
507 | if (widgetValue !== '') {
508 | let tries = 0;
509 | const existingValues = new Set();
510 |
511 | graph._nodes.forEach(otherNode => {
512 | if (otherNode !== this && isSetter(otherNode)) {
513 | existingValues.add(otherNode.widgets[0].value);
514 | }
515 | });
516 |
517 | while (existingValues.has(widgetValue)) {
518 | widgetValue = node.widgets[0].value + "_" + tries;
519 | tries++;
520 | }
521 |
522 | node.widgets[0].value = widgetValue;
523 | this.update();
524 | }
525 | }
526 |
527 | this.onAdded = function(graph) {
528 | this.validateName(graph);
529 | }
530 |
531 | this.onConnectionsChange = function(
532 | slotType, //1 = input, 2 = output
533 | slot,
534 | isChangeConnect,
535 | link_info,
536 | output
537 | ) {
538 | console.log(`Setter node connection`)
539 | try {
540 | //On Disconnect
541 | if (slotType == 1 && !isChangeConnect) {
542 | if(this.inputs[slot].name === ''){
543 | this.inputs[slot].type = '*';
544 | // this.inputs[slot].name = 'value';
545 | this.title = "Setter"
546 | }
547 | }
548 | if (slotType == 2 && !isChangeConnect) {
549 | this.outputs[slot].type = '*';
550 | this.outputs[slot].name = '*';
551 |
552 | }
553 | //On Connect
554 | if (link_info && node.graph && slotType == 1 && isChangeConnect) {
555 | console.log("setternode connected");
556 | const fromNode = node.graph._nodes.find((otherNode) => otherNode.id == link_info.origin_id);
557 |
558 | if (fromNode && fromNode.outputs && fromNode.outputs[link_info.origin_slot]) {
559 | const type = fromNode.outputs[link_info.origin_slot].type;
560 |
561 | if (this.title === "Setter" && nodeData.name == "Setter"){
562 | this.title = "Set_" + type;
563 | }
564 | if (this.widgets[0].value === '*'){
565 | this.widgets[0].value = type
566 | }
567 |
568 | this.validateName(node.graph);
569 | this.inputs[0].type = type;
570 | // this.inputs[0].name = type;
571 |
572 | if (app.ui.settings.getSettingValue("komojini.NodeAutoColor")){
573 | setColorAndBgColor.call(this, type);
574 | }
575 | } else {
576 | alert("Error: Set node input undefined. Most likely you're missing custom nodes");
577 | }
578 | }
579 | if (link_info && node.graph && slotType == 2 && isChangeConnect) {
580 | const fromNode = node.graph._nodes.find((otherNode) => otherNode.id == link_info.origin_id);
581 |
582 | if (fromNode && fromNode.inputs && fromNode.inputs[link_info.origin_slot]) {
583 | const type = fromNode.inputs[link_info.origin_slot].type;
584 |
585 | this.outputs[0].type = type;
586 | // this.outputs[0].name = type;
587 | } else {
588 | alert("Error: Get Set node output undefined. Most likely you're missing custom nodes");
589 | }
590 | }
591 | }
592 | catch (error) {
593 | console.error(`Error onConnectionChange in Setter ${error}`)
594 | }
595 | //Update either way
596 | // this.update();
597 | }
598 |
599 | this.clone = function () {
600 | const cloned = nodeType.prototype.clone.apply(this);
601 | cloned.inputs[0].name = 'value';
602 | cloned.inputs[0].type = '*';
603 | cloned.value = '';
604 | cloned.properties.previousName = '';
605 | cloned.size = cloned.computeSize();
606 | return cloned;
607 | };
608 |
609 | this.onRemoved = () => {
610 | const allGetters = this.graph._nodes.filter((otherNode) => isGetter(otherNode));
611 | allGetters.forEach((otherNode) => {
612 | if (otherNode.setComboValues) {
613 | otherNode.setComboValues([this]);
614 | }
615 | })
616 | shared.cleanupNode(this)
617 | }
618 | this.inputs[0].name = "value";
619 |
620 | this.size = this.computeSize();
621 |
622 | return r;
623 | }
624 | } else if (nodeData.name.startsWith('FlowBuilder') || nodeData.name.endsWith('FlowBuilder')) {
625 | const onNodeCreated = nodeType.prototype.onNodeCreated
626 | nodeType.prototype.onNodeCreated = function () {
627 | const r = onNodeCreated
628 | ? onNodeCreated.apply(this, arguments)
629 | : undefined
630 |
631 | this.changeMode(LiteGraph.ALWAYS);
632 |
633 | if ( nodeData.name.includes("adv")) {
634 | console.log(`Advanced Flowbuilder added.`)
635 | addAdvancedFlowWidgets(this);
636 | } else {
637 | console.log(`Flowbuilder added.`)
638 | addFlowRunButton(this);
639 | }
640 |
641 | this.onRemoved = () => {
642 | shared.cleanupNode(this)
643 | app.canvas.setDirty(true)
644 | }
645 |
646 | return r;
647 | }
648 | }
649 | },
650 | nodeCreated(node, app) {
651 | if (node.comfyClass == "DragNUWAImageCanvas") {
652 | if (!node.properties) {
653 | node.properties = {}
654 | }
655 |
656 | const sizes = [
657 | "576x320",
658 | "320x576",
659 | "512x512",
660 | ];
661 |
662 | console.log(`DragNUWAImageCanvas Created`);
663 | const w = findWidgetByName(node, "image");
664 | const dragTextWidget = findWidgetByName(node, "tracking_points")
665 |
666 | shared.hideWidgetForGood(node, w)
667 |
668 | node.addWidget("button", "Get Drag Values", "", () => {
669 | openEditorDialog(node)
670 | })
671 |
672 | console.log(node)
673 |
674 | Object.defineProperty(node.properties, "draglines", {
675 | set(v) {
676 | const newDraglines = [];
677 |
678 | for (var i = 0; i < v.length; i++) {
679 | if (i < v.length - 1 && v[i].length > 1) {
680 | newDraglines.push(v[i])
681 | } else if (i === v.length - 1) {
682 | newDraglines.push(v[i])
683 | }
684 | }
685 | node.properties._draglines = newDraglines;
686 | },
687 | get() {
688 | return node.properties._draglines ?? [];
689 | }
690 | });
691 |
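// The "image" widget holds a base64 data URL. The getter below returns the placeholder
// "[IMAGE DATA]" to generic UI code paths (stack traces from app.js that are neither
// drawing nor graphToPrompt), so the large string is never rendered, while drawing and
// prompt serialization still see the real value.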
692 | Object.defineProperty(w, 'value', {
693 | set(v) {
694 | if(v != '[IMAGE DATA]' && v != "") {
695 | const img = new Image();
696 | img.onload = function() {
697 | console.log(`Set Image value of size(${img.width}x${img.height})`)
698 | }
699 | img.src = v;
700 | w._value = v;
701 | }
702 | },
703 | get() {
704 | const stackTrace = new Error().stack;
705 | if(!stackTrace.includes('draw') && !stackTrace.includes('graphToPrompt') && stackTrace.includes('app.js')) {
706 | return "[IMAGE DATA]";
707 | }
708 | else {
709 | return w._value;
710 | }
711 | },
712 | });
713 |
714 | Object.defineProperty(node.properties, "size", {
715 | set(v) {
716 | node.properties._size = v;
717 | },
718 | get() {
719 | if (node.properties._size) {
720 | return node.properties._size;
721 | } else {
722 | return ["576", "320"]
723 | }
724 | }
725 | })
726 |
727 | let set_img_act = (v) => {
728 | console.log(`set_img_act`)
729 |
730 | node._img = v;
731 |
732 | };
733 |
734 | Object.defineProperty(node, "imgs", {
735 | set(v) {
736 | if (!v[0].complete) {
737 | let orig_onload = v[0].onload;
738 | v[0].onload = function(v2) {
739 | if(orig_onload)
740 | orig_onload();
741 | set_img_act(v);
742 | };
743 | }
744 | else {
745 | set_img_act(v);
746 | }
747 | },
748 | get() {
749 | if(node._img == undefined && w.value != '') {
750 | node._img = [new Image()];
751 | if(w.value && w.value != '[IMAGE DATA]')
752 | node._img[0].src = w.value;
753 | }
754 |
755 | return node._img;
756 | }
757 | });
758 |
759 |
760 | node.closeEditorDialog = function(accept) {
761 | node.properties.dialogOpened = false;
762 |
763 | if (accept) {
764 |
765 | }
766 | node.dialog.close()
767 | }
768 |
769 | const openEditorDialog = function(node) {
770 | node.properties.dialogOpened = true;
771 | node.dialog = new app.ui.dialog.constructor()
772 |
773 | // node.dialog.element.style.height = "90%";
774 | // node.dialog.element.style.width = "90%";
775 | // node.dialog.element.style.display = "block";
776 |
777 | console.log(`Setup dialog size: ${node.dialog.element.width}, ${node.dialog.element.height}`)
778 |
779 | function setTrackingPoints() {
780 | console.log('setTrackingPoints')
781 | draglineTextEl.value = JSON.stringify(node.properties.draglines, null, 0)
782 | }
783 |
784 | function setTrackingPointsWidget() {
785 | console.log('setTrackingPointsWidget')
786 | dragTextWidget.value = JSON.stringify(node.properties.draglines, null, 0)
787 | }
788 |
789 | // node.dialog.element.classList.add('comfy-settings')
790 | const closeButton = node.dialog.element.querySelector('button')
791 | closeButton.textContent = 'CANCEL'
792 | const saveButton = document.createElement('button')
793 | saveButton.textContent = 'SAVE'
794 | saveButton.onclick = () => {
795 | node.closeEditorDialog(true)
796 | // _refreshCanvas()
797 |
798 | node.imgs = [imageNode];
799 |
800 | setTrackingPoints();
801 | setTrackingPointsWidget();
802 |
803 | if (canvasEl) {
804 | const ctx = canvasEl.getContext('2d');
805 | _drawImage(node, imageNode, canvasEl, ctx);
806 | const base64Img = canvasEl.toDataURL('image/png');
807 | w.value = base64Img;
808 | }
809 | }
810 | closeButton.onclick = () => {
811 | node.closeEditorDialog(false)
812 | }
813 | closeButton.before(saveButton)
814 |
815 | node.properties.newline = true
816 |
817 | const container = document.createElement("div")
818 |
819 | container.id = "drag-image-container";
820 | // container.style = "display: flex; flex-wrap: wrap; gap: 10px; justify-content: space-around;"
821 |
822 | Object.assign(container.style, {
823 | display: 'flex',
824 | gap: '10px',
825 | // flexWrap: 'wrap',
826 | flexDirection: 'row',
827 | // justifyContent: 'space-around',
828 | })
829 |
830 |
831 | // Object.assign(container.style, {
832 | // display: 'flex',
833 | // gap: '10px',
834 | // flexDirection: 'column',
835 | // })
836 |
837 | const imageNode = document.createElement("img")
838 | if (node.imgs) {
839 | imageNode.src = node.imgs[0].src
840 | imageNode.width = node.imgs[0].width
841 | imageNode.height = node.imgs[0].height
842 | }
843 | imageNode.id = "canvasImage"
844 |
845 |
846 | const canvasEl = document.createElement("canvas")
847 | canvasEl.id = "imageCanvas"
848 |
849 | Object.assign(canvasEl, {
850 | height: `${node.properties.size[1]}px`,
851 | width: `${node.properties.size[0]}px`,
852 | style: "border: 1px dotted gray;",
853 | })
854 |
855 | node.properties.canvas = canvasEl;
856 | container.append(canvasEl)
857 |
858 |
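// Redraws the editor canvas: resizes it to the currently selected WxH, center-crops the
// source image to match the canvas aspect ratio, and clears any recorded drag lines.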
859 | const _refreshCanvas = () => {
860 |
861 | shared.infoLogger(`Update Dialog Canvas`)
862 |
863 | node.properties.newline = true;
864 |
865 | var ctx
866 | // const canvasEl = document.getElementById("imageCanvas")
867 | // const imageNode = document.getElementById("canvasImage")
868 |
869 | if (canvasEl.getContext) {
870 | ctx = canvasEl.getContext("2d")
871 | }
872 |
873 | var x=0, y=0, w=imageNode.width, h=imageNode.height;
874 | node.properties.size = sizeSelectorEl.value.split("x");
875 | // node.properties.size = document.getElementById("sizeSelector").value.split("x");
876 | const size = node.properties.size;
877 |
878 | console.log(`Setting canvas size: ${node.properties.size}`)
879 |
880 | canvasEl.width = size[0];
881 | canvasEl.height = size[1];
882 |
883 | canvasEl.style = `width: ${size[0]}px; height: ${size[1]}px;`
884 | canvasEl.style.border = "1px dotted gray"
885 |
886 | if (!imageNode.width) {
887 | console.warn(`No Image node for updating canvas.`)
888 | }
889 |
890 | else if (imageNode.width / imageNode.height > canvasEl.width/canvasEl.height) {
891 | y = 0;
892 | h = imageNode.height
893 | w = imageNode.height * canvasEl.width / canvasEl.height
894 | x = (imageNode.width - w) / 2
895 | } else {
896 | x = 0;
897 | w = imageNode.width
898 | h = imageNode.width * canvasEl.height / canvasEl.width
899 | y = (imageNode.height - h) / 2
900 | }
901 | ctx.drawImage(imageNode, x, y, w, h, 0, 0, canvasEl.width, canvasEl.height)
902 |
903 | node.properties.draglines = [];
904 | console.log('canvas updated', canvasEl)
905 | }
906 |
907 |
908 | const draglineTextEl = document.createElement("textarea")
909 | draglineTextEl.id = "draglinetext"
910 | // draglineTextEl.style.height = draglineTextEl.scrollHeight + 'px'; // Set the height to the scrollHeight
911 | draglineTextEl.readOnly = true;
912 |
913 | function _undo() {
914 | const newDraglines = [...node.properties.draglines];
915 |
916 | const lastLine = getLast(newDraglines);
917 | lastLine.pop();
918 | if (lastLine.length === 0) {
919 | newDraglines.pop();
920 | }
921 | node.properties.draglines = [...newDraglines];
922 | drawAllLines(node, node.properties.draglines, imageNode, canvasEl);
923 | setTrackingPoints();
924 | }
925 |
926 | function handleKeydown(e) {
927 |
928 | if (!node.properties.dialogOpened) {
929 | return;
930 | }
931 |
932 | else if (e.key === 'Enter') {
933 | setNewline();
934 | drawAllLines(node, node.properties.draglines, imageNode, canvasEl);
935 | }
936 | else if (e.key === 'Escape') {
937 | node.closeEditorDialog(false);
938 | }
939 |
940 | console.log(e);
941 | }
942 | document.addEventListener('keydown', handleKeydown)
943 |
944 | canvasEl.addEventListener('mousedown', handleMouseDown)
945 | canvasEl.addEventListener('mousemove', handleMouseMove)
946 | canvasEl.addEventListener('mouseout', handleMouseOut)
947 |
948 | function handleMouseOut(e) {
949 | console.log("on mouseout");
950 | drawAllLines(node, node.properties.draglines, imageNode, canvasEl);
951 | }
952 |
953 | function handleMouseMove(e) {
954 | const rect = canvasEl.getBoundingClientRect();
955 | const x = Math.round(e.clientX - rect.left);
956 | const y = Math.round(e.clientY - rect.top);
957 |
958 | var currentDraglines;
959 |
960 | if (node.properties.newline) {
961 | currentDraglines = [...node.properties.draglines, [[x, y]]]
962 | } else {
963 | let prevDragline = getLast(node.properties.draglines) ?? [];
964 | currentDraglines = [...node.properties.draglines];
965 | currentDraglines[currentDraglines.length -1] = [...prevDragline, [x, y]]
966 | }
967 |
968 | drawAllLines(node, currentDraglines, imageNode, canvasEl);
969 |
970 | }
971 |
972 | function handleMouseDown(e) {
973 | // Get the mouse coordinates relative to the canvas
974 | console.log("mousedown")
975 | const rect = canvasEl.getBoundingClientRect();
976 |
977 | const x = Math.round(e.clientX - rect.left);
978 | const y = Math.round(e.clientY - rect.top);
979 | // console.log(`${e.clientX} - ${rect.left}, ${e.clientY} - ${rect.top}`)
980 | // Now, you have the x, y position relative to the canvas
981 | console.log('Mouse Down at:', x, y);
982 |
983 | // Optionally, you can pass x and y to another function
984 | // Do something with x and y, e.g., draw on the canvas
985 | // const canvasEl = document.getElementById("imageCanvas")
986 | // const imageNode = document.getElementById("canvasImage")
987 |
988 | var ctx
989 |
990 | if (canvasEl.getContext) {
991 | ctx = canvasEl.getContext("2d")
992 | }
993 |
994 | if (node.properties.newline) {
995 | node.properties.draglines = [...node.properties.draglines, [[x, y]]]
996 | node.properties.newline = false;
997 |
998 | } else {
999 |
1000 | const prevDragLine = getLast(node.properties.draglines);
1001 |
1002 | if (prevDragLine) {
1003 | prevDragLine.push([x, y])
1004 | } else {
1005 | node.properties.draglines = [...node.properties.draglines, [[x, y]]]
1006 | }
1007 | }
1008 |
1009 | setTrackingPoints();
1010 | drawAllLines(node, node.properties.draglines, imageNode, canvasEl)
1011 | // draglineTextEl.value = JSON.stringify(node.properties.draglines, null, 0)
1012 | }
1013 |
1014 | const inputContainer = document.createElement("div")
1015 | Object.assign(inputContainer.style, {
1016 | display: 'flex',
1017 | gap: '10px',
1018 | flexDirection: 'column',
1019 | })
1020 | const sizeSelectorEl = document.createElement("select")
1021 | sizeSelectorEl.id = "sizeSelector"
1022 | let sizeOptions = "";
1023 | sizes.forEach((size) => {
1024 | const nodeSize = `${node.properties.size[0]}x${node.properties.size[1]}`;
1025 | if (nodeSize == size) {
1026 | sizeOptions += `<option value="${size}" selected>${size}</option>`
1027 | } else {
1028 | sizeOptions += `<option value="${size}">${size}</option>`
1029 | }
1030 | return sizeOptions
1031 | })
1032 |
1033 | sizeSelectorEl.insertAdjacentHTML("beforeend", sizeOptions)
1034 |
1035 | sizeSelectorEl.onchange = _refreshCanvas
1036 |
1037 | const imageInputEl = document.createElement("input")
1038 | Object.assign(imageInputEl, {
1039 | type: "file",
1040 | id: "inputFile",
1041 | accept: "image/*",
1042 | })
1043 | node.properties.imageNode = imageNode;
1044 |
1045 | imageInputEl.onchange = function(e) {
1046 | shared.infoLogger(`Image chosen`)
1047 | var file = e.target.files[0];
1048 | var reader = new FileReader();
1049 | reader.onload = function(e) {
1050 | shared.infoLogger(`Image onload 1`)
1051 | // const imageNode = document.getElementById("canvasImage")
1052 |
1053 | var img = new Image();
1054 |
1055 | img.onload = function() {
1056 | console.log(`Got image of size ${img.width}x${img.height}`)
1057 | imageNode.width = img.width;
1058 | imageNode.height = img.height;
1059 | var ctx;
1060 |
1061 | if (canvasEl.getContext) {
1062 | ctx = canvasEl.getContext("2d")
1063 | }
1064 |
1065 | imageNode.src = e.target.result;
1066 | imageNode.onload = function () {
1067 | shared.infoLogger(`Image onload 2`)
1068 |
1069 | var x=0,y=0,w=node.width,h=node.height;
1070 | const size=document.getElementById("sizeSelector").value.split('x');
1071 | canvasEl.width=size[0];
1072 | canvasEl.height=size[1];
1073 |
1074 | refresh();
1075 | };
1076 | };
1077 | img.src = e.target.result;
1078 | };
1079 | file && reader.readAsDataURL(file);
1080 | }
1081 |
1082 | const refresh = () => {
1083 | node.properties.newline = true;
1084 | node.properties.draglines = []
1085 | draglineTextEl.value = JSON.stringify(node.properties.draglines, null, 0)
1086 |
1087 | _refreshCanvas()
1088 | }
1089 | const refreshButton = document.createElement("button");
1090 | refreshButton.textContent = "Refresh"
1091 | refreshButton.style.margin = "5px 10px"
1092 | refreshButton.onclick = refresh;
1093 |
1094 | function setNewline() {
1095 | node.properties.newline = true;
1096 | }
1097 |
1098 | const undoButton = document.createElement("button");
1099 | undoButton.textContent = "Undo"
1100 | undoButton.style.margin = "5px 10px"
1101 | undoButton.onclick = _undo;
1102 |
1103 | const newlineButton = document.createElement("button");
1104 | newlineButton.textContent = "New Line (Enter)"
1105 | newlineButton.style.margin = "5px 10px"
1106 | newlineButton.onclick = setNewline;
1107 | newlineButton.width = 100;
1108 |
1109 | const controlContainer = document.createElement("div")
1110 | Object.assign(controlContainer.style, {
1111 | display: "flex",
1112 | flexDirection: "column",
1113 | })
1114 |
1115 | const inputStyle = {
1116 | padding: '5px',
1117 | margin: '10px'
1118 | };
1119 |
1120 | controlContainer.append(sizeSelectorEl)
1121 | Object.assign(sizeSelectorEl.style, inputStyle)
1122 |
1123 | controlContainer.append(imageInputEl)
1124 | Object.assign(imageInputEl.style, inputStyle)
1125 |
1126 | controlContainer.append(newlineButton)
1127 | controlContainer.append(undoButton)
1128 | controlContainer.append(refreshButton)
1129 |
1130 | container.append(controlContainer)
1131 | // container.append(inputContainer)
1132 |
1133 | node.dialog.show('')
1134 | node.dialog.textElement.append(container)
1135 |
1136 | Object.assign(draglineTextEl.style, {
1137 | flex: 1,
1138 | margin: "20px",
1139 | })
1140 | controlContainer.append(draglineTextEl)
1141 |
1142 | _refreshCanvas()
1143 | node.properties.draglines = JSON.parse(dragTextWidget.value || "[]") ?? [];
1144 | setTrackingPoints();
1145 | }
1146 |
1147 |
1148 | shared.log(`Setup dialog`)
1149 |
1150 | }
1151 | }
1152 | }
1153 |
1154 |
1155 | app.registerExtension(komojini_widgets);
--------------------------------------------------------------------------------
/js/status_viewer.js:
--------------------------------------------------------------------------------
1 | import { app } from "../../scripts/app.js";
2 | import { api } from "../../scripts/api.js"
3 | import { ComfyDialog, $el } from "../../scripts/ui.js";
4 | import * as shared from "./comfy_shared.js";
5 |
6 |
7 | app.ui.settings.addSetting({
8 | id: "komojini.ShowSystemStatus",
9 | name: "🔥 Show System Status",
10 | type: "boolean",
11 | defaultValue: false,
12 | })
13 |
14 | app.registerExtension({
15 | name: "komojini.statusviewer",
16 | init() {
17 |
18 | },
19 | async setup() {
20 |
21 | if (!app.ui.settings.getSettingValue("komojini.ShowSystemStatus", false)) {
22 | return;
23 | }
24 |
25 | const menu = document.querySelector(".comfy-menu");
26 |
27 | const separator = document.createElement("hr");
28 |
29 | separator.style.margin = "10px 0";
30 | separator.style.width = "100%";
31 |
32 | const systemStatus = document.createElement("div");
33 | systemStatus.id = "systemStatus";
34 | systemStatus.style.width = "100%";
35 | systemStatus.style.height = "300px";
36 | systemStatus.style.textAlign = "left";
37 | systemStatus.style.backgroundColor = "black";
38 | systemStatus.style.padding = "0 6px";
39 | systemStatus.style.margin = "0 10px";
40 | systemStatus.style.borderRadius = "8px";
41 |
42 | const cpuInfoEl = document.createElement("div");
43 | cpuInfoEl.id = "cpuInfo";
44 | cpuInfoEl.style.width = "100%";
45 | cpuInfoEl.style.margin = "8px 0 0 0";
46 |
47 | const gpuInfoEl = document.createElement("div");
48 | gpuInfoEl.id = "gpuInfo";
49 | gpuInfoEl.style.width = "100%";
50 | // gpuInfoEl.style.textAlign = "left";
51 |
52 | const memoryInfoEl = document.createElement("div");
53 | memoryInfoEl.id = "memoryInfo";
54 | memoryInfoEl.style.width = "100%";
55 | memoryInfoEl.style.margin = "8px 0";
56 |
57 | systemStatus.appendChild(gpuInfoEl);
58 | systemStatus.appendChild(cpuInfoEl);
59 | systemStatus.appendChild(memoryInfoEl);
60 |
61 |
62 | function getStyledText(text, style) {
63 | var styleString = "";
64 | if (style) {
65 | for (var styleProp in style) {
66 | styleString += `${styleProp}: ${style[styleProp]};`;
67 | }
68 | } else {
69 | return text;
70 | }
71 |
72 | return `<span style="${styleString}">${text}</span>`
73 | }
74 |
75 | function addTitleEl(title, parent) {
76 | const titleEl = document.createElement("div");
77 | titleEl.innerHTML = getStyledText(title, {color: "yellow"});
78 | titleEl.style.margin = "10px 0";
79 | parent.appendChild(titleEl);
80 | return titleEl;
81 | }
82 |
83 | const gpuTitleEl = addTitleEl("GPU", gpuInfoEl);
84 | const cpuTitleEl = addTitleEl("CPU", cpuInfoEl);
85 | const memoryTitleEl = addTitleEl("Memory", memoryInfoEl);
86 |
87 | let gpuElements = [];
88 |
89 |
90 | const gpuUsageEl = document.createElement("div");
91 | gpuUsageEl.id = "gpuUsage";
92 | gpuElements.push(gpuUsageEl)
93 |
94 | const gpuMemoryUsageEl = document.createElement("div");
95 | gpuMemoryUsageEl.id = "gpuMemoryUsage";
96 | gpuElements.push(gpuMemoryUsageEl)
97 |
98 | const gpuTemperatureEl = document.createElement("div");
99 | gpuTemperatureEl.id = "gpuTemperature";
100 | gpuElements.push(gpuTemperatureEl)
101 |
102 | for (var gpuElement of gpuElements) {
103 | gpuElement.style.margin = "4px";
104 | gpuInfoEl.appendChild(gpuElement);
105 | }
106 |
107 | const cpuUsageEl = document.createElement("div");
108 | cpuUsageEl.id = "cpuUsage";
109 | cpuUsageEl.style.margin = "4px";
110 | cpuInfoEl.appendChild(cpuUsageEl);
111 |
112 | const memoryUsageEl = document.createElement("div");
113 | memoryUsageEl.id = "memoryUsage";
114 | memoryUsageEl.style.margin = "4px";
115 | memoryInfoEl.appendChild(memoryUsageEl);
116 |
117 | const nameStyle = {
118 | display: "inline-block",
119 | width: "30%",
120 | }
121 |
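// Renders the /komojini/systemstatus payload into the menu panel:
// CPU usage (%), load/VRAM/temperature for the first GPU, and RAM usage in GB.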
122 | const updateSystemStatus = (data) => {
123 |
124 | cpuUsageEl.innerHTML = `${getStyledText("Usage", nameStyle)}: ${getStyledText(data.cpu.cpu_usage, {color: "white"})}${getStyledText("%", {color: "white"})}`;
125 | const gpuInfo = data.gpus[0];
126 | gpuTitleEl.innerHTML = getStyledText("GPU ", {color: "yellow"}) + "<br>" + `(${getStyledText(gpuInfo.name, {"font-size": "8pt"})})`;
127 |
128 | gpuUsageEl.innerHTML = `${getStyledText("Usage", nameStyle)}: ${getStyledText(Math.round(gpuInfo.load * 100), {color: "white"})}${getStyledText("%", {color: "white"})}`;
129 |
130 | gpuMemoryUsageEl.innerHTML = `${getStyledText("VRAM", nameStyle)}:
131 | ${getStyledText(Math.round(gpuInfo.memoryTotal * gpuInfo.memoryUtil / 10) * 10 / 1000, {color: "white"})} /
132 | ${getStyledText(Math.round(gpuInfo.memoryTotal / 10) * 10 / 1000, {"font-size": "10pt"})}
133 | ${getStyledText("GB", {"font-size": "8pt"})}`;
134 | gpuTemperatureEl.innerHTML = `${getStyledText("Temp", nameStyle)}: ${getStyledText(gpuInfo.temperature, {color: "white"})}°`;
135 |
136 | memoryUsageEl.innerHTML = `${getStyledText("RAM", nameStyle)}:
137 | ${getStyledText(Math.round(data.virtual_memory.used / (10 ** 8)) * (10 ** 8) / (10 ** 9), {color: "white"})} /
138 | ${getStyledText(Math.round(data.virtual_memory.total / (10 ** 8)) * (10 ** 8) / (10 ** 9), {"font-size": "10pt"})}
139 | ${getStyledText("GB", {"font-size": "8pt"})}`;
140 |
141 | }
142 |
143 | // Function to fetch and update system status
144 | async function fetchSystemStatus() {
145 | try {
146 | const response = await fetch('/komojini/systemstatus');
147 | const data = await response.json();
148 |
149 | if (data.cpu !== null && data.gpus !== null) {
150 | updateSystemStatus(data);
151 | }
152 | } catch (error) {
153 | console.error('Error fetching system status:', error);
154 | }
155 | }
156 | menu.append(separator);
157 | menu.append(systemStatus);
158 |
159 | // Fetch system status initially and then every 500 ms
160 | fetchSystemStatus();
161 | setInterval(fetchSystemStatus, 500);
162 | },
163 | async beforeRegisterNodeDef(nodeType, nodeData, app) {
164 |
165 | },
166 | })
--------------------------------------------------------------------------------
/js/utils.js:
--------------------------------------------------------------------------------
1 | import { api } from '/scripts/api.js'
2 | import * as shared from './comfy_shared.js'
3 | import { app } from '/scripts/app.js'
4 |
5 |
6 |
7 | export const findWidgetByName = (node, name) => {
8 | return node.widgets ? node.widgets.find((w) => w.name === name) : null;
9 | }
10 |
11 | export const doesInputWithNameExist = (node, name) => {
12 | return node.inputs ? node.inputs.some((input) => input.name === name) : false;
13 | }
14 |
15 | export const findWidgetsByType = (node, type) => {
16 | var widgets = [];
17 | node.widgets.map((widget) => {
18 | if (widget.type === type) {
19 | widgets.push(widget);
20 | }
21 | });
22 | return widgets;
23 | }
24 |
25 | export const getNodeByLink = (linkId, type) => app.graph.getNodeById(app.graph.links[linkId][type == "input" ? "origin_id" : "target_id"]);
26 |
28 | // node.title is the title shown on the canvas
28 |
29 | export function isGetter(node) {
30 | return node.type === "GetNode" || node.type?.includes?.("Getter");
31 | }
32 |
33 | export function isSetter(node) {
34 | return node.type === 'SetNode' || node.type?.includes?.("Setter");
35 | }
36 |
37 | export const isSetNode = (node) => node.type === "SetNode";
38 | export const isGetNode = (node) => node.type === "GetNode";
39 |
40 | function findSetterNode(key) {
41 | return app.graph._nodes.find((node) => isSetter(node) && findWidgetByName(node, "key").value === key);
42 | }
43 |
44 | function findGetterNode(key) {
45 | return app.graph._nodes.find((node) => isGetter(node) && findWidgetByName(node, "key").value === key);
46 | }
47 |
48 | function findSetNode(key) {
49 | return app.graph._nodes.find((node) => isSetNode(node) && node.widgets_values === key);
50 | }
51 |
52 | function findGetNode(key) {
53 | return app.graph._nodes.find((node) => isGetNode(node) && node.widgets_values === key);
54 | }
55 |
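// Walks the graph from targetNode (backwards through inputs, forwards through outputs,
// and across matching Getter/Setter key pairs), mutes (mode = 2) every node that is not
// reached, and returns the list of nodes that were newly muted so the caller can restore them.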
56 | export function enableOnlyRelatedNodes(targetNode) {
57 | let whitelist = {};
58 |
59 | function travelBackward(node) {
60 | whitelist[node.id] = node;
61 | if (!node.inputs) return;
62 |
63 | if (isGetter(node)) {
64 | const key = findWidgetByName(node, "key").value;
65 | const setterNode = findSetterNode(key);
66 |
67 | if (!setterNode) {
68 | shared.errorLogger('No Setter node found for key:', key);
69 | } else {
70 | shared.log("Connecting Getter & Setter", node?.widgets_values);
71 | travelBackward(setterNode);
72 | }
73 |
74 | } else if (isGetNode(node)) {
75 | const key = findWidgetByName(node, "Constant").value;
76 | const setNode = findSetNode(key);
77 |
78 | if (!setNode) {
79 | shared.errorLogger('No SetNode found for Constant:', key);
80 | } else {
81 | shared.log("Connecting GetNode & SetNode", node?.widgets_values);
82 | travelBackward(setNode);
83 | }
84 | } else {
85 | for (const input of node.inputs) {
86 | if (!input.link) continue
87 | travelBackward(getNodeByLink(input.link, "input"));
88 | }
89 | }
90 | }
91 |
92 | function travelForward(node) {
93 | whitelist[node.id] = node;
94 | travelBackward(node);
95 | if (!node.outputs) return;
96 |
97 | for (const output of node.outputs) {
98 | if (!output.links) continue;
99 | for (const link of output.links) {
100 | travelForward(getNodeByLink(link, "output"));
101 | }
102 | }
103 | }
104 |
105 | travelForward(targetNode);
106 |
107 | let notAlreadyMutedBlacklist = app.graph._nodes.filter(node => node.mode !== 2 && !whitelist[node.id]);
108 | for (const node of notAlreadyMutedBlacklist) node.mode = 2;
109 | return notAlreadyMutedBlacklist;
110 | }
111 |
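// Temporarily wraps window.fetch to capture the prompt_id returned by the next
// request to "/prompt", then restores the original fetch.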
112 | export function waitForPromptId() {
113 | const originalFetch = window.fetch;
114 | return new Promise(resolve => {
115 | window.fetch = async (...args) => {
116 | let [url, config] = args;
117 | const response = await originalFetch(url, config);
118 | if (url === "/prompt") {
119 | response.clone().json().then(data => resolve(data.prompt_id));
120 | window.fetch = originalFetch;
121 | }
122 | return response;
123 | };
124 | })
125 | }
126 |
127 | //https://github.com/melMass/comfy_mtb/blob/main/web/mtb_widgets.js#L309
128 | //Thanks for the cool text box.
129 | export const DEBUG_STRING = (name, val) => {
130 | const fontSize = 16
131 | const w = {
132 | name,
133 | type: 'debug_text',
134 |
135 | draw: function (ctx, node, widgetWidth, widgetY, height) {
136 | // const [cw, ch] = this.computeSize(widgetWidth)
137 | shared.offsetDOMWidget(this, ctx, node, widgetWidth, widgetY, height)
138 | },
139 | computeSize: function (width) {
140 | const value = this.inputEl.innerHTML
141 | if (!value) {
142 | return [32, 32]
143 | }
144 | if (!width) {
145 | shared.log(`No width ${this.parent.size}`)
146 | }
147 |
148 | const oldFont = app.ctx.font
149 | app.ctx.font = `${fontSize}px monospace`
150 |
151 | const words = value.split(' ')
152 | const lines = []
153 | let currentLine = ''
154 | for (const word of words) {
155 | const testLine =
156 | currentLine.length === 0 ? word : `${currentLine} ${word}`
157 |
158 | const testWidth = app.ctx.measureText(testLine).width
159 |
160 | if (testWidth > width) {
161 | lines.push(currentLine)
162 | currentLine = word
163 | } else {
164 | currentLine = testLine
165 | }
166 | }
167 | app.ctx.font = oldFont
168 | if (lines.length === 0) lines.push(currentLine)
169 |
170 | const textHeight = (lines.length + 1) * fontSize
171 |
172 | const maxLineWidth = lines.reduce(
173 | (maxWidth, line) =>
174 | Math.max(maxWidth, app.ctx.measureText(line).width),
175 | 0
176 | )
177 | const widgetWidth = Math.max(width || this.width || 32, maxLineWidth)
178 | const widgetHeight = textHeight * 1.5
179 | return [widgetWidth, widgetHeight]
180 | },
181 | onRemoved: function () {
182 | if (this.inputEl) {
183 | this.inputEl.remove()
184 | }
185 | },
186 | }
187 |
188 | Object.defineProperty(w, 'value', {
189 | get() {
190 | return this.inputEl.innerHTML
191 | },
192 | set(value) {
193 | this.inputEl.innerHTML = value
194 | this.parent?.setSize?.(this.parent?.computeSize())
195 | },
196 | })
197 |
198 | w.inputEl = document.createElement('p')
199 | w.inputEl.style.textAlign = 'center'
200 | w.inputEl.style.fontSize = `${fontSize}px`
201 | w.inputEl.style.color = 'var(--input-text)'
202 | w.inputEl.style.lineHeight = 0
203 |
204 | w.inputEl.style.fontFamily = 'monospace'
205 | w.value = val
206 | document.body.appendChild(w.inputEl)
207 |
208 | return w
209 | }
210 |
211 | export function setColorAndBgColor(type) {
212 | const colorMap = {
213 | "MODEL": LGraphCanvas.node_colors.blue,
214 | "LATENT": LGraphCanvas.node_colors.purple,
215 | "VAE": LGraphCanvas.node_colors.red,
216 | "CONDITIONING": LGraphCanvas.node_colors.brown,
217 | "IMAGE": LGraphCanvas.node_colors.pale_blue,
218 | "CLIP": LGraphCanvas.node_colors.yellow,
219 | "FLOAT": LGraphCanvas.node_colors.green,
220 | "MASK": LGraphCanvas.node_colors.cyan,
221 | "INT": { color: "#1b4669", bgcolor: "#29699c"},
222 | "CONDITIONING": { color: "#4F200D", bgcolor: "#FF8400",},
223 | "CLIP": { color: "#898121", bgcolor: "#E7B10A",},
224 |
225 | "*": { color: "#d4a828", bgcolor: "#756d58"},
226 | };
227 |
228 | const nameColorMap = {
229 | "ksampler": { color: "820300", bgcolor: "B80000"},
230 | "controlnet": { color: "FF9800", bgcolor: "5F8670"},
231 | "ipadapter": { color: "3E3232", bgcolor: "503C3C"},
232 | "checkpoint": { color: "2D3250", bgcolor: "424769"},
233 | "lora": {color: "C499F3" , bgcolor: "7360DF"},
234 | }
235 |
236 | const colors = colorMap[type];
237 | if (colors) {
238 | this.color = colors.color;
239 | this.bgcolor = colors.bgcolor;
240 | } else {
241 | // Handle the default case if needed
242 | const name = this.type.toLowerCase?.();
243 |
244 | if (!name) {
245 | return;
246 | }
247 | for (let [key, value] of Object.entries(nameColorMap)) {
248 | if (name.includes(key)) {
249 | this.color = value.color;
250 | this.bgcolor = value.bgcolor
251 | console.log(name, key);
252 | return;
253 | }
254 | }
255 | }
256 | }
257 |
--------------------------------------------------------------------------------
/js/videoinfo.js:
--------------------------------------------------------------------------------
1 | import { app } from '../../scripts/app.js'
2 |
3 |
4 | function getVideoMetadata(file) {
5 | return new Promise((r) => {
6 | const reader = new FileReader();
7 | reader.onload = (event) => {
8 | const videoData = new Uint8Array(event.target.result);
9 | const dataView = new DataView(videoData.buffer);
10 |
11 | let decoder = new TextDecoder();
12 | // Check for known valid magic strings
13 | if (dataView.getUint32(0) == 0x1A45DFA3) {
14 | //webm
15 | //see http://wiki.webmproject.org/webm-metadata/global-metadata
16 | //and https://www.matroska.org/technical/elements.html
17 | //contrary to specs, tag seems consistently at start
18 | //COMMENT + 0x4487 + packed length?
19 | //length 0x8d8 becomes 0x48d8
20 | //
21 | //description for variable length ints https://github.com/ietf-wg-cellar/ebml-specification/blob/master/specification.markdown
22 | let offset = 4 + 8; //COMMENT is 7 chars + 1 to realign
23 | while(offset < videoData.length-16) {
24 | //Check for text tags
25 | if (dataView.getUint16(offset) == 0x4487) {
26 | //check that name of tag is COMMENT
27 | const name = String.fromCharCode(...videoData.slice(offset-7,offset));
28 | if (name === "COMMENT") {
29 | let vint = dataView.getUint32(offset+2);
30 | let n_octets = Math.clz32(vint)+1;
31 | if (n_octets < 4) {//250MB sanity cutoff
32 | let length = (vint >> (8*(4-n_octets))) & ~(1 << (7*n_octets));
33 | const content = decoder.decode(videoData.slice(offset+2+n_octets, offset+2+n_octets+length));
34 | const json = JSON.parse(content);
35 | r(json);
36 | return;
37 | }
38 | }
39 | }
40 | offset+=1;
41 | }
42 | } else if (dataView.getUint32(4) == 0x66747970 && dataView.getUint32(8) == 0x69736F6D) {
43 | //mp4
44 | //see https://developer.apple.com/documentation/quicktime-file-format
45 | //Seems to make no guarantee for alignment
46 | let offset = videoData.length-4;
47 | while (offset > 16) {//rough safe guess
48 | if (dataView.getUint32(offset) == 0x64617461) {//any data tag
49 | if (dataView.getUint32(offset - 8) == 0xa9636d74) {//cmt data tag
50 | let type = dataView.getUint32(offset+4); //seemingly 1
51 | let locale = dataView.getUint32(offset+8); //seemingly 0
52 | let size = dataView.getUint32(offset-4) - 4*4;
53 | const content = decoder.decode(videoData.slice(offset+12, offset+12+size));
54 | const json = JSON.parse(content);
55 | r(json);
56 | return;
57 | }
58 | }
59 |
60 | offset-=1;
61 | }
62 | } else {
63 | console.error("Unknown magic: " + dataView.getUint32(0))
64 | r();
65 | return;
66 | }
67 |
68 | };
69 |
70 | reader.readAsArrayBuffer(file);
71 | });
72 | }
73 | function isVideoFile(file) {
74 | if (file?.name?.endsWith(".webm")) {
75 | return true;
76 | }
77 | if (file?.name?.endsWith(".mp4")) {
78 | return true;
79 | }
80 |
81 | return false;
82 | }
83 |
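// Wrap app.handleFile so that dropping a .webm/.mp4 file loads the workflow embedded in
// its metadata comment; all other files fall through to the original handler.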
84 | let originalHandleFile = app.handleFile;
85 | app.handleFile = handleFile;
86 | async function handleFile(file) {
87 | if (file?.type?.startsWith("video/") || isVideoFile(file)) {
88 | const videoInfo = await getVideoMetadata(file);
89 | if (videoInfo) {
90 | if (videoInfo.workflow) {
91 |
92 | app.loadGraphData(videoInfo.workflow);
93 | }
94 | //Potentially check for/parse A1111 metadata here.
95 | }
96 | } else {
97 | return await originalHandleFile.apply(this, arguments);
98 | }
99 | }
100 |
101 | //hijack comfy-file-input to allow webm/mp4
102 | document.getElementById("comfy-file-input").accept += ",video/webm,video/mp4";
103 |
--------------------------------------------------------------------------------
/js/widgethider.js:
--------------------------------------------------------------------------------
1 | import { app } from "../../scripts/app.js";
2 | import { findWidgetByName, doesInputWithNameExist } from "./utils.js";
3 |
4 | let origProps = {};
5 | let initialized = false;
6 |
7 | const HIDDEN_TAG = "komojinihide";
8 |
9 | const WIDGET_HEIGHT = 24;
10 |
11 | // Toggle Widget + change size
12 | function toggleWidget(node, widget, show = false, suffix = "") {
13 | if (!widget || doesInputWithNameExist(node, widget.name)) return;
14 |
15 | // Store the original properties of the widget if not already stored
16 | if (!origProps[widget.name]) {
17 | origProps[widget.name] = { origType: widget.type, origComputeSize: widget.computeSize };
18 | }
19 |
20 | const origSize = node.size;
21 |
22 | // Set the widget type and computeSize based on the show flag
23 | widget.type = show ? origProps[widget.name].origType : HIDDEN_TAG + suffix;
24 | widget.computeSize = show ? origProps[widget.name].origComputeSize : () => [0, -4];
25 |
26 | // Recursively handle linked widgets if they exist
27 | widget.linkedWidgets?.forEach(w => toggleWidget(node, w, show, ":" + widget.name));
28 |
29 | // Calculate the new height for the node based on its computeSize method
30 | const newHeight = node.computeSize()[1];
31 | node.setSize([node.size[0], newHeight]);
32 | }
33 |
34 |
35 | // Use for Multiline Widget Nodes (aka Efficient Loaders)
36 | function toggleWidget_2(node, widget, show = false, suffix = "") {
37 | if (!widget || doesInputWithNameExist(node, widget.name)) return;
38 |
39 | const isCurrentlyVisible = widget.type !== HIDDEN_TAG + suffix;
40 | if (isCurrentlyVisible === show) return; // Early exit if widget is already in the desired state
41 |
42 | if (!origProps[widget.name]) {
43 | origProps[widget.name] = { origType: widget.type, origComputeSize: widget.computeSize };
44 | }
45 |
46 | widget.type = show ? origProps[widget.name].origType : HIDDEN_TAG + suffix;
47 | widget.computeSize = show ? origProps[widget.name].origComputeSize : () => [0, -4];
48 |
49 | if (initialized){
50 | const adjustment = show ? WIDGET_HEIGHT : -WIDGET_HEIGHT;
51 | node.setSize([node.size[0], node.size[1] + adjustment]);
52 | }
53 | }
54 |
55 | const commonLoaderInputs = ["start_sec", "end_sec", "max_fps", "force_size", "frame_load_cap"];
56 | const emptyVideoInputs = ["width", "height", "frame_count", "fps"];
57 |
58 | function allSourceInputsExept(source_name) {
59 | const allSourceInputs = ["video", "upload", "youtube_url"];
60 | let sources = [];
61 | for (const source of allSourceInputs) {
62 | if (source !== source_name) {
63 | sources.push(source);
64 | }
65 | }
66 | return sources;
67 | }
68 |
69 | // New function to handle widget visibility based on input_mode
70 | function handleInputModeWidgetsVisibility(node, inputModeValue) {
71 | const videoLoaderInputs = ["video", "youtube_url", "upload", ...commonLoaderInputs];
72 |
73 | let nodeVisibilityMap = {
74 | "UltimateVideoLoader": {
75 | "filepath": [...allSourceInputsExept("video"), ...emptyVideoInputs],
76 | "YouTube": [...allSourceInputsExept("youtube_url"), ...emptyVideoInputs],
77 | "fileupload": [...allSourceInputsExept("upload"), ...emptyVideoInputs],
78 | "emptyvideo": [...allSourceInputsExept(""), ...commonLoaderInputs],
79 | },
80 | };
81 |
82 | nodeVisibilityMap["UltimateVideoLoader (simple)"] = nodeVisibilityMap["UltimateVideoLoader"];
83 |
84 | const inputModeVisibilityMap = nodeVisibilityMap[node.comfyClass];
85 |
86 | if (!inputModeVisibilityMap || !inputModeVisibilityMap[inputModeValue]) return;
87 |
88 | // Reset all widgets to visible
89 | for (const key in inputModeVisibilityMap) {
90 | for (const widgetName of inputModeVisibilityMap[key]) {
91 | const widget = findWidgetByName(node, widgetName);
92 | toggleWidget(node, widget, true);
93 | }
94 | }
95 |
96 | // Hide the specific widgets for the current input_mode value
97 | for (const widgetName of inputModeVisibilityMap[inputModeValue]) {
98 | const widget = findWidgetByName(node, widgetName);
99 | toggleWidget(node, widget, false);
100 | }
101 | }
102 |
103 |
104 | // Create a map of node titles to their respective widget handlers
105 | const nodeWidgetHandlers = {
106 | "UltimateVideoLoader": {
107 | "source": handleUltimateVideoLoaderSource,
108 | },
109 | "UltimateVideoLoader (simple)": {
110 | "source": handleUltimateVideoLoaderSource,
111 | },
112 | "BatchCreativeInterpolationNodeDynamicSettings": {
113 | "image_count": handleBatchCreativeInterpolationNodeDynamicSettingsVisibility,
114 | }
115 | };
116 |
117 | // In the main function where widgetLogic is called
118 | function widgetLogic(node, widget) {
119 | // Retrieve the handler for the current node title and widget name
120 | const handler = nodeWidgetHandlers[node.comfyClass]?.[widget.name];
121 | if (handler) {
122 | handler(node, widget);
123 | }
124 | }
125 |
126 |
127 | function handleUltimateVideoLoaderVisibility(node, source) {
128 | const baseNamesMap = {
129 | "YouTube": ["youtube_url", ...commonLoaderInputs],
130 | "filepath": ["video", ...commonLoaderInputs],
131 | "fileupload": ["fileupload", ...commonLoaderInputs],
132 | "emptyvideo": [...emptyVideoInputs],
133 | };
134 |
135 | for (var key in baseNamesMap) {
136 | var toggle;
137 | if (key === source) {
138 | toggle = true;
139 | } else {
140 | toggle = false;
141 | }
142 | var baseNames = baseNamesMap[key];
143 |
144 | for (var widgetName of baseNames) {
145 | var widget = findWidgetByName(node, widgetName);
146 | toggleWidget(node, widget, toggle);
147 | }
148 | }
149 | }
150 |
151 | function handleBatchCreativeInterpolationNodeDynamicSettingsVisibility(node, widget) {
152 | handleVisibility(node, widget.value, "BatchCreativeInterpolationNodeDynamicSettings")
153 | }
154 |
155 | const MAX_COUNT_VALUE = 50
156 |
157 | function handleUltimateVideoLoaderSource(node, widget) {
158 | handleInputModeWidgetsVisibility(node, widget.value);
159 | handleUltimateVideoLoaderVisibility(node, widget.value);
160 | }
161 |
162 | function handleVisibility(node, countValue, nodeType) {
163 | const baseNamesMap = {
164 | "BatchCreativeInterpolationNodeDynamicSettings": [
165 | "frame_distribution",
166 | "key_frame_influence",
167 | "min_strength_value",
168 | "max_strength_value",
169 | ],
170 | }
171 | const baseNames = baseNamesMap[nodeType]
172 |
173 |
174 | for (let i=1; i <= MAX_COUNT_VALUE; i++) {
175 | const widgets = baseNames.map((n) => findWidgetByName(node, `${n}_${i}`))
176 |
177 | if (i <= countValue) {
178 | widgets?.forEach((w) => {
179 |
180 | toggleWidget(node, w, true)}
181 | )
182 | } else {
183 | widgets?.forEach((w) => toggleWidget(node, w, false))
184 | }
185 | }
186 | }
187 |
188 | app.registerExtension({
189 | name: "komojini.widgethider",
190 | nodeCreated(node) {
191 | for (const w of node.widgets || []) {
192 | let widgetValue = w.value;
193 |
194 | // Store the original descriptor if it exists
195 | let originalDescriptor = Object.getOwnPropertyDescriptor(w, 'value');
196 |
197 | widgetLogic(node, w);
198 |
199 | Object.defineProperty(w, 'value', {
200 | get() {
201 | // If there's an original getter, use it. Otherwise, return widgetValue.
202 | let valueToReturn = originalDescriptor && originalDescriptor.get
203 | ? originalDescriptor.get.call(w)
204 | : widgetValue;
205 |
206 | return valueToReturn;
207 | },
208 | set(newVal) {
209 |
210 | // If there's an original setter, use it. Otherwise, set widgetValue.
211 | if (originalDescriptor && originalDescriptor.set) {
212 | originalDescriptor.set.call(w, newVal);
213 | } else {
214 | widgetValue = newVal;
215 | }
216 |
217 | widgetLogic(node, w);
218 | }
219 | });
220 | }
221 | setTimeout(() => {initialized = true;}, 500);
222 | }
223 | });
224 |
225 |
--------------------------------------------------------------------------------
/komojini_server.py:
--------------------------------------------------------------------------------
1 | import server
2 | import folder_paths
3 | import os
4 | import time
5 | import psutil
6 | import GPUtil
7 | import subprocess
8 |
9 | from .nodes.utils import is_url, get_sorted_dir_files_from_directory, ffmpeg_path
10 | from comfy.k_diffusion.utils import FolderOfImages
11 | import nodes
12 |
13 | DEBUG = True
14 |
15 | from pprint import pprint
16 |
17 | def print_info(info):
18 | pprint(f"🔥 - {info}")
19 |
20 | web = server.web
21 |
22 | def is_safe(path):
23 | if "KOMOJINI_STRICT_PATHS" not in os.environ:
24 | return True
25 | basedir = os.path.abspath('.')
26 | try:
27 | common_path = os.path.commonpath([basedir, path])
28 | except:
29 | #Different drive on windows
30 | return False
31 | return common_path == basedir
32 |
33 |
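# Reports CPU usage/counts (psutil), per-GPU stats (GPUtil) and virtual memory as JSON;
# polled by js/status_viewer.js to render the system status panel.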
34 | @server.PromptServer.instance.routes.get("/komojini/systemstatus")
35 | async def get_system_status(request):
36 | system_status = {
37 | "cpu": None,
38 | "gpus": None,
39 | "cpustats": None,
40 | "virtual_memory": dict(psutil.virtual_memory()._asdict()), # {'total': 66480500736, 'available': 61169692672, 'percent': 8.0, 'used': 4553539584, 'free': 41330143232, 'active': 13218308096, 'inactive': 10867519488, 'buffers': 374468608, 'cached': 20222349312, 'shared': 15781888, 'slab': 567083008}
41 | }
42 |
43 | # Get CPU usage
44 | cpu_usage = psutil.cpu_percent(interval=1)
45 | cpu_stats = psutil.cpu_stats() # scpustats(ctx_switches=17990329, interrupts=17614856, soft_interrupts=10633860, syscalls=0)
46 | cpu_times_percent = psutil.cpu_times_percent()
47 | cpu_count = psutil.cpu_count()
48 |
49 |
50 | # system_status["cpustats"] = cpu.__dict__
51 | system_status['cpu'] = {
52 | "cpu_usage": cpu_usage,
53 | "cpu_times_percent": cpu_times_percent,
54 | "cpu_count": cpu_count,
55 | }
56 | # Get GPU usage
57 | try:
58 | gpu = GPUtil.getGPUs()[0] # Assuming you have only one GPU
59 | gpus = GPUtil.getGPUs()
60 | system_status["gpus"] = [gpu.__dict__ for gpu in gpus]
61 |
62 | except Exception as e:
63 | system_status['gpus'] = None # Handle the case where GPU information is not available
64 |
65 | return web.json_response(system_status)
66 |
67 |
68 | @server.PromptServer.instance.routes.get("/komojini/debug")
69 | async def get_debug(request):
70 | return web.json_response({"enabled": True})
71 |
72 |
73 | @server.PromptServer.instance.routes.get("/komojini/onqueue")
74 | async def on_queue(request):
75 | pass
76 |
77 | @server.PromptServer.instance.routes.get("/viewvideo")
78 | async def view_video(request):
79 | query = request.rel_url.query
80 | if "filename" not in query:
81 | return web.Response(status=404)
82 | filename = query["filename"]
83 |
84 | #Path code misformats urls on windows and must be skipped
85 | if is_url(filename):
86 | file = filename
87 | else:
88 | filename, output_dir = folder_paths.annotated_filepath(filename)
89 |
90 | type = request.rel_url.query.get("type", "output")
91 | if type == "path":
92 | #special case for path_based nodes
93 | #NOTE: output_dir may be empty, but non-None
94 | output_dir, filename = os.path.split(filename)
95 | if output_dir is None:
96 | output_dir = folder_paths.get_directory_by_type(type)
97 |
98 | if output_dir is None:
99 | return web.Response(status=400)
100 |
101 | if not is_safe(output_dir):
102 | return web.Response(status=403)
103 |
104 | if "subfolder" in request.rel_url.query:
105 | output_dir = os.path.join(output_dir, request.rel_url.query["subfolder"])
106 |
107 | filename = os.path.basename(filename)
108 | file = os.path.join(output_dir, filename)
109 |
110 | if query.get('format', 'video') == 'folder':
111 | if not os.path.isdir(file):
112 | return web.Response(status=404)
113 | else:
114 | if not os.path.isfile(file):
115 | return web.Response(status=404)
116 |
117 | if query.get('format', 'video') == "folder":
118 | #Check that folder contains some valid image file, get its extension
119 | #ffmpeg seems to not support list globs, so support for mixed extensions seems unfeasible
120 | os.makedirs(folder_paths.get_temp_directory(), exist_ok=True)
121 | concat_file = os.path.join(folder_paths.get_temp_directory(), "image_sequence_preview.txt")
122 | skip_first_images = int(query.get('skip_first_images', 0))
123 | select_every_nth = int(query.get('select_every_nth', 1))
124 | valid_images = get_sorted_dir_files_from_directory(file, skip_first_images, select_every_nth, FolderOfImages.IMG_EXTENSIONS)
125 | if len(valid_images) == 0:
126 | return web.Response(status=400)
127 | with open(concat_file, "w") as f:
128 | f.write("ffconcat version 1.0\n")
129 | for path in valid_images:
130 | f.write("file '" + os.path.abspath(path) + "'\n")
131 | f.write("duration 0.125\n")
132 | in_args = ["-safe", "0", "-i", concat_file]
133 | else:
134 | in_args = ["-an", "-i", file]
135 |
136 | args = [ffmpeg_path, "-v", "error"] + in_args
137 | vfilters = []
138 | if int(query.get('force_rate',0)) != 0:
139 | vfilters.append("fps=fps="+query['force_rate'] + ":round=up:start_time=0.001")
140 | if int(query.get('skip_first_frames', 0)) > 0:
141 | vfilters.append(f"select=gt(n\\,{int(query['skip_first_frames'])-1})")
142 | if int(query.get('select_every_nth', 1)) > 1:
143 | vfilters.append(f"select=not(mod(n\\,{query['select_every_nth']}))")
144 | if query.get('force_size','Disabled') != "Disabled":
145 | size = query['force_size'].split('x')
146 | if size[0] == '?' or size[1] == '?':
147 | size[0] = "-2" if size[0] == '?' else f"'min({size[0]},iw)'"
148 | size[1] = "-2" if size[1] == '?' else f"'min({size[1]},ih)'"
149 | else:
150 | #Aspect ratio is likely changed. A more complex command is required
151 | #to crop the output to the new aspect ratio
152 | ar = float(size[0])/float(size[1])
153 | vfilters.append(f"crop=if(gt({ar}\\,a)\\,iw\\,ih*{ar}):if(gt({ar}\\,a)\\,iw/{ar}\\,ih)")
154 | size = ':'.join(size)
155 | vfilters.append(f"scale={size}")
156 | vfilters.append("setpts=PTS-STARTPTS")
157 | if len(vfilters) > 0:
158 | args += ["-vf", ",".join(vfilters)]
159 | if int(query.get('frame_load_cap', 0)) > 0:
160 | args += ["-frames:v", query['frame_load_cap']]
161 | #TODO:reconsider adding high frame cap/setting default frame cap on node
162 |
163 | args += ['-c:v', 'libvpx-vp9','-deadline', 'realtime', '-cpu-used', '8', '-f', 'webm', '-']
164 |
165 |     with subprocess.Popen(args, stdout=subprocess.PIPE) as proc:
166 |         try:
167 |             resp = web.StreamResponse()
168 |             resp.content_type = 'video/webm'
169 |             resp.headers["Content-Disposition"] = f"filename=\"{filename}\""
170 |             await resp.prepare(request)
171 |             while True:
172 |                 bytes_read = proc.stdout.read()
173 |                 if bytes_read is None:
174 |                     #TODO: check for timeout here
175 |                     time.sleep(.1)
176 |                     continue
177 |                 if len(bytes_read) == 0:
178 |                     break
179 |                 await resp.write(bytes_read)
180 |         except ConnectionResetError:
181 |             #Kill ffmpeg before stdout closes
182 |             proc.kill()
183 |         except BrokenPipeError:
184 |             pass
185 | 
186 |     return resp
187 |
188 | @server.PromptServer.instance.routes.get("/getpath")
189 | async def get_path(request):
190 | query = request.rel_url.query
191 | if "path" not in query:
192 | return web.Response(status=404)
193 | path = os.path.abspath(query["path"])
194 |
195 | if not os.path.exists(path) or not is_safe(path):
196 | return web.json_response([])
197 |
198 | #Use get so None is default instead of keyerror
199 | valid_extensions = query.get("extensions")
200 | valid_items = []
201 | for item in os.scandir(path):
202 | try:
203 | if item.is_dir():
204 | valid_items.append(item.name + "/")
205 | continue
206 | if valid_extensions is None or item.name.split(".")[-1] in valid_extensions:
207 | valid_items.append(item.name)
208 | except OSError:
209 | #Broken symlinks can throw a very unhelpful "Invalid argument"
210 | pass
211 |
212 | return web.json_response(valid_items)
213 |
214 | def is_prompt_node_type_of(node_value, node_type: str) -> bool:
215 |     return node_type in node_value.get("class_type", "") or node_type in node_value.get("_meta", {}).get("title", "")
216 |
217 |
218 | def is_workflow_node_type_of(node_value, node_type: str) -> bool:
219 | return node_type in node_value.get("type", "")
220 |
221 | def test_prompt(json_data):
222 | import json
223 |
224 | try:
225 | with open(".custom_nodes/komojini-comfyui-nodes/json_data", "w") as json_file:
226 | json_str = json.dumps(json_data, indent=4)
227 | json.dump(json_data, json_file)
228 | except Exception as e:
229 | print_info("Failed to save json data.")
230 | pass
231 |
232 | print_info("Got prompt")
233 |
234 | prompt = json_data['prompt']
235 | print(f"len(prompt): {len(prompt)}")
236 |
237 |
238 | from .nodes.cache_data import CACHED_MAP
239 |
240 | def search_setter_getter_connected_nodes(json_data):
241 | key_to_getter_node_ids = {}
242 | key_to_setter_node_id = {}
243 |
244 | prompt = json_data["prompt"]
245 | for node_id, v in prompt.items():
246 | if "class_type" in v and "inputs" in v:
247 | class_type: str = v["class_type"]
248 | inputs = v["inputs"]
249 |
250 | if is_prompt_node_type_of(v, "Get"):
251 | key = inputs.get("key")
252 | if not key:
253 | continue
254 |
255 | if class_type.endswith("CachedGetter") and CACHED_MAP.get(key, None) is not None:
256 | continue
257 |
258 | if key in key_to_getter_node_ids:
259 | key_to_getter_node_ids[key].append(node_id)
260 | else:
261 | key_to_getter_node_ids[key] = [node_id]
262 | elif is_prompt_node_type_of(v, "Set"):
263 | key = inputs.get("key")
264 | if not key:
265 | continue
266 | key_to_setter_node_id[key] = node_id
267 |
268 | return key_to_getter_node_ids, key_to_setter_node_id
269 |
270 |
271 | def search_setter_getter_from_workflow_test(json_data):
272 | key_to_getter_node_ids = {}
273 | key_to_setter_node_id = {}
274 |
275 | workflow = json_data["extra_data"]["extra_pnginfo"]["workflow"]
276 | last_node_id = workflow["last_node_id"]
277 | last_link_id = workflow["last_link_id"]
278 | nodes = workflow["nodes"]
279 | links = workflow["links"]
280 | prompt = json_data["prompt"]
281 |
282 | not_included_nodes_count = 0
283 | for node in nodes:
284 | # if node["id"] in prompt:
285 | # continue
286 | if node["mode"] == 0 and node["id"] not in prompt:
287 | # print_info(f"node not in prompt. node: {node}")
288 | not_included_nodes_count += 1
289 | inputs = node.get("inputs", [])
290 | widget_values = node.get("widget_values")
291 |
292 | # {"name": "", "type": "", "link": 320}
293 | # prompt[node["id"]] = {
294 | # "inputs": {
295 |
296 | # },
297 | # "class_type": node["type"],
298 | # "_meta": {
299 | # "title": node[""],
300 | # }
301 | # }
302 | if node.get("type", "").endswith("Setter"):
303 | key = node["widgets_values"][0]
304 | elif node.get("type", "").endswith("Getter"):
305 | key = node["widgets_values"][0]
306 |
307 |
308 | """
309 | {
310 | "id": 173,
311 | "type": "JsSetter",
312 | "pos": [
313 | 6196,
314 | 9558
315 | ],
316 | "size": {
317 | "0": 210,
318 | "1": 58
319 | },
320 | "flags": {},
321 | "order": 115,
322 | "mode": 0,
323 | "inputs": [
324 | {
325 | "name": "IMAGE",
326 | "type": "IMAGE",
327 | "link": 235
328 | }
329 | ],
330 | "outputs": [
331 | {
332 | "name": "IMAGE",
333 | "type": "IMAGE",
334 | "links": [
335 | 236
336 | ],
337 | "slot_index": 0
338 | }
339 | ],
340 | "title": "Set_STEERABLE_IMAGES",
341 | "properties": {
342 | "previousName": "STEERABLE_IMAGES"
343 | },
344 | "widgets_values": [
345 | "STEERABLE_IMAGES"
346 | ],
347 | "color": "#2a363b",
348 | "bgcolor": "#3f5159"
349 | },
350 | """
351 |     print_info(f"{not_included_nodes_count} nodes are active but not included in the prompt")
352 | return key_to_getter_node_ids, key_to_setter_node_id
353 |
354 | def search_setter_getter_from_workflow(json_data):
355 | key_to_getter_node_ids = {}
356 | key_to_setter_node_id = {}
357 |
358 | workflow = json_data["extra_data"]["extra_pnginfo"]["workflow"]
359 | nodes = workflow["nodes"]
360 | prompt = json_data["prompt"]
361 |
362 | not_included_nodes_count = 0
363 | for node in nodes:
364 | if node["mode"] == 0 and node["id"] not in prompt:
365 | not_included_nodes_count += 1
366 |
367 |     print_info(f"{not_included_nodes_count} nodes are active but not included in the prompt")
368 | return key_to_getter_node_ids, key_to_setter_node_id
369 |
370 |
371 | def connect_to_from_nodes(json_data):
372 | prompt = json_data["prompt"]
373 | key_to_getter_node_ids, key_to_setter_node_id = search_setter_getter_connected_nodes(json_data)
374 | for getter_key, getter_node_ids in key_to_getter_node_ids.items():
375 | if getter_key in key_to_setter_node_id:
376 | setter_node_id = key_to_setter_node_id[getter_key]
377 |
378 | for getter_node_id in getter_node_ids:
379 | # if "*" in prompt[getter_node_id]["inputs"]:
380 | prompt[getter_node_id]["inputs"]["*"] = [setter_node_id, 0]
381 | # elif "value" in prompt[getter_node_id]["inputs"]:
382 | prompt[getter_node_id]["inputs"]["value"] = [setter_node_id, 0]
383 | # else:
384 | # print(f"[WARN] Komojini-ComfyUI-CustonNodes: There is no 'Setter' node in the workflow for key: {getter_key}, inputs: {prompt[getter_node_id]['inputs']}")
385 |
386 | print(f"Connected getter {getter_node_id}: {json_data['prompt'][getter_node_id]}")
387 | if setter_node_id not in prompt:
388 | print(f"[WARN] setter node id for key({getter_key}) not in prompt, setter_node_id: {setter_node_id}")
389 | else:
390 |             print(f"[WARN] Komojini-ComfyUI-CustomNodes: There is no 'Setter' node in the workflow for key: {getter_key}")
391 |
392 |
393 | def workflow_update(json_data):
394 | prompt = json_data["prompt"]
395 | for k, v in prompt.items():
396 | if "class_type" in v and "inputs" in v:
397 | class_type = v["class_type"]
398 | inputs = v["inputs"]
399 |
400 | class_ = nodes.NODE_CLASS_MAPPINGS[class_type]
401 | if hasattr(class_, "OUTPUT_NODE") and class_.OUTPUT_NODE == True:
402 | pass
403 | if class_type == "Getter":
404 | id = inputs["key"]
405 |
406 |
407 | def on_prompt_handler(json_data):
408 | try:
409 | # test_prompt(json_data)
410 | search_setter_getter_from_workflow(json_data)
411 | connect_to_from_nodes(json_data)
412 |
413 | except Exception as e:
414 | print_info(f"[WARN] Komojini-ComfyUI-CustomNodes: Error on prompt\n{e}")
415 | return json_data
416 |
417 | server.PromptServer.instance.add_on_prompt_handler(on_prompt_handler)
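418 | 
419 | # Editorial note (illustrative, not in the original file): once on_prompt_handler has run,
420 | # every matched Getter-type node in the prompt has its "*" and "value" inputs rewired to the
421 | # corresponding Setter node, e.g. prompt[getter_id]["inputs"]["value"] == [setter_id, 0],
422 | # which is how values flow between Setter/Getter pairs without visible links in the graph.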
--------------------------------------------------------------------------------
/nodes/__init__.py:
--------------------------------------------------------------------------------
1 | from .video_loaders import YouTubeVideoLoader, UltimateVideoLoader
2 | from .image_merger import ImageMerger
3 | from .cacheable_nodes import (
4 | KSamplerCacheable,
5 | KSamplerAdvancedCacheable,
6 | )
7 | from .image_nodes import *
8 | from .komojini_nodes import *
9 |
10 | __all__ = [
11 | "YouTubeVideoLoader",
12 | "ImageMerger",
13 | "UltimateVideoLoader",
14 | "KSamplerCacheable",
15 | "KSamplerAdvancedCacheable",
16 | "From",
17 | "To",
18 | "ImageGetter",
19 | "FlowBuilder",
20 | "FlowBuilderSetter",
21 | "CachedGetter",
22 | "DragNUWAImageCanvas",
23 | "ImageCropByRatio",
24 | "ImageCropByRatioAndResize",
25 | "ImagesCropByRatioAndResizeBatch",
26 | "BatchCreativeInterpolationNodeDynamicSettings",
27 | ]
28 |
--------------------------------------------------------------------------------
/nodes/cache_data.py:
--------------------------------------------------------------------------------
1 |
2 |
3 | CACHED_MAP = {}
4 |
--------------------------------------------------------------------------------
/nodes/cacheable_nodes.py:
--------------------------------------------------------------------------------
1 | import os
2 | import functools
3 |
4 | import comfy
5 | import folder_paths
6 | from nodes import common_ksampler, KSampler
7 | from .logger import logger
8 | from .utils import to_hashable, hashable_to_dict
9 |
10 |
11 | CACHE_MAX_SIZE = 4
12 |
13 | def to_cacheable_function(func: callable, maxsize=CACHE_MAX_SIZE, typed=False):
14 |
15 | @functools.lru_cache(maxsize=maxsize, typed=typed)
16 | def cacheable_function(kwargs):
17 | kwargs = hashable_to_dict(kwargs)
18 | return func(**kwargs)
19 |
20 | return cacheable_function
21 |
22 |
23 | class KSamplerCacheable(KSampler):
24 | def __init__(self):
25 | super().__init__()
26 | self.call = to_cacheable_function(super().sample)
27 |
28 | FUNCTION = "cache_call"
29 | def cache_call(self, **kwargs):
30 | kwargs = to_hashable(kwargs)
31 | return self.call(kwargs)
32 |
33 |
34 | class KSamplerAdvancedCacheable:
35 | def __init__(self):
36 | self.call = to_cacheable_function(common_ksampler)
37 |
38 | @classmethod
39 | def INPUT_TYPES(s):
40 | return {
41 | "required": {
42 | "model": ("MODEL",),
43 | "add_noise": (["enable", "disable"],),
44 | "noise_seed": (
45 | "INT",
46 | {"default": 0, "min": 0, "max": 0xFFFFFFFFFFFFFFFF},
47 | ),
48 | "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
49 | "cfg": (
50 | "FLOAT",
51 | {
52 | "default": 8.0,
53 | "min": 0.0,
54 | "max": 100.0,
55 | "step": 0.1,
56 | "round": 0.01,
57 | },
58 | ),
59 | "sampler_name": (comfy.samplers.KSampler.SAMPLERS,),
60 | "scheduler": (comfy.samplers.KSampler.SCHEDULERS,),
61 | "positive": ("CONDITIONING",),
62 | "negative": ("CONDITIONING",),
63 | "latent_image": ("LATENT",),
64 | "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}),
65 | "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000}),
66 | "return_with_leftover_noise": (["disable", "enable"],),
67 | }
68 | }
69 |
70 | RETURN_TYPES = ("LATENT",)
71 | FUNCTION = "sample"
72 |
73 | CATEGORY = "komojini/sampling"
74 |
75 | def sample(
76 | self,
77 | model,
78 | add_noise,
79 | noise_seed,
80 | steps,
81 | cfg,
82 | sampler_name,
83 | scheduler,
84 | positive,
85 | negative,
86 | latent_image,
87 | start_at_step,
88 | end_at_step,
89 | return_with_leftover_noise,
90 | denoise=1.0,
91 | ):
92 | force_full_denoise = True
93 | if return_with_leftover_noise == "enable":
94 | force_full_denoise = False
95 | disable_noise = False
96 | if add_noise == "disable":
97 | disable_noise = True
98 |
99 | kwargs = {
100 | "model": model,
101 | "seed": noise_seed,
102 | "steps": steps,
103 | "cfg": cfg,
104 | "sampler_name": sampler_name,
105 | "scheduler": scheduler,
106 | "positive": positive,
107 | "negative": negative,
108 | "latent": latent_image,
109 | "denoise": denoise,
110 | "disable_noise": disable_noise,
111 | "start_step": start_at_step,
112 | "last_step": end_at_step,
113 | "force_full_denoise": force_full_denoise,
114 | }
115 | kwargs = to_hashable(kwargs)
116 |
117 | return self.call(kwargs)
118 |
119 |
120 | CACHED_STRINGS = {}
121 |
122 | class TextCacheable:
123 | @classmethod
124 | def INPUT_TYPES(cls):
125 | return {
126 | "required": {
127 | "text": ("STRING", {"default": "", "multiline": True}),
128 | },
129 | "hidden": {"unique_id": "UNIQUE_ID"},
130 | }
131 |
132 | FUNCTION = "call"
133 | RETURN_TYPES = ("STRING", )
134 | RETURN_NAMES = ("text", )
135 | def call(self, text, unique_id=None):
136 | if unique_id in CACHED_STRINGS:
137 | CACHED_STRINGS[unique_id].append(text)
138 | else:
139 | CACHED_STRINGS[unique_id] = [text]
140 |
141 | return (text, )
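142 | 
143 | 
144 | def _caching_pattern_example():
145 |     # Illustrative sketch only (not part of the original file): the same
146 |     # freeze-kwargs-then-lru_cache pattern the sampler nodes above use,
147 |     # demonstrated with a cheap function instead of a sampler call.
148 |     cached = to_cacheable_function(lambda **kw: sum(kw.values()))
149 |     first = cached(to_hashable({"a": 1, "b": 2}))   # computed
150 |     second = cached(to_hashable({"a": 1, "b": 2}))  # served from the lru_cache
151 |     return first == second  # -> True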
--------------------------------------------------------------------------------
/nodes/image_merger.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import os
3 | import numpy as np
4 | import torch
5 | from typing import Tuple, Optional
6 |
7 |
8 | def tensor_to_int(tensor, bits):
9 | #TODO: investigate benefit of rounding by adding 0.5 before clip/cast
10 | tensor = tensor.cpu().numpy() * (2**bits-1)
11 | return np.clip(tensor, 0, (2**bits-1))
12 | def tensor_to_shorts(tensor):
13 | return tensor_to_int(tensor, 16).astype(np.uint16)
14 | def tensor_to_bytes(tensor):
15 | return tensor_to_int(tensor, 8).astype(np.uint8)
16 |
17 | def line_equation(x1, y1, x2, y2, x, y):
18 | return (x - x1) * (y2 - y1) - (y - y1) * (x2 - x1)
19 |
20 | def line_mask_equation(x1, y1, x2, y2, x, y, size):
21 | distance = np.abs((x - x1) * (y2 - y1) - (y - y1) * (x2 - x1)) / np.sqrt((y2 - y1)**2 + (x2 - x1)**2)
22 | return distance <= size / 2
23 |
24 | def merge_images(images1, images2, x1, y1, x2, y2, line_thickness):
25 | batch_size, height, width, channels = images1.shape
26 |
27 | # Create 2D grid of (x, y) coordinates
28 | y_coords, x_coords = torch.meshgrid(torch.arange(height), torch.arange(width))
29 | coords = torch.stack([x_coords, y_coords], dim=-1)
30 |
31 | # Calculate line equation for each point in the grid
32 | line_values = line_equation(x1, y1, x2, y2, coords[..., 0], coords[..., 1])
33 |
34 | # Create a mask based on the line equation
35 | mask = line_values > 0
36 |
37 | # Broadcast the mask to the shape of the images
38 | mask = mask.unsqueeze(0).unsqueeze(3).expand(batch_size, height, width, channels)
39 |
40 | # Combine the corresponding regions from each image
41 | merged_images = images1 * mask.float() + images2 * (~mask).float()
42 |
43 | if line_thickness:
44 | try:
45 | line_mask_values = line_mask_equation(x1, y1, x2, y2, coords[..., 0], coords[..., 1], line_thickness)
46 | line_mask_values = line_mask_values.unsqueeze(0).unsqueeze(3).expand(batch_size, height, width, channels)
47 | merged_images = merged_images * (~line_mask_values).float() + line_mask_values.float()
48 | except Exception as e:
49 | print(e)
50 |
51 | return merged_images
52 |
53 |
54 | class ImageMerger:
55 | @classmethod
56 | def INPUT_TYPES(s):
57 |
58 | return {
59 | "required": {
60 | "images_1": ("IMAGE",),
61 | "images_2": ("IMAGE",),
62 | "divide_points": ("STRING", {"default": "(50%, 0);(50%, 100%)"}),
63 | "line_thickness": ("INT", {"default": 0, "min": 0, "max": 1000, "step": 1}),
64 | },
65 | }
66 |
67 | FUNCTION = "merge_video"
68 | CATEGORY = "komojini/Image"
69 | RETURN_NAMES = ("images", "num_images",)
70 | RETURN_TYPES = ("IMAGE", "INT",)
71 |
72 | def merge_video(self, images_1, images_2, divide_points, line_thickness):
73 | # image.shape = (num_imgs, height, width, channels)
74 | num_images, height, width, _ = images_1.shape
75 | print(f"start merge images, images_1.shape: {images_1.shape}")
76 | marks = []
77 | for mark_string in divide_points.split(";"):
78 | xy = self.get_xy(mark_string, height, width)
79 | if not xy:
80 | continue
81 | marks.append(xy)
82 |
83 | # TODO: implement using more than 2 marks.
84 | if len(marks) != 2:
85 |             raise NotImplementedError("currently only 2 marks are available.")
86 |
87 | else:
88 | x1, y1 = marks[0]
89 | x2, y2 = marks[1]
90 | merged_images = merge_images(
91 | images1=images_1,
92 | images2=images_2,
93 | x1=x1, y1=y1, x2=x2, y2=y2,
94 | line_thickness=line_thickness,
95 | )
96 |
97 | print(f"merged_images.shape: {merged_images.shape}")
98 | return (merged_images, len(merged_images))
99 |
100 |
101 | @staticmethod
102 | def get_xy(mark_string: str, height: int, width: int) -> Optional[Tuple[int, int]]:
103 | mark_string = mark_string.strip()
104 | if not mark_string.startswith("(") or not mark_string.endswith(")"):
105 | print(f"mark_string is not appropriate, mark_string: {mark_string}")
106 | return None
107 | mark_string = mark_string[1:-1]
108 | x, y = mark_string.split(",")
109 | x, y = x.strip(), y.strip()
110 | if x.endswith("%"):
111 | x = x[:-1]
112 | x = int(x)
113 | x = int(width * x / 100)
114 | else:
115 | x = int(x)
116 |
117 | if y.endswith("%"):
118 | y = y[:-1]
119 | y = int(y)
120 | y = int(height * y / 100)
121 | else:
122 | y = int(y)
123 |
124 | return x, y
125 |
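126 | # Editorial note (illustrative, not in the original file): divide_points is a ";"-separated list
127 | # of "(x, y)" marks, where each coordinate is either an absolute pixel value or a percentage of
128 | # the image size; the default "(50%, 0);(50%, 100%)" splits the frame along a vertical center line,
129 | # with images_1 filling one side of the line and images_2 the other.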
--------------------------------------------------------------------------------
/nodes/image_nodes.py:
--------------------------------------------------------------------------------
1 | import warnings
2 | warnings.filterwarnings('ignore', module="torchvision")
3 | import ast
4 | import math
5 | import random
6 | import operator as op
7 | import numpy as np
8 |
9 | import torch
10 | import torch.nn.functional as F
11 |
12 | import torchvision.transforms.v2 as T
13 |
14 | import comfy.utils
15 |
16 | from .logger import logger
17 |
18 |
19 | MAX_RESOLUTION = 8192
20 |
21 | def p(image):
22 | return image.permute([0,3,1,2])
23 | def pb(image):
24 | return image.permute([0,2,3,1])
25 |
26 |
27 | class ImageCropByRatio:
28 | @classmethod
29 | def INPUT_TYPES(s):
30 | return {
31 | "required": {
32 | "image": ("IMAGE",),
33 | "width_ratio": ("INT", {"default": 1, "min": 1, "max": MAX_RESOLUTION}),
34 | "height_ratio": (
35 | "INT",
36 | {"default": 1, "min": 1, "max": MAX_RESOLUTION},
37 | ),
38 | "position": (
39 | [
40 | "top",
41 | "right",
42 | "bottom",
43 | "left",
44 | "center",
45 | ],
46 | ),
47 | }
48 | }
49 |
50 | RETURN_TYPES = (
51 | "IMAGE",
52 | "INT",
53 | "INT",
54 | )
55 | RETURN_NAMES = (
56 | "IMAGE",
57 | "width",
58 | "height",
59 | )
60 | FUNCTION = "execute"
61 | CATEGORY = "essentials"
62 |
63 | def execute(self, image, width_ratio, height_ratio, position):
64 | _, oh, ow, _ = image.shape
65 |
66 | image_ratio = ow / oh
67 | target_ratio = width_ratio / height_ratio
68 |
69 | if image_ratio > target_ratio:
70 | height = oh
71 | width = target_ratio * height
72 | else:
73 | width = ow
74 | height = width / target_ratio
75 |
76 |
77 | x = round((ow - width) / 2)
78 | y = round((oh - height) / 2)
79 | width, height = round(width), round(height)
80 |
81 |
82 | if "top" in position:
83 | y = 0
84 | if "bottom" in position:
85 | y = oh - height
86 | if "left" in position:
87 | x = 0
88 | if "right" in position:
89 | x = ow - width
90 |
91 | x2 = x + width
92 | y2 = y + height
93 |
94 | if x2 > ow:
95 | x2 = ow
96 | if x < 0:
97 | x = 0
98 | if y2 > oh:
99 | y2 = oh
100 | if y < 0:
101 | y = 0
102 |
103 | image = image[:, y:y2, x:x2, :]
104 |
105 | return (
106 | image,
107 | width,
108 | height,
109 | )
110 |
111 |
112 |
113 | class ImageCropByRatioAndResize:
114 | @classmethod
115 | def INPUT_TYPES(s):
116 | return {
117 | "required": {
118 | "image": ("IMAGE",),
119 | "width_ratio_size": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION}),
120 | "height_ratio_size": (
121 | "INT",
122 | {"default": 512, "min": 1, "max": MAX_RESOLUTION},
123 | ),
124 | "position": (
125 | [
126 | "center",
127 | "top",
128 | "right",
129 | "bottom",
130 | "left",
131 | ],
132 | ),
133 | "interpolation": (["nearest", "bilinear", "bicubic", "area", "nearest-exact", "lanczos"],),
134 |
135 | }
136 | }
137 |
138 | RETURN_TYPES = (
139 | "IMAGE",
140 | "INT",
141 | "INT",
142 | )
143 | RETURN_NAMES = (
144 | "IMAGE",
145 | "width",
146 | "height",
147 | )
148 | FUNCTION = "execute"
149 | CATEGORY = "essentials"
150 |
151 | def execute(self, image, width_ratio_size, height_ratio_size, position, interpolation):
152 | _, oh, ow, _ = image.shape
153 |
154 | image_ratio = ow / oh
155 | target_ratio = width_ratio_size / height_ratio_size
156 |
157 |
158 | if image_ratio > target_ratio:
159 | height = oh
160 | width = target_ratio * height
161 | else:
162 | width = ow
163 | height = width / target_ratio
164 |
165 |
166 | x = round((ow - width) / 2)
167 | y = round((oh - height) / 2)
168 | width, height = round(width), round(height)
169 |
170 | if "top" in position:
171 | y = 0
172 | if "bottom" in position:
173 | y = oh - height
174 | if "left" in position:
175 | x = 0
176 | if "right" in position:
177 | x = ow - width
178 |
179 | x2 = x + width
180 | y2 = y + height
181 |
182 | if x2 > ow:
183 | x2 = ow
184 | if x < 0:
185 | x = 0
186 | if y2 > oh:
187 | y2 = oh
188 | if y < 0:
189 | y = 0
190 |
191 | image = image[:, y:y2, x:x2, :]
192 |
193 | width = width_ratio_size
194 | height = height_ratio_size
195 |
196 | outputs = p(image)
197 | if interpolation == "lanczos":
198 | outputs = comfy.utils.lanczos(outputs, width, height)
199 | else:
200 | outputs = F.interpolate(outputs, size=(height, width), mode=interpolation)
201 | outputs = pb(outputs)
202 |
203 | return(outputs, outputs.shape[2], outputs.shape[1],)
204 |
205 |
206 |
207 | class ImagesCropByRatioAndResizeBatch(ImageCropByRatioAndResize):
208 |
209 |
210 | FUNCTION = "list_execute"
211 | INPUT_IS_LIST = True
212 | OUTPUT_IS_LIST = (False, False, False,)
213 |
214 | def list_execute(self, image, **kwargs):
215 | logger.debug(f"{len(image)}, {kwargs}")
216 |
217 | output_images = []
218 | new_kwargs = {}
219 | for k, v in kwargs.items():
220 | if isinstance(v, list):
221 | new_kwargs[k] = v[0]
222 |
223 | width, height = new_kwargs["width_ratio_size"], new_kwargs["height_ratio_size"]
224 |
225 | for img in image:
226 | output_img, width, height = super().execute(img, **new_kwargs)
227 | output_images.append(output_img)
228 |
229 | if len(output_images) <= 1:
230 | return (output_images[0], width, height,)
231 |
232 | output_images = torch.cat(output_images, dim=0)
233 |
234 | print(f"image crop by ratio and resize image shape: {output_images.shape}")
235 |
236 | return (output_images, width, height, )
237 |
238 |
239 | __all__ = [
240 | "ImageCropByRatio",
241 | "ImageCropByRatioAndResize",
242 | "ImagesCropByRatioAndResizeBatch",
243 | ]
--------------------------------------------------------------------------------
/nodes/komojini_nodes.py:
--------------------------------------------------------------------------------
1 |
2 | from server import PromptServer
3 | import os
4 |
5 | from .logger import logger
6 |
7 |
8 | # wildcard trick is taken from pythongossss's
9 | class AnyType(str):
10 | def __ne__(self, __value: object) -> bool:
11 | return False
12 |
13 | any_typ = AnyType("*")
14 |
15 |
16 | HIDDEN_ARGS = {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "unique_id": "UNIQUE_ID"}
17 |
18 |
19 | def get_file_item(base_type, path):
20 | path_type = base_type
21 |
22 |     if path.endswith("[output]"):
23 |         path_type = "output"
24 |         path = path[:-9]
25 |     elif path.endswith("[input]"):
26 |         path_type = "input"
27 |         path = path[:-8]
28 |     elif path.endswith("[temp]"):
29 |         path_type = "temp"
30 |         path = path[:-7]
31 |
32 | subfolder = os.path.dirname(path)
33 | filename = os.path.basename(path)
34 |
35 | return {
36 | "filename": filename,
37 | "subfolder": subfolder,
38 | "type": path_type
39 | }
40 |
41 |
42 | def workflow_to_map(workflow):
43 | nodes = {}
44 | links = {}
45 | for link in workflow['links']:
46 | links[link[0]] = link[1:]
47 | for node in workflow['nodes']:
48 | nodes[str(node['id'])] = node
49 |
50 | return nodes, links
51 |
52 |
53 | def collect_non_reroute_nodes(node_map, links, res, node_id):
54 | if node_map[node_id]['type'] != 'Reroute' and node_map[node_id]['type'] != 'Reroute (rgthree)':
55 | res.append(node_id)
56 | else:
57 | for link in node_map[node_id]['outputs'][0]['links']:
58 | next_node_id = str(links[link][2])
59 | collect_non_reroute_nodes(node_map, links, res, next_node_id)
60 |
61 | from .cache_data import CACHED_MAP
62 |
63 |
64 | class To:
65 | @classmethod
66 | def INPUT_TYPES(cls):
67 | return {"required": {"key": ("STRING", {"default": ""}),
68 | },
69 | "optional": {"value": (any_typ, )}
70 | # "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "unique_id": "UNIQUE_ID"}
71 | }
72 |
73 | FUNCTION = "run"
74 | RETURN_TYPES = (any_typ, )
75 | RETURN_NAMES = ("*", )
76 |
77 | def run(self, key, **kwargs):
78 | if "*" in kwargs:
79 | value = kwargs["*"]
80 | elif "value" in kwargs:
81 | value = kwargs["value"]
82 | else:
83 | logger.warning(f"No value assigned for key: {key}, inputs: {kwargs}")
84 |
85 | value = next(iter(kwargs.values()))
86 |
87 |         CACHED_MAP[key] = value
88 | return (value, )
89 |
90 |
91 | def run_getter(key, **kwargs):
92 | if "*" in kwargs:
93 | return (kwargs["*"], )
94 | elif "value" in kwargs:
95 | return (kwargs["value"], )
96 |
97 | else:
98 | for k, v in kwargs.items():
99 | if k in HIDDEN_ARGS:
100 | continue
101 | return (v, )
102 | logger.warning(f"No value assigned for key: {key}, inputs: {kwargs}")
103 |
104 | return None
105 |
106 |
107 | class From:
108 | @classmethod
109 | def INPUT_TYPES(cls):
110 | return {"required": {"key": ("STRING", {"default": ""})},
111 | "optional" : {
112 | "value": (any_typ, )
113 | },
114 | # "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "unique_id": "UNIQUE_ID"}
115 | }
116 |
117 | FUNCTION = "run"
118 | RETURN_TYPES = (any_typ, )
119 | RETURN_NAMES = ("*", )
120 |
121 | def run(self, key, **kwargs):
122 | return run_getter(key, **kwargs)
123 |
124 |
125 | class ImageGetter:
126 | @classmethod
127 | def INPUT_TYPES(cls):
128 | return {"required": {"key": ("STRING", {"default": ""})},
129 | "optional" : {
130 | "value": ("IMAGE", )
131 | },
132 | # "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "unique_id": "UNIQUE_ID"}
133 | }
134 |
135 | FUNCTION = "run"
136 | RETURN_TYPES = ("IMAGE", )
137 | RETURN_NAMES = ("*", )
138 |
139 | def run(self, key, **kwargs):
140 | return run_getter(key, **kwargs)
141 |
142 |
143 | class CachedGetter:
144 | @classmethod
145 | def INPUT_TYPES(cls):
146 | return {"required": {"key": ("STRING", {"default": ""})},
147 | "optional" : {
148 | "value": (any_typ, )
149 | },
150 | # "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "unique_id": "UNIQUE_ID"}
151 | }
152 |
153 | FUNCTION = "run"
154 | RETURN_TYPES = (any_typ, )
155 | RETURN_NAMES = ("*", )
156 |
157 | def run(self, key, **kwargs):
158 | cached_value = CACHED_MAP.get(key)
159 | if cached_value is not None:
160 | return (cached_value,)
161 |
162 | value = run_getter(key, **kwargs)[0]
163 | logger.info(f"There is no cached data for {key}. Caching new data...")
164 | CACHED_MAP[key] = value
165 | return (value, )
166 |
167 |
168 | class FlowBuilder:
169 | @classmethod
170 | def INPUT_TYPES(cls):
171 | return {
172 | "required": {
173 | "value": (any_typ, ),
174 | },
175 | "optional": {
176 | "batch_size": ("INT", {"default": 1, "min": 1, "max": 10000, "step": 1}),
177 | },
178 | "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "unique_id": "UNIQUE_ID"},
179 | }
180 |
181 | FUNCTION = "run"
182 | RETURN_TYPES = (any_typ, )
183 | RETURN_NAMES = ("value", )
184 | CATEGORY = "komojini/flow"
185 |
186 | def run(self, value, **kwargs):
187 | return (value, )
188 |
189 |
190 | class FlowBuilderSetter:
191 | @classmethod
192 | def INPUT_TYPES(cls):
193 | return {
194 | "required": {
195 | "value": (any_typ,),
196 | "key": ("STRING", {"default": ""}),
197 | },
198 | "optional": {
199 | "batch_size": ("INT", {"default": 1, "min": 1, "max": 10000, "step": 1}),
200 | },
201 | "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "unique_id": "UNIQUE_ID"},
202 | }
203 |
204 | FUNCTION = "run"
205 | RETURN_TYPES = (any_typ,)
206 | RETURN_NAMES = ("value",)
207 | CATEGORY = "komojini/flow"
208 |
209 | def run(self, **kwargs):
210 | key = kwargs.get("key")
211 |
212 | if "*" in kwargs:
213 | value = kwargs["*"]
214 | elif "value" in kwargs:
215 | value = kwargs["value"]
216 | else:
217 | logger.warning(f"No value assigned for key: {key}, inputs: {kwargs}")
218 |
219 | value = next(iter(kwargs.values()))
220 |
221 | CACHED_MAP[key] = value
222 | return (value, )
223 |
224 |
225 | from PIL import Image, ImageOps
226 | import torch
227 | import base64
228 | from io import BytesIO
229 | import numpy as np
230 |
231 |
232 |
233 | class DragNUWAImageCanvas:
234 | @classmethod
235 | def INPUT_TYPES(cls):
236 | return {
237 | "required": {
238 | "image": ("STRING", {"default": "[IMAGE DATA]"}),
239 | "tracking_points": ("STRING", {"default": "", "multiline": True})
240 | }
241 | }
242 | FUNCTION = "run"
243 | RETURN_TYPES = ("IMAGE", "STRING",)
244 | RETURN_NAMES = ("image", "tracking_points",)
245 | CATEGORY = "komojini/image"
246 |
247 | def run(self, image, tracking_points, **kwargs):
248 | logger.info(f"DragNUWA output of tracking points: {tracking_points}")
249 |
250 | # Extract the base64 string without the prefix
251 | base64_string = image.split(",")[1]
252 |
253 | # Decode base64 string to bytes
254 | i = base64.b64decode(base64_string)
255 |
256 | # Convert bytes to PIL Image
257 | i = Image.open(BytesIO(i))
258 |
259 | i = ImageOps.exif_transpose(i)
260 | image = i.convert("RGB")
261 | image = np.array(image).astype(np.float32) / 255.0
262 | image = torch.from_numpy(image)[None,]
263 | return (image, tracking_points, )
264 |
265 |
266 | MAX_IMAGE_COUNT = 50
267 |
268 | class BatchCreativeInterpolationNodeDynamicSettings:
269 | @classmethod
270 | def INPUT_TYPES(s):
271 | inputs = {
272 | "required": {
273 | "image_count": ("INT", {"default": 1, "min": 1, "max": MAX_IMAGE_COUNT, "step": 1}),
274 | },
275 | }
276 |
277 | for i in range(1, MAX_IMAGE_COUNT):
278 | if i == 1:
279 | inputs["required"][f"frame_distribution_{i}"] = ("INT", {"default": 4, "min": 4, "max": 64, "step": 1})
280 | else:
281 | inputs["required"][f"frame_distribution_{i}"] = ("INT", {"default": 16, "min": 4, "max": 64, "step": 1})
282 |
283 | inputs["required"][f"key_frame_influence_{i}"] = ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.1})
284 | inputs["required"][f"min_strength_value_{i}"] = ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.1})
285 | inputs["required"][f"max_strength_value_{i}"] = ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.1})
286 |
287 | return inputs
288 |
289 | RETURN_TYPES = ("STRING", "STRING", "STRING",)
290 | RETURN_NAMES = ("dynamic_frame_distribution_values", "dynamic_key_frame_influence_values", "dynamic_strength_values",)
291 |
292 | FUNCTION = "run"
293 |
294 | def run(self, image_count, **kwargs):
295 | dynamic_frame_distribution_values = ""
296 | dynamic_key_frame_influence_values = ""
297 | dynamic_strength_values = ""
298 |
299 | previous_frame_distribution = 0
300 |
301 | for i in range(1, image_count+1):
302 | previous_frame_distribution += kwargs.get(f"frame_distribution_{i}", 0)
303 |
304 | distribution_value = str(previous_frame_distribution) + ","
305 | influence_value = str(kwargs.get(f"key_frame_influence_{i}")) + ","
306 | strength_value = "({min},{max}),".format(min=kwargs.get(f"min_strength_value_{i}"), max=kwargs.get(f"max_strength_value_{i}"))
307 |
308 | dynamic_frame_distribution_values += distribution_value
309 | dynamic_key_frame_influence_values += influence_value
310 | dynamic_strength_values += strength_value
311 |
312 | return (dynamic_frame_distribution_values[:-1], dynamic_key_frame_influence_values[:-1], dynamic_strength_values[:-1],)
313 |
314 | __all__ = [
315 | "To",
316 | "From",
317 | "ImageGetter",
318 | "CachedGetter",
319 | "FlowBuilder",
320 | "FlowBuilderSetter",
321 | "DragNUWAImageCanvas",
322 | "BatchCreativeInterpolationNodeDynamicSettings",
323 | ]
--------------------------------------------------------------------------------
/nodes/logger.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import copy
3 | import logging
4 |
5 |
6 | class ColoredFormatter(logging.Formatter):
7 | COLORS = {
8 | "DEBUG": "\033[0;36m", # CYAN
9 | "INFO": "\033[0;32m", # GREEN
10 | "WARNING": "\033[0;33m", # YELLOW
11 | "ERROR": "\033[0;31m", # RED
12 | "CRITICAL": "\033[0;37;41m", # WHITE ON RED
13 | "RESET": "\033[0m", # RESET COLOR
14 | }
15 |
16 | def format(self, record):
17 | colored_record = copy.copy(record)
18 | levelname = colored_record.levelname
19 | seq = self.COLORS.get(levelname, self.COLORS["RESET"])
20 | colored_record.levelname = f"{seq}{levelname}{self.COLORS['RESET']}"
21 | return super().format(colored_record)
22 |
23 |
24 | # Create a new logger
25 | logger = logging.getLogger("KomojiniCustomNodes")
26 | logger.propagate = False
27 |
28 | # Add handler if we don't have one.
29 | if not logger.handlers:
30 | handler = logging.StreamHandler(sys.stdout)
31 | handler.setFormatter(ColoredFormatter("[%(name)s] - %(levelname)s - %(message)s"))
32 | logger.addHandler(handler)
33 |
34 | # Configure logger
35 | loglevel = logging.DEBUG
36 | logger.setLevel(loglevel)
37 |
--------------------------------------------------------------------------------
/nodes/python_nodes.py:
--------------------------------------------------------------------------------
1 |
2 |
3 | class PythonNode:
4 | @classmethod
5 | def INPUT_TYPES(s):
6 | return {
7 | "required": {
8 | "python_code": ("STRING", {"default": "", "multiline": True})
9 | },
10 | "optional": {
11 |
12 | },
13 | }
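14 | 
15 |     # --- Illustrative sketch only: the original file ends after INPUT_TYPES above. ---
16 |     # Everything below is an assumption about how such a node *could* run the code string,
17 |     # not the author's implementation; the "run"/"result" names are hypothetical.
18 |     RETURN_TYPES = ("STRING",)
19 |     FUNCTION = "run"
20 |     CATEGORY = "komojini"
21 | 
22 |     def run(self, python_code, **kwargs):
23 |         scope = dict(kwargs)
24 |         # Executes arbitrary user code; only appropriate for trusted, local workflows.
25 |         exec(python_code, {}, scope)
26 |         return (str(scope.get("result", "")),)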
--------------------------------------------------------------------------------
/nodes/utils.py:
--------------------------------------------------------------------------------
1 | import hashlib
2 | import os
3 | from typing import Iterable
4 | import shutil
5 | import subprocess
6 |
7 | from .logger import logger
8 |
9 |
10 | def ffmpeg_suitability(path):
11 | try:
12 | version = subprocess.run([path, "-version"], check=True,
13 | capture_output=True).stdout.decode("utf-8")
14 | except:
15 | return 0
16 | score = 0
17 | #rough layout of the importance of various features
18 | simple_criterion = [("libvpx", 20),("264",10), ("265",3),
19 | ("svtav1",5),("libopus", 1)]
20 | for criterion in simple_criterion:
21 | if version.find(criterion[0]) >= 0:
22 | score += criterion[1]
23 | #obtain rough compile year from copyright information
24 | copyright_index = version.find('2000-2')
25 | if copyright_index >= 0:
26 | copyright_year = version[copyright_index+6:copyright_index+9]
27 | if copyright_year.isnumeric():
28 | score += int(copyright_year)
29 | return score
30 |
31 | if "VHS_FORCE_FFMPEG_PATH" in os.environ:
32 |     ffmpeg_path = os.environ["VHS_FORCE_FFMPEG_PATH"]
33 | else:
34 | ffmpeg_paths = []
35 | try:
36 | from imageio_ffmpeg import get_ffmpeg_exe
37 | imageio_ffmpeg_path = get_ffmpeg_exe()
38 | ffmpeg_paths.append(imageio_ffmpeg_path)
39 | except:
40 | if "VHS_USE_IMAGEIO_FFMPEG" in os.environ:
41 | raise
42 | logger.warn("Failed to import imageio_ffmpeg")
43 | if "VHS_USE_IMAGEIO_FFMPEG" in os.environ:
44 | ffmpeg_path = imageio_ffmpeg_path
45 | else:
46 | system_ffmpeg = shutil.which("ffmpeg")
47 | if system_ffmpeg is not None:
48 | ffmpeg_paths.append(system_ffmpeg)
49 | if len(ffmpeg_paths) == 0:
50 | logger.error("No valid ffmpeg found.")
51 | ffmpeg_path = None
52 | else:
53 | ffmpeg_path = max(ffmpeg_paths, key=ffmpeg_suitability)
54 |
55 |
56 | def get_sorted_dir_files_from_directory(directory: str, skip_first_images: int=0, select_every_nth: int=1, extensions: Iterable=None):
57 | directory = directory.strip()
58 | dir_files = os.listdir(directory)
59 | dir_files = sorted(dir_files)
60 | dir_files = [os.path.join(directory, x) for x in dir_files]
61 | dir_files = list(filter(lambda filepath: os.path.isfile(filepath), dir_files))
62 | # filter by extension, if needed
63 | if extensions is not None:
64 | extensions = list(extensions)
65 | new_dir_files = []
66 | for filepath in dir_files:
67 | ext = "." + filepath.split(".")[-1]
68 | if ext.lower() in extensions:
69 | new_dir_files.append(filepath)
70 | dir_files = new_dir_files
71 | # start at skip_first_images
72 | dir_files = dir_files[skip_first_images:]
73 | dir_files = dir_files[0::select_every_nth]
74 | return dir_files
75 |
76 |
77 | # modified from https://stackoverflow.com/questions/22058048/hashing-a-file-in-python
78 | def calculate_file_hash(filename: str, hash_every_n: int = 1):
79 | h = hashlib.sha256()
80 | b = bytearray(10*1024*1024) # read 10 megabytes at a time
81 | mv = memoryview(b)
82 | with open(filename, 'rb', buffering=0) as f:
83 | i = 0
84 | # don't hash entire file, only portions of it if requested
85 | while n := f.readinto(mv):
86 | if i%hash_every_n == 0:
87 | h.update(mv[:n])
88 | i += 1
89 | return h.hexdigest()
90 |
91 |
92 | def get_audio(file, start_time=0, duration=0):
93 | args = [ffmpeg_path, "-v", "error", "-i", file]
94 | if start_time > 0:
95 | args += ["-ss", str(start_time)]
96 | if duration > 0:
97 | args += ["-t", str(duration)]
98 | return subprocess.run(args + ["-f", "wav", "-"],
99 | stdout=subprocess.PIPE, check=True).stdout
100 |
101 |
102 | def lazy_eval(func):
103 | class Cache:
104 | def __init__(self, func):
105 | self.res = None
106 | self.func = func
107 | def get(self):
108 | if self.res is None:
109 | self.res = self.func()
110 | return self.res
111 | cache = Cache(func)
112 | return lambda : cache.get()
113 |
114 |
115 | def is_url(url):
116 | return url.split("://")[0] in ["http", "https"]
117 |
118 |
119 | def hash_path(path):
120 | if path is None:
121 | return "input"
122 | if is_url(path):
123 | return "url"
124 | return calculate_file_hash(path.strip("\""))
125 |
126 |
127 | def validate_path(path, allow_none=False, allow_url=True):
128 | if path is None:
129 | return allow_none
130 | if is_url(path):
131 | #Probably not feasible to check if url resolves here
132 | return True if allow_url else "URLs are unsupported for this path"
133 | if not os.path.isfile(path.strip("\"")):
134 | return "Invalid file path: {}".format(path)
135 | return True
136 |
137 |
138 |
139 | def to_hashable(inputs):
140 | if isinstance(inputs, dict):
141 | # Convert each key-value pair in the dictionary
142 | hashable_dict = {key: to_hashable(value) for key, value in inputs.items()}
143 | return frozenset(hashable_dict.items())
144 | elif isinstance(inputs, list):
145 | # Convert each element in the list
146 | return tuple(to_hashable(item) for item in inputs)
147 | else:
148 | # Base case: if it's not a dictionary or list, return the element itself
149 | return inputs
150 |
151 |
152 | def hashable_to_dict(hashable_representation):
153 | if isinstance(hashable_representation, frozenset):
154 | # Convert each key-value pair back to a dictionary
155 | original_dict = {key: hashable_to_dict(value) for key, value in hashable_representation}
156 | return original_dict
157 | elif isinstance(hashable_representation, tuple):
158 | # Convert each element in the tuple back to a list
159 | original_list = [hashable_to_dict(item) for item in hashable_representation]
160 | return original_list
161 | else:
162 | # Base case: if it's not a frozenset or tuple, return the element itself
163 | return hashable_representation
164 |
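165 | 
166 | def _roundtrip_example():
167 |     # Illustrative only (not part of the original module): shows the to_hashable /
168 |     # hashable_to_dict round trip that the cacheable sampler nodes rely on when they
169 |     # use the frozen form as an lru_cache key.
170 |     example = {"steps": 20, "size": [512, 512], "sampler": {"name": "euler"}}
171 |     assert hashable_to_dict(to_hashable(example)) == example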
--------------------------------------------------------------------------------
/nodes/video_loaders.py:
--------------------------------------------------------------------------------
1 | from pytube import YouTube
2 | from pytube.exceptions import VideoUnavailable
3 | import cv2
4 | import os
5 | from pathlib import Path
6 | from PIL import Image, ImageOps
7 | from typing import Tuple, Dict, List, Any, Union
8 | import numpy as np
9 |
10 | import torch
11 | import subprocess
12 | import folder_paths
13 | from comfy.utils import common_upscale
14 |
15 | from .logger import logger
16 | from .utils import calculate_file_hash, validate_path, lazy_eval, hash_path
17 |
18 |
19 | video_extensions = ['webm', 'mp4', 'mkv', 'gif']
20 | force_sizes = ["Disabled", "256x?", "?x256", "256x256", "512x?", "?x512", "512x512", "?x768", "768x?"]
21 |
22 | COMMON_REQUIRED_INPUTS = {
23 | "start_sec": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 10000.0, "step": 0.1}),
24 | "end_sec": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 10000.0, "step": 0.1}),
25 | "max_fps": ("INT", {"default": -1, "min": -1, "max": 30, "step": 1}),
26 | "force_size": (force_sizes,),
27 | "frame_load_cap": ("INT", {"default": 50, "min": 1, "max": 10000, "step": 1}),
28 | }
29 |
30 | EMPTY_VIDEO_INPUTS = {
31 | "width": ("INT", {"default": 512, "min": 64, "max": 8192, "step": 64}),
32 | "height": ("INT", {"default": 512, "min": 64, "max": 8192, "step": 64}),
33 | "frame_count": ("INT", {"default": 1, "min": 1, "max": 4096}),
34 | "fps": ("INT", {"default": 10, "min": 1, "max": 1000, "step": 1}),
35 | }
36 |
37 | def target_size(width, height, force_size) -> tuple[int, int]:
38 | if force_size != "Disabled":
39 | force_size = force_size.split("x")
40 | if force_size[0] == "?":
41 | width = (width*int(force_size[1]))//height
42 |             #Limit to a multiple of 8 for latent conversion
43 | #TODO: Consider instead cropping and centering to main aspect ratio
44 | width = int(width)+4 & ~7
45 | height = int(force_size[1])
46 | elif force_size[1] == "?":
47 | height = (height*int(force_size[0]))//width
48 | height = int(height)+4 & ~7
49 | width = int(force_size[0])
50 | else:
51 | width = int(force_size[0])
52 | height = int(force_size[1])
53 | return (width, height)
54 |
55 |
56 | def frame_to_tensor(frame) -> torch.Tensor:
57 | frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
58 | # convert frame to comfyui's expected format (taken from comfy's load image code)
59 | image = Image.fromarray(frame)
60 | image = ImageOps.exif_transpose(image)
61 | image = np.array(image, dtype=np.float32) / 255.0
62 | image = torch.from_numpy(image)[None,]
63 | return image
64 |
65 | def process_video_cap(
66 | video_cap,
67 | start_sec,
68 | end_sec,
69 | frame_load_cap,
70 | max_fps = None,
71 | ):
72 | fps = int(video_cap.get(cv2.CAP_PROP_FPS))
73 | width, height = int(video_cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(video_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
74 | frame_count = int(video_cap.get(cv2.CAP_PROP_FRAME_COUNT))
75 |
76 | if not frame_load_cap or frame_load_cap <= 0:
77 | frame_load_cap = 999999
78 |
79 | if not end_sec:
80 | end_sec = frame_count / fps
81 |
82 | # Calculate the total number of frames in the specified time range
83 | video_sec = end_sec - start_sec
84 | original_frame_length = int(video_sec * fps)
85 |
86 | step = max(original_frame_length // frame_load_cap, 1)
87 | new_fps = fps // step
88 |
89 | if max_fps and 0 < max_fps < new_fps:
90 | if (step * new_fps) % new_fps != 0:
91 | logger.warning(f"Warning | new_fps: {new_fps}, max_fps: {max_fps}, modified step: int({step / max_fps * new_fps})")
92 | step = int(step / max_fps * new_fps)
93 | new_fps = max_fps
94 |
95 |
96 | start_frame = fps * start_sec
97 | end_frame = fps * end_sec
98 |
99 | frames_added = 0
100 | images = []
101 |
102 | curr_frame = start_frame
103 |
104 | logger.info(f"start_frame: {start_frame}\nend_frame: {end_frame}\nstep: {step}\n")
105 |
106 | while True:
107 | # Set the frame position
108 | int_curr_frame = int(curr_frame)
109 |
110 | video_cap.set(cv2.CAP_PROP_POS_FRAMES, int_curr_frame)
111 |
112 | ret, frame = video_cap.read()
113 | if not ret:
114 | break
115 |
116 | # Append the frame to the frames list
117 | image = frame_to_tensor(frame)
118 | images.append(image)
119 | frames_added += 1
120 |
121 | # if cap exists and we've reached it, stop processing frames
122 | if frame_load_cap > 0 and frames_added >= frame_load_cap:
123 | break
124 | if curr_frame >= end_frame:
125 | break
126 |
127 | curr_frame += step
128 |
129 | #Setup lambda for lazy audio capture
130 | #audio = lambda : get_audio(video, skip_first_frames * target_frame_time, frame_load_cap*target_frame_time)
131 | return (images, frames_added, new_fps, width, height)
132 |
133 |
134 | def load_video_cv(
135 | video: str,
136 | start_sec: float,
137 | end_sec: float,
138 | frame_load_cap: int = 50,
139 | output_dir = None,
140 | max_fps: int = -1,
141 | force_size = "Disabled",
142 | **kwargs,
143 | ) -> Tuple[torch.Tensor, int, int, int, int]:
144 |
145 | video_cap = None
146 | try:
147 | video_cap = cv2.VideoCapture(video)
148 | if not video_cap.isOpened():
149 | raise ValueError(f"{video} could not be loaded with cv.")
150 | images, frames_added, fps, width, height = process_video_cap(video_cap, start_sec, end_sec, frame_load_cap, max_fps)
151 |
152 | finally:
153 | if video_cap:
154 | video_cap.release()
155 | if len(images) == 0:
156 | raise RuntimeError("No frames generated")
157 | images = torch.cat(images, dim=0)
158 | if force_size != "Disabled":
159 | new_size = target_size(width, height, force_size)
160 |
161 | if new_size[0] != width or new_size[1] != height:
162 | s = images.movedim(-1,1)
163 | s = common_upscale(s, new_size[0], new_size[1], "lanczos", "center")
164 | images = s.movedim(1,-1)
165 | width, height = new_size
166 |
167 | # TODO: raise an error maybe if no frames were loaded?
168 |
169 | # Setup lambda for lazy audio capture
170 | # audio = lambda : get_audio(video, skip_first_frames * target_frame_time,
171 | # frame_load_cap*target_frame_time)
172 |
173 | return (images, frames_added, fps, width, height,)
174 |
175 |
176 | def is_gif(filename) -> bool:
177 | return str(filename).endswith("gif")
178 |
179 |
180 | def get_audio(file, start_time=0, duration=0):
181 | # TODO: set ffmpeg_path
182 | ffmpeg_path = ""
183 | args = [ffmpeg_path, "-v", "error", "-i", file]
184 | if start_time > 0:
185 | args += ["-ss", str(start_time)]
186 | if duration > 0:
187 | args += ["-t", str(duration)]
188 | return subprocess.run(args + ["-f", "wav", "-"],
189 | stdout=subprocess.PIPE, check=True).stdout
190 |
191 |
192 | def download_youtube_video(
193 | youtube_url: str,
194 | start_sec: float,
195 | end_sec: float,
196 | frame_load_cap: int = 50,
197 | output_dir = None,
198 | force_size = "Disabled",
199 | max_fps = None,
200 | **kwargs,
201 | ):
202 | if not output_dir:
203 | output_dir = os.path.join(folder_paths.output_directory, "youtube")
204 |
205 | if not os.path.exists(output_dir):
206 | os.makedirs(output_dir, exist_ok=True)
207 |
208 | cap = None
209 |
210 | try:
211 | yt = YouTube(youtube_url)
212 | stream = yt.streams.filter(progressive=True, file_extension='mp4').order_by('resolution').desc().first()
213 |
214 | video_path = stream.download(output_dir)
215 |
216 | cap = cv2.VideoCapture(video_path)
217 | images, frames_added, fps, width, height = process_video_cap(cap, start_sec, end_sec, frame_load_cap, max_fps)
218 |
219 | finally:
220 | # Release the video capture object
221 | if cap:
222 | cap.release()
223 |
224 | if len(images) == 0:
225 | raise RuntimeError("No frames generated")
226 | images = torch.cat(images, dim=0)
227 |
228 | if force_size != "Disabled":
229 | new_size = target_size(width, height, force_size)
230 | if new_size[0] != width or new_size[1] != height:
231 | s = images.movedim(-1,1)
232 | s = common_upscale(s, new_size[0], new_size[1], "lanczos", "center")
233 | images = s.movedim(1,-1)
234 | width, height = new_size
235 |
236 | #Setup lambda for lazy audio capture
237 | #audio = lambda : get_audio(video, skip_first_frames * target_frame_time, frame_load_cap*target_frame_time)
238 | return (images, frames_added, fps, width, height)
239 |
240 |
241 | class YouTubeVideoLoader:
242 | @classmethod
243 | def INPUT_TYPES(s):
244 |
245 | inputs = {
246 | "required": {"youtube_url": ("STRING", {"default": "youtube/url/here"}),},
247 | "optional": {
248 | "output_dir": ("STRING", {"default": ""}),
249 | }
250 | }
251 | inputs["required"].update(COMMON_REQUIRED_INPUTS)
252 |
253 | return inputs
254 |
255 | FUNCTION = "load_video"
256 | RETURN_TYPES = ("IMAGE", "INT", "INT", "INT", "INT",)
257 | RETURN_NAMES = ("images", "frame_count", "fps", "width", "height",)
258 | CATEGORY = "komojini/Video"
259 |
260 | def load_video(self, **kwargs):
261 | return download_youtube_video(**kwargs)
262 |
263 |
264 | class UltimateVideoLoader:
265 | source = [
266 | "fileupload",
267 | "filepath",
268 | "YouTube",
269 | "emptyvideo",
270 | ]
271 |
272 | @classmethod
273 | def INPUT_TYPES(cls):
274 | input_dir = folder_paths.get_input_directory()
275 | files = []
276 | for f in os.listdir(input_dir):
277 | if os.path.isfile(os.path.join(input_dir, f)):
278 | file_parts = f.split('.')
279 | if len(file_parts) > 1 and (file_parts[-1] in video_extensions):
280 | files.append(f)
281 |
282 | inputs = {
283 | "required": {
284 | "source": (cls.source,),
285 | "youtube_url": ("STRING", {"default": "youtube/url/here"}),
286 | "video": ("STRING", {"default": "X://insert/path/here.mp4", "path_extensions": video_extensions}),
287 | "upload": (sorted(files),),
288 | }
289 | }
290 |
291 | inputs["required"].update(COMMON_REQUIRED_INPUTS)
292 | inputs["required"].update(EMPTY_VIDEO_INPUTS)
293 |
294 | return inputs
295 |
296 | FUNCTION = "load_video"
297 | RETURN_TYPES = ("IMAGE", "INT", "INT", "INT", "INT",)
298 | RETURN_NAMES = ("images", "frame_count", "fps", "width", "height",)
299 | CATEGORY = "komojini/Video"
300 |
301 | def load_video(self, **kwargs):
302 | source = kwargs.get("source")
303 | if source == "YouTube":
304 | images, frames_count, fps, width, height = download_youtube_video(**kwargs)
305 | elif source == "filepath":
306 | images, frames_count, fps, width, height = load_video_cv(**kwargs)
307 | elif source == "fileupload":
308 | kwargs['video'] = folder_paths.get_annotated_filepath(kwargs['upload'].strip("\""))
309 | images, frames_count, fps, width, height = load_video_cv(**kwargs)
310 | elif source == "emptyvideo":
311 | frames_count = kwargs["frame_count"]
312 | width, height = kwargs["width"], kwargs["height"]
313 | fps = kwargs["fps"]
314 | images = torch.zeros([frames_count, height, width, 3])
315 |
316 |         logger.debug(f"loaded video images.shape: {images.shape}, frames_count: {frames_count}, fps: {fps}, width x height: {width}x{height}")
317 | return (images, frames_count, fps, width, height,)
318 |
319 | # @classmethod
320 | # def IS_CHANGED(s, upload, **kwargs):
321 | # logger.debug(f"is_changed | source: {source}")
322 |
323 | # source = kwargs.get("source")
324 | # if source == "filepath":
325 | # video = kwargs.get("video")
326 | # return hash_path(video)
327 | # elif source == "fileupload":
328 | # image_path = folder_paths.get_annotated_filepath(upload)
329 | # return calculate_file_hash(image_path)
330 | # else:
331 | # youtube_url = kwargs.get("youtube_url")
332 | # return hash_path(youtube_url)
333 |
334 | # @classmethod
335 | # def VALIDATE_INPUTS(s, video, force_size, **kwargs):
336 | # return validate_path(video, allow_none=True)
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | pytube==15.0.0
2 | opencv-python
3 | numpy
4 | torch
5 | pillow
6 | psutil
7 | gputil
--------------------------------------------------------------------------------
/video_formats/ProRes.json:
--------------------------------------------------------------------------------
1 | {
2 | "main_pass":
3 | [
4 | "-n", "-c:v", "prores_ks",
5 | "-profile:v","3",
6 | "-pix_fmt", "yuv422p10"
7 | ],
8 | "audio_pass": ["-c:a", "pcm_s16le"],
9 | "extension": "mov"
10 | }
11 |
--------------------------------------------------------------------------------
/video_formats/av1-webm.json:
--------------------------------------------------------------------------------
1 | {
2 | "main_pass":
3 | [
4 | "-n", "-c:v", "libsvtav1",
5 | "-pix_fmt", ["pix_fmt", ["yuv420p10le", "yuv420p"]],
6 | "-crf", ["crf","INT", {"default": 23, "min": 0, "max": 100, "step": 1}]
7 | ],
8 | "audio_pass": ["-c:a", "libopus"],
9 | "input_color_depth": ["input_color_depth", ["8bit", "16bit"]],
10 | "save_metadata": ["save_metadata", "BOOLEAN", {"default": true}],
11 | "extension": "webm",
12 | "environment": {"SVT_LOG": "1"}
13 | }
14 |
--------------------------------------------------------------------------------
/video_formats/h264-mp4.json:
--------------------------------------------------------------------------------
1 | {
2 | "main_pass":
3 | [
4 | "-n", "-c:v", "libx264",
5 | "-pix_fmt", ["pix_fmt", ["yuv420p", "yuv420p10le"]],
6 | "-crf", ["crf","INT", {"default": 19, "min": 0, "max": 100, "step": 1}]
7 | ],
8 | "audio_pass": ["-c:a", "aac"],
9 | "save_metadata": ["save_metadata", "BOOLEAN", {"default": true}],
10 | "extension": "mp4"
11 | }
12 |
--------------------------------------------------------------------------------
/video_formats/h265-mp4.json:
--------------------------------------------------------------------------------
1 | {
2 | "main_pass":
3 | [
4 | "-n", "-c:v", "libx265",
5 | "-pix_fmt", ["pix_fmt", ["yuv420p10le", "yuv420p"]],
6 | "-crf", ["crf","INT", {"default": 22, "min": 0, "max": 100, "step": 1}],
7 | "-preset", "medium",
8 | "-x265-params", "log-level=quiet"
9 | ],
10 | "audio_pass": ["-c:a", "aac"],
11 | "save_metadata": ["save_metadata", "BOOLEAN", {"default": true}],
12 | "extension": "mp4"
13 | }
14 |
--------------------------------------------------------------------------------
/video_formats/webm.json:
--------------------------------------------------------------------------------
1 | {
2 | "main_pass":
3 | [
4 | "-n",
5 | "-pix_fmt", "yuv420p",
6 | "-crf", ["crf","INT", {"default": 20, "min": 0, "max": 100, "step": 1}],
7 | "-b:v", "0"
8 | ],
9 | "audio_pass": ["-c:a", "libvorbis"],
10 | "save_metadata": ["save_metadata", "BOOLEAN", {"default": true}],
11 | "extension": "webm"
12 | }
13 |
--------------------------------------------------------------------------------