├── .gitignore
├── assets
│   ├── 01.png
│   ├── 02.png
│   ├── 03.png
│   ├── 04.png
│   ├── 05.jpg
│   ├── 06.jpg
│   ├── 07.png
│   ├── 08.png
│   ├── 09.png
│   ├── 10.png
│   ├── lxwm.jpg
│   └── image
│       ├── bg-image.png
│       ├── bg-image1.png
│       ├── bg-image2.png
│       ├── bg-image3.png
│       └── bg-image33.png
├── huise_admin
│   ├── assets
│   │   ├── out.png
│   │   ├── input.png
│   │   ├── login.jpg
│   │   ├── p-bg.png
│   │   ├── working.png
│   │   ├── yindao.png
│   │   └── bg-image-box.png
│   ├── index.html
│   └── input.js
├── requirements.txt
├── pyproject.toml
├── install.py
├── mime.py
├── README.md
├── web
│   ├── index.js
│   └── web.js
├── LICENSE
├── public.py
├── utils.py
├── wss.py
└── __init__.py
/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__/
2 | config/
--------------------------------------------------------------------------------
/assets/01.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhulu111/ComfyUI_Bxb/HEAD/assets/01.png
--------------------------------------------------------------------------------
/assets/02.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhulu111/ComfyUI_Bxb/HEAD/assets/02.png
--------------------------------------------------------------------------------
/assets/03.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhulu111/ComfyUI_Bxb/HEAD/assets/03.png
--------------------------------------------------------------------------------
/assets/04.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhulu111/ComfyUI_Bxb/HEAD/assets/04.png
--------------------------------------------------------------------------------
/assets/05.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhulu111/ComfyUI_Bxb/HEAD/assets/05.jpg
--------------------------------------------------------------------------------
/assets/06.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhulu111/ComfyUI_Bxb/HEAD/assets/06.jpg
--------------------------------------------------------------------------------
/assets/07.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhulu111/ComfyUI_Bxb/HEAD/assets/07.png
--------------------------------------------------------------------------------
/assets/08.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhulu111/ComfyUI_Bxb/HEAD/assets/08.png
--------------------------------------------------------------------------------
/assets/09.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhulu111/ComfyUI_Bxb/HEAD/assets/09.png
--------------------------------------------------------------------------------
/assets/10.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhulu111/ComfyUI_Bxb/HEAD/assets/10.png
--------------------------------------------------------------------------------
/assets/lxwm.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhulu111/ComfyUI_Bxb/HEAD/assets/lxwm.jpg
--------------------------------------------------------------------------------
/assets/image/bg-image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhulu111/ComfyUI_Bxb/HEAD/assets/image/bg-image.png
--------------------------------------------------------------------------------
/assets/image/bg-image1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhulu111/ComfyUI_Bxb/HEAD/assets/image/bg-image1.png
--------------------------------------------------------------------------------
/assets/image/bg-image2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhulu111/ComfyUI_Bxb/HEAD/assets/image/bg-image2.png
--------------------------------------------------------------------------------
/assets/image/bg-image3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhulu111/ComfyUI_Bxb/HEAD/assets/image/bg-image3.png
--------------------------------------------------------------------------------
/huise_admin/assets/out.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhulu111/ComfyUI_Bxb/HEAD/huise_admin/assets/out.png
--------------------------------------------------------------------------------
/assets/image/bg-image33.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhulu111/ComfyUI_Bxb/HEAD/assets/image/bg-image33.png
--------------------------------------------------------------------------------
/huise_admin/assets/input.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhulu111/ComfyUI_Bxb/HEAD/huise_admin/assets/input.png
--------------------------------------------------------------------------------
/huise_admin/assets/login.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhulu111/ComfyUI_Bxb/HEAD/huise_admin/assets/login.jpg
--------------------------------------------------------------------------------
/huise_admin/assets/p-bg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhulu111/ComfyUI_Bxb/HEAD/huise_admin/assets/p-bg.png
--------------------------------------------------------------------------------
/huise_admin/assets/working.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhulu111/ComfyUI_Bxb/HEAD/huise_admin/assets/working.png
--------------------------------------------------------------------------------
/huise_admin/assets/yindao.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhulu111/ComfyUI_Bxb/HEAD/huise_admin/assets/yindao.png
--------------------------------------------------------------------------------
/huise_admin/assets/bg-image-box.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhulu111/ComfyUI_Bxb/HEAD/huise_admin/assets/bg-image-box.png
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | Pillow
2 | tqdm
3 | aiohttp
4 | filetype
5 | websockets<15.0.0
6 | aiohttp-cors
7 | requests
8 | numpy
9 | piexif
10 |
--------------------------------------------------------------------------------
/huise_admin/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 | 变现
8 |
9 |
10 |
11 |
12 |
14 |
15 |
16 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "comfyui_bxb"
3 | description = "sdBxb, a tool that converts ComfyUI workflows into WeChat Mini Programs, Douyin Mini Programs, and H5 pages with one click, with built-in payment support."
4 | version = "1.0.0"
5 | license = { file = "LICENSE" }
6 |
7 | [project.urls]
8 | Repository = "https://github.com/zhulu111/ComfyUI_Bxb"
9 | # Used by Comfy Registry https://comfyregistry.org
10 |
11 | [tool.comfy]
12 | PublisherId = ""
13 | DisplayName = "ComfyUI_Bxb"
14 | Icon = ""
15 |
16 |
--------------------------------------------------------------------------------
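The `[tool.comfy]` table above is what the Comfy Registry reads; `PublisherId` and `Icon` are left blank here and would be filled in before publishing. As a quick illustration of the file's structure, a standalone snippet (assuming Python 3.11+ for the stdlib `tomllib` parser, run from the repository root) that loads it and reads the registry fields:

```python
import tomllib  # stdlib TOML parser, available since Python 3.11

with open("pyproject.toml", "rb") as f:  # tomllib requires binary mode
    meta = tomllib.load(f)

print(meta["project"]["name"])               # comfyui_bxb
print(meta["project"]["version"])            # 1.0.0
print(meta["tool"]["comfy"]["DisplayName"])  # ComfyUI_Bxb
```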
/install.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import os.path
3 | import subprocess
4 | custom_nodes_path = os.path.dirname(os.path.abspath(__file__))
5 | def build_pip_install_cmds(args):
6 | if "python_embeded" in sys.executable or "python_embedded" in sys.executable:
7 | return [sys.executable, '-s', '-m', 'pip', 'install'] + args
8 | else:
9 | return [sys.executable, '-m', 'pip', 'install'] + args
10 | def ensure_package():
11 | cmds = build_pip_install_cmds(['-r', 'requirements.txt'])
12 | subprocess.run(cmds, cwd=custom_nodes_path)
13 | ensure_package()
14 |
--------------------------------------------------------------------------------
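One detail worth noting in install.py: ComfyUI's portable builds ship an interpreter under a directory named `python_embeded`, and the script passes `-s` in that case so the user site-packages directory is left out of `sys.path` during the install. A minimal sketch of the two branches (the executable paths below are hypothetical, purely for illustration):

```python
# Mirrors install.py's build_pip_install_cmds, parameterized on the
# interpreter path so both branches can be shown side by side.
def build_pip_install_cmds(executable, args):
    if "python_embeded" in executable or "python_embedded" in executable:
        # Portable/embedded build: -s keeps user site-packages off sys.path.
        return [executable, '-s', '-m', 'pip', 'install'] + args
    return [executable, '-m', 'pip', 'install'] + args

print(build_pip_install_cmds(r"D:\ComfyUI\python_embeded\python.exe",
                             ['-r', 'requirements.txt']))
print(build_pip_install_cmds("/usr/bin/python3", ['-r', 'requirements.txt']))
```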
/huise_admin/input.js:
--------------------------------------------------------------------------------
1 | import{api}from"../../../scripts/api.js";const api_base=api.api_base;async function loadVueApp(){try{const e=await fetch("huise_admin/index.html");if(!e.ok)throw new Error("Network response was not ok");const t=await e.text(),n=document.createElement("div");n.innerHTML=t;n.querySelectorAll('link[rel="stylesheet"]').forEach((e=>{const t=document.createElement("link");t.rel="stylesheet",t.href=`${e.getAttribute("href")}`,document.head.appendChild(t)}));const o=n.querySelectorAll("script");for(const e of o){const t=document.createElement("script");e.getAttribute("src")&&(t.src=`${e.getAttribute("src")}`,t.defer=!0,document.body.appendChild(t))}const r=n.querySelector("#admin-app-huise");if(r&&function e(t){Array.from(t.childNodes).forEach((n=>{3===n.nodeType?t.removeChild(n):1===n.nodeType&&e(n)}))}(r),r){const e=document.getElementById("app");e?e.innerHTML=r.innerHTML:document.body.appendChild(r),document.documentElement.classList.add("dark")}}catch(e){}}setTimeout((()=>{loadVueApp()}),500);
--------------------------------------------------------------------------------
/mime.py:
--------------------------------------------------------------------------------
1 | import mimetypes
2 | def add_mime_types():
3 | mimetypes.add_type('image/jpeg', '.jpeg')
4 | mimetypes.add_type('image/jpeg', '.jpg')
5 | mimetypes.add_type('image/png', '.png')
6 | mimetypes.add_type('image/gif', '.gif')
7 | mimetypes.add_type('image/webp', '.webp')
8 | mimetypes.add_type('image/avif', '.avif')
9 | mimetypes.add_type('video/mp4', '.mp4')
10 | mimetypes.add_type('video/mpeg', '.mpeg')
11 | mimetypes.add_type('video/x-msvideo', '.avi')
12 | mimetypes.add_type('video/x-ms-wmv', '.wmv')
13 | mimetypes.add_type('video/quicktime', '.mov')
14 | mimetypes.add_type('video/webm', '.webm')
15 | mimetypes.add_type('video/x-flv', '.flv')
16 | mimetypes.add_type('audio/mpeg', '.mp3')
17 | mimetypes.add_type('audio/x-wav', '.wav')
18 | mimetypes.add_type('audio/ogg', '.ogg')
19 | mimetypes.add_type('audio/x-flac', '.flac')
20 | mimetypes.add_type('audio/aac', '.aac')
21 | mimetypes.add_type('audio/x-ms-wma', '.wma')
22 | mimetypes.add_type('audio/x-m4a', '.m4a')
23 | mimetypes.add_type('text/plain', '.txt')
24 | mimetypes.add_type('application/json', '.json')
25 |
--------------------------------------------------------------------------------
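mime.py only registers extension-to-MIME mappings in the process-wide `mimetypes` registry; this matters on hosts whose system MIME tables lack newer entries such as `.avif`. A small standalone sketch of the effect (not importing this package; `clip.avif` is just an example filename):

```python
import mimetypes

# On a bare system table this may come back as (None, None).
print(mimetypes.guess_type("clip.avif"))

# The same registration add_mime_types() performs for each type it covers.
mimetypes.add_type('image/avif', '.avif')

print(mimetypes.guess_type("clip.avif"))  # ('image/avif', None)
```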
/README.md:
--------------------------------------------------------------------------------
1 | # SD变现宝 (sdBxb)
2 | ## Plugin Features
3 | The SD变现宝 plugin converts a ComfyUI workflow into a WeChat Mini Program, a Douyin Mini Program, an in-WeChat H5 page, or a Kuaishou Mini Program with one click, all with built-in payment support.
4 | Add the ComfyUI_Bxb plugin on top of your existing workflow to get the one-click conversion.
5 | ## Tutorial
6 | https://dcn92p748woz.feishu.cn/wiki/Gufowb2pwi4iK2ks3oRcLZBqnrg?from=from_copylink
7 | ## Special Notes
8 | ### If anything goes wrong during use, or you have feedback or suggestions, please contact me.
9 | 
10 | ## Changelog
11 | ### Released
12 | 2025.02.05:
13 | 1. Added mask editing;
14 | 2. Added a regenerate feature: when a user taps regenerate, their previous inputs are reused;
15 | 3. Fixed style-compatibility issues with newer ComfyUI versions;
16 | 4. Added Kuaishou Mini Program support;
17 |
18 | 2024.10.26:
19 | 1. Added iOS payment for the Douyin Mini Program (diamond payments);
20 | 2. Added iOS payment for the WeChat Mini Program (redirects to customer-service messages, which auto-reply with an access link);
21 | 3. Added direct-access passcodes and promotional passcodes for short-video promotion;
22 | 4. Added WeChat Moments sharing for the Mini Program (merchant home page and detail pages);
23 | 5. Improved promotion logic: once promotion is enabled, users see the "Promote & Earn" entry as soon as their image finishes generating;
24 | 6. Improved promotion logic: supports poster, QR-code, passcode, and direct-share promotion;
25 | 7. Added a personal-center page (generation history, promotion earnings, contact the author, about us, and more);
26 | 8. Added a "contact the author" feature; authors can set a custom personal WeChat or Douyin QR code;
27 | 9. Improved ad display logic: ads and payment now appear together;
28 | 10. Adjusted the compute-credit exchange rate: from 100 credits per 1 yuan to 1 credit per 1 yuan;
29 | Note: refresh your browser after updating the plugin.
30 |
31 | 2024.9.16:
32 | 1. Added rewarded video ads (currently supported on the WeChat and Douyin Mini Programs);
33 | 2. Fixed several compatibility issues across environments;
34 |
35 | 2024.9.11:
36 | 1. Added a path-selection node (bxbSwitch);
37 | 2. Added customizable backgrounds for the home page and the creation page;
38 | 3. Added sorting for works;
39 |
40 | 2024.9.5:
41 | 1. Brand-new PC admin console;
42 | 2. Brand-new app packaging;
43 | 3. Supports five input modes: text, image, video, number, and dropdown;
44 | 4. Supports image and video output;
45 | 5. Supports multi-node output: if a workflow contains several image-save or video-save nodes, the Mini Program renders all of them;
46 | 6. Unlocked free usage: a work's unit price can be set to 0, and free-use counts can be set in the hundreds or thousands;
47 | 7. The main image can now be a video;
48 | Note: if you are already running in production, consider waiting a few days or testing in a fresh ComfyUI environment; this update touches a lot of low-level logic and, despite multiple rounds of testing, there may be compatibility issues we missed. Also refresh your browser after updating the plugin.
49 |
50 | 2024.7.27:
51 | 1. Dropped NAT tunneling in favor of a new communication scheme, fixing the various connection failures caused by firewall blocking as well as 360 antivirus false positives;
52 | 2. Hardened workflow security so workflows are never exposed: workflow API data is stored on the local machine instead of being uploaded to the server, and a dedicated image-save node, sdBxb_saveImage, was added (ComfyUI's default image-save node embeds workflow metadata in saved images; sdBxb_saveImage does not);
53 | 3. Supports the three mainstream systems: Windows, Linux, and macOS;
54 | 4. Resolved conflicts between sdBxb and certain other plugins;
55 | 5. Improved multi-machine concurrency: on local machines, as long as the model paths match, copy the workflow API data saved by the ComfyUI_Bxb plugin to the new machine, boot it, and open the ComfyUI terminal, with no need to click convert again; on cloud machines, simply clone and boot to scale out;
56 | 6. Added delete and share features for user works;
57 |
58 | 2024.7.15:
59 | 1. Added WeChat revenue sharing for promoted works, supported in the WeChat Mini Program and in-WeChat H5: a promoter applies on the author's home page, the author approves in the personal center, and the promoter can then promote that author's works via QR code or link, with the revenue split freely controlled by the author;
60 |
61 | 2024.7.9:
62 | 1. Added Douyin's Mini Program promotion plan; once enabled and approved, the promotion entry for a work appears in the "Mini Program Promotion Plan" mini program inside the Douyin app;
63 |
64 | 2024.7.3:
65 | 1. Added stored-value (top-up) support;
66 | 2. Added free-use counts;
67 | 3. The Douyin Mini Program now supports multiple images per run and video input/output;
68 |
69 | 2024.6.28:
70 | 1. Supports generating multiple images per run;
71 | 2. Supports video input and video output;
72 | Note: 1. this update prevents previously published Mini Program works from being updated; we will try to avoid this in the future; 2. the Douyin Mini Program does not support these features yet, while the WeChat Mini Program, in-WeChat H5, and out-of-WeChat H5 all do;
73 |
74 | 2024.6.23:
75 | Multi-GPU-server concurrency is now supported: save the workflow, then click one-click convert on each GPU server to map one work to multiple GPU servers;
76 | Note: model paths and related settings must be identical across the GPU servers.
77 | ### Planned
78 | 1. A PC client for end users;
79 | ## Installation
80 | 1. Install online via ComfyUI-Manager: search for "ComfyUI_Bxb" in ComfyUI-Manager to find this plugin (recommended);
81 | 2. git clone the plugin package into the ComfyUI/custom_nodes directory and restart ComfyUI;
82 | 3. Download the plugin package from the official QQ group (967073981), place it under ComfyUI/custom_nodes, and restart ComfyUI;
83 | ## Usage Examples and Screenshots
84 | ### ComfyUI Plugin Screenshots
85 | 
86 | 
87 | 
88 | 
89 | 
90 | 
91 | 
92 | 
93 | 
94 | 
--------------------------------------------------------------------------------
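For installation option 2 in the README above, a minimal sketch of the manual clone as a script (the `custom_nodes` location is hypothetical; substitute the path of your actual ComfyUI checkout):

```python
import subprocess
from pathlib import Path

# Hypothetical ComfyUI location; adjust to your installation.
custom_nodes = Path.home() / "ComfyUI" / "custom_nodes"

# Option 2 from the README: clone the plugin into custom_nodes,
# then restart ComfyUI so the node is picked up.
subprocess.run(
    ["git", "clone", "https://github.com/zhulu111/ComfyUI_Bxb.git"],
    cwd=custom_nodes,
    check=True,
)
```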
/web/index.js:
--------------------------------------------------------------------------------
1 | import{app}from"../../scripts/app.js";import{api}from"../../scripts/api.js";function initWidget(t){t.widgets,t.widgets_values&&t.widgets_values[0];let e=t.widgets_values?t.widgets_values.slice(1):void 0;t.widgets_values;t.widgets=t.widgets.slice(0,1),t.widgets[0].options.values=[];for(const[s,i]of t.inputs.entries()){let i=t.inputs[s];if("select"!==i.name&&i.link){let n=`${i.name}`;e&&e.length>0&&e[s]&&(n=e[s]),t.widgets[0].options.values.push(n),t.addWidget("text",`${i.name}_label`,n,((t,e,s,i,n)=>{let a=[];s.widgets_values&&s.widgets_values[0]&&a.push(s.widgets_values[0]);for(const[t,e]of s.widgets.slice(1).entries())0==a.length&&a.push(e.value.trim()),a.push(e.value.trim());s.widgets_values=a,initWidget(s)}),{input:i.name})}}t.widgets[0].value=t.widgets[0].options.values[0],app.canvas.setDirty(!0,!0)}let nodeDataName="bxbSwitch";app.registerExtension({name:nodeDataName,setup(t){},async beforeRegisterNodeDef(t,e,s){if(e.name===nodeDataName){var i="input";t.prototype.onConnectionsChange;t.prototype.onConnectionsChange=function(t,n,a,p){if(!p)return;if(2==t){if(a&&0==n&&(e.name==nodeDataName&&"Reroute"==s.graph._nodes_by_id[p.target_id]?.type&&s.graph._nodes_by_id[p.target_id].disconnectInput(p.target_slot),"*"===this.outputs[0].type))if("*"===p.type)s.graph._nodes_by_id[p.target_id].disconnectInput(p.target_slot);else{this.outputs[0].type=p.type,this.outputs[0].label=p.type,this.outputs[0].name=p.type;for(let t in this.inputs){let e=this.inputs[t];"select"!=e.name&&(e.type=p.type)}}return}if(e.name===nodeDataName&&"Reroute"==s.graph._nodes_by_id[p.origin_id].type&&this.disconnectInput(p.target_slot),"select"==this.inputs[n].name)return;if("*"===this.inputs[0].type){const t=s.graph.getNodeById(p.origin_id);let e=t.outputs[p.origin_slot].type;if("*"==e)return void this.disconnectInput(p.target_slot);for(const[t,s]of this.inputs.entries()){"select"!==this.inputs[t].name&&(this.inputs[t].type=e)}if(this.outputs[0].type=e,this.outputs[0].label=e,this.outputs[0].name=e,"COMBO"==e)if(this.inputs[p.target_slot].widget)this.outputs[0].widget=this.inputs[p.target_slot].widget;else for(const[e,s]of t.widgets.entries())t.outputs[p.target_slot].name===s.name&&(this.outputs[0].widget=s.options.values);s.canvas.setDirty(!0,!0)}let o=0;o+=this.inputs.find((t=>"select"===t.name))?1:0;let u=this.widgets.find((t=>"select"==t.name));if(!a&&this.inputs.length>1+o){(new Error).stack;"select"!==this.inputs[n].name&&(this.removeInput(n),this.widgets_values=this.widgets_values.slice(n,1),s.graph.onNodeAdded(this),s.canvas.setDirty(!0,!0),u.options.values.splice(n,1),this.widgets.splice(n+1,1))}let l=1;for(let t=0;t{}),!t.widgets||t.widgets.length)}});
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/public.py:
--------------------------------------------------------------------------------
1 | import hashlib
2 | import os
3 | import numpy as np
4 | import aiohttp
5 | import folder_paths
6 | from PIL import Image
7 | import traceback
8 | import json
9 | import sys
10 | import uuid
11 | from io import StringIO, BytesIO
12 | import re
13 | import requests
14 | from comfy.cli_args import parser
15 | import urllib
16 | import urllib.request
17 | import urllib.parse
18 | import filetype
19 | args = parser.parse_args()
20 | if args and args.listen:
21 | pass
22 | else:
23 | args = parser.parse_args([])
24 | import time
25 | import random
26 | from urllib.parse import urlparse
27 | try:
28 | resample_filter = Image.Resampling.LANCZOS
29 | except AttributeError:
30 | resample_filter = Image.ANTIALIAS
31 | def get_address():
32 | return args.listen if '0.0.0.0' not in args.listen else '127.0.0.1'
33 | def get_port():
34 | return args.port
35 | VERSION = '2.0.0'
36 | def write_key_value(key, value, string_io=None):
37 |
38 | if string_io is None:
39 | string_io = StringIO()
40 | json.dump({key: value}, string_io)
41 | else:
42 | string_io.seek(0)
43 | data = json.load(string_io)
44 | data[key] = value
45 | string_io.seek(0)
46 | string_io.truncate()
47 | json.dump(data, string_io)
48 | return string_io
49 | def get_value_by_key(key, string_io):
50 |
51 | string_io.seek(0)
52 | data = json.load(string_io)
53 | return data.get(key)
54 | def delete_key(key, string_io):
55 |
56 | string_io.seek(0)
57 | data = json.load(string_io)
58 | if key in data:
59 | del data[key]
60 | string_io.seek(0)
61 | string_io.truncate()
62 | json.dump(data, string_io)
63 | return string_io
64 | def read_json_from_file(name, path='json/', type_1='json'):
65 | base_url = find_project_custiom_nodes_path() + 'ComfyUI_Bxb/config/' + path
66 | if not os.path.exists(base_url + name):
67 | return None
68 | with open(base_url + name, 'r') as f:
69 | data = f.read()
70 | if data == '':
71 | return None
72 | if type_1 == 'json':
73 | try:
74 | data = json.loads(data)
75 | return data
76 | except ValueError as e:
77 | return None
78 | if type_1 == 'str':
79 | return data
80 | def write_json_to_file(data, name, path='json/', type_1='str'):
81 |
82 | base_url = find_project_custiom_nodes_path() + 'ComfyUI_Bxb/config/' + path
83 | if not os.path.exists(base_url):
84 | os.makedirs(base_url)
85 | if type_1 == 'str':
86 | str_data = str(data)
87 | with open(base_url + name, 'w') as f:
88 | f.write(str_data)
89 | elif type_1 == 'json':
90 | with open(base_url + name, 'w') as f:
91 | json.dump(data, f, indent=2)
92 | def get_output(uniqueid, path='json/api/'):
93 | output = read_json_from_file(uniqueid, path, 'json')
94 | if output is not None:
95 | return output
96 | return None
97 | def get_workflow(uniqueid, path='json/workflow/'):
98 | workflow = read_json_from_file(uniqueid, path, 'json')
99 | if workflow is not None:
100 | return {
101 | 'extra_data': {
102 | 'extra_pnginfo': {
103 | 'workflow': workflow
104 | }
105 | }
106 | }
107 | return None
108 | def delete_workflow(uniqueid):
109 | root_path = find_project_custiom_nodes_path() + 'ComfyUI_Bxb/config/json/'
110 | if os.path.exists(root_path + 'workflow/' + uniqueid + '.json'):
111 | os.remove(root_path + 'workflow/' + uniqueid + '.json')
112 | if os.path.exists(root_path + 'api/' + uniqueid + '.json'):
113 | os.remove(root_path + 'api/' + uniqueid + '.json')
114 | def get_token():
115 | techsid = read_json_from_file('techsid' + str(get_port_from_cmdline()) + '.txt', 'hash/', 'str')
116 | if techsid is not None:
117 | return techsid
118 | else:
119 | return 'init'
120 | pass
121 | def set_token(token):
122 | write_json_to_file(token, 'techsid' + str(get_port_from_cmdline()) + '.txt', 'hash/')
123 | def set_openid(token):
124 | write_json_to_file(token, 'openid' + str(get_port_from_cmdline()) + '.txt', 'hash/')
125 | def get_openid():
126 | openid = read_json_from_file('openid' + str(get_port_from_cmdline()) + '.txt', 'hash/', 'str')
127 | if openid is not None:
128 | return openid
129 | else:
130 | return 'init'
131 | pass
132 | def get_port_from_cmdline():
133 | for i, arg in enumerate(sys.argv):
134 | if arg == '--port' and i + 1 < len(sys.argv):
135 | try:
136 | return int(sys.argv[i + 1])
137 | except ValueError:
138 | pass
139 | match = re.search(r'--port[=\s]*(\d+)', arg)
140 | if match:
141 | try:
142 | return int(match.group(1))
143 | except ValueError:
144 | pass
145 | return 8188
146 | def get_version():
147 | return VERSION
148 | def get_mac_address():
149 | mac = uuid.getnode()
150 | return ':'.join(('%012X' % mac)[i:i + 2] for i in range(0, 12, 2))
151 | def generate_unique_client_id(port):
152 | unique_key = f"{get_mac_address()}:{port}"
153 | hash_object = hashlib.sha256(unique_key.encode())
154 | subdomain = hash_object.hexdigest()[:12]
155 | return subdomain
156 | def find_project_root():
157 | absolute_path = folder_paths.base_path
158 | if not absolute_path.endswith(os.sep):
159 | absolute_path += os.sep
160 | return absolute_path
161 | def find_project_custiom_nodes_path():
162 | absolute_path = folder_paths.folder_names_and_paths["custom_nodes"][0][0]
163 | if not absolute_path.endswith(os.sep):
164 | absolute_path += os.sep
165 | return absolute_path
166 | def find_project_bxb():
167 | script_directory = os.path.dirname(os.path.abspath(__file__))
168 | absolute_path = os.path.abspath(script_directory)
169 | if not absolute_path.endswith(os.sep):
170 | absolute_path += os.sep
171 | return absolute_path
172 | def get_base_url():
173 | return 'https://tt.9syun.com/app/index.php?i=66&t=0&v=1.0&from=wxapp&tech_client=sj&c=entry&a=wxapp&do=ttapp&m=tech_huise&r='
174 | def get_filenames(directory):
175 | if os.path.exists(directory):
176 | all_entries = os.listdir(directory)
177 | all_entries = [name for name in all_entries if os.path.isfile(os.path.join(directory, name))]
178 | all_entries = [name.split('.')[0] for name in all_entries]
179 | return all_entries
180 | else:
181 | return []
182 | def read_json_file(url):
183 | try:
184 | session = requests.Session()
185 | session.trust_env = False
186 | session.proxies = {'http': None, 'https': None}
187 | response = session.get(url)
188 | response.raise_for_status()
189 | response.encoding = 'utf-8'
190 | json_text = response.text.strip().lstrip('\ufeff')
191 | json_content = json.loads(json_text)
192 | return json_content
193 | except requests.exceptions.RequestException as e:
194 | return None
195 | except json.JSONDecodeError as e:
196 | return None
197 | def generate_md5_uid_timestamp_filename(original_filename, name_type=0):
198 |
199 | timestamp = str(time.time())
200 | random_number = str(generate_large_random_number(32))
201 | combined_string = original_filename + timestamp + random_number
202 | md5_hash = hashlib.md5(combined_string.encode('utf-8')).hexdigest()
203 | file_extension = os.path.splitext(original_filename)[1]
204 | if name_type == 1:
205 | filename = hashlib.md5(original_filename.encode('utf-8')).hexdigest() + file_extension
206 | else:
207 | filename = md5_hash + file_extension
208 | return filename
209 | def generate_md5_uid_timestamp(original_filename):
210 |
211 | timestamp = str(time.time())
212 | random_number = str(generate_large_random_number(32))
213 | combined_string = original_filename + timestamp + random_number
214 | md5_hash = hashlib.md5(combined_string.encode('utf-8')).hexdigest()
215 | filename = md5_hash
216 | return filename
217 | def generate_large_random_number(num_bits):
218 |
219 | return random.getrandbits(num_bits)
220 | def async_download_image(url, filename, name_type=0):
221 |
222 | dir_name = folder_paths.get_input_directory()
223 | no_proxy_handler = urllib.request.ProxyHandler({})
224 | opener = urllib.request.build_opener(no_proxy_handler)
225 | if name_type != 0:
226 | file_new_name = generate_md5_uid_timestamp_filename(filename, 1)
227 | else:
228 | file_new_name = generate_md5_uid_timestamp_filename(filename)
229 | try:
230 | full_path = os.path.join(dir_name, file_new_name)
231 | if os.path.exists(full_path):
232 | return {
233 | 'code': True,
234 | 'filename': file_new_name,
235 | }
236 | response = opener.open(url)
237 | if response.getcode() == 200:
238 | with open(full_path, 'wb') as f:
239 | f.write(response.read())
240 | return {
241 | 'code': True,
242 | 'filename': file_new_name,
243 | }
244 | else:
245 | return {
246 | 'code': False,
247 | 'filename': file_new_name,
248 | }
249 | except Exception as e:
250 | return {
251 | 'code': False,
252 | 'filename': file_new_name,
253 | }
254 | async def loca_download_image(url, filename, name_type=0):
255 |
256 | dir_name = folder_paths.get_input_directory()
257 | no_proxy_handler = urllib.request.ProxyHandler({})
258 | opener = urllib.request.build_opener(no_proxy_handler)
259 | if name_type != 0:
260 | file_new_name = generate_md5_uid_timestamp_filename(filename, 1)
261 | else:
262 | file_new_name = generate_md5_uid_timestamp_filename(filename)
263 | try:
264 | full_path = os.path.join(dir_name, file_new_name)
265 | if os.path.exists(full_path):
266 | return {
267 | 'code': True,
268 | 'filename': file_new_name,
269 | }
270 | response = opener.open(url)
271 | if response.getcode() == 200:
272 | with open(full_path, 'wb') as f:
273 | f.write(response.read())
274 | return {
275 | 'code': True,
276 | 'filename': file_new_name,
277 | }
278 | else:
279 | return {
280 | 'code': False,
281 | 'filename': file_new_name,
282 | }
283 | except Exception as e:
284 | return {
285 | 'code': False,
286 | 'filename': file_new_name,
287 | }
288 | def determine_file_type(file_path):
289 | kind = filetype.guess(file_path)
290 | if kind:
291 | mime_type = kind.mime
292 | if mime_type.startswith('video/'):
293 | return mime_type, 'video'
294 | elif mime_type.startswith('image/'):
295 | return mime_type, 'image'
296 | else:
297 | return mime_type, 'unknown'
298 | return False, 'unknown'
299 | def print_exception_in_chinese(e):
300 |
301 | tb = traceback.extract_tb(e.__traceback__)
302 | if tb:
303 | filename, line_number, function_name, text = tb[-1]
304 | for i, (fn, ln, func, txt) in enumerate(tb, 1):
305 | print(f" {txt.strip()}")
306 | traceback.print_exception(type(e), e, e.__traceback__)
307 | else:
308 | pass
309 | def remove_query_parameters(url):
310 | parsed_url = urlparse(url)
311 | url_without_params = parsed_url._replace(query="").geturl()
312 | return url_without_params
313 | def load_image(image_info):
314 |
315 | if image_info['type'] == 'path':
316 | return Image.open(image_info['content'])
317 | elif image_info['type'] == 'binary':
318 | return Image.open(BytesIO(image_info['content']))
319 | else:
320 | raise ValueError("Unknown image type. Supported types are 'path' and 'binary'.")
321 | def resize_image(image, target_width):
322 |
323 | width_percent = (target_width / float(image.size[0]))
324 | height_size = int((float(image.size[1]) * float(width_percent)))
325 | return image.resize((target_width, height_size), resample_filter)
326 | def resize_images(image_info_list, target_width=600):
327 |
328 | return [resize_image(load_image(img_info), target_width) for img_info in image_info_list]
329 | def calculate_canvas_size_for_single_canvas(layouts):
330 |
331 | max_width = 0
332 | total_height = 0
333 | for _, (x, y), img in layouts:
334 | img_width, img_height = img.size
335 | max_width = max(max_width, img_width + x)
336 | total_height = max(total_height, img_height + y)
337 | return max_width, total_height
338 | def calculate_layout(images, target_width=1200):
339 |
340 | layouts = []
341 | current_canvas_index = 0
342 | quadrant_positions = [
343 | (target_width, 0),
344 | (0, 0),
345 | (0, target_width),
346 | (target_width, target_width)
347 | ]
348 | quadrant_used = [False, False, False, False]
349 | remaining_images = images[:]
350 | ii = 0
351 | while remaining_images:
352 | ii = ii + 1
353 | img = remaining_images.pop(0)
354 | img_width, img_height = img.size
355 | placed = False
356 | if ii > 100:
357 | break
358 | for i in range(4):
359 | if not quadrant_used[i]:
360 | x, y = quadrant_positions[i]
361 | if img_height <= target_width:
362 | layouts.append((current_canvas_index, (x, y), img))
363 | quadrant_used[i] = True
364 | placed = True
365 | break
366 | else:
367 | if i in [0, 3] and quadrant_used[0] == False and quadrant_used[3] == False:
368 | quadrant_used[0] = True
369 | quadrant_used[3] = True
370 | placed = True
371 | elif i in [1, 2] and quadrant_used[1] == False and quadrant_used[2] == False:
372 | quadrant_used[1] = True
373 | quadrant_used[2] = True
374 | placed = True
375 | if not placed:
376 | remaining_images.append(img)
377 | if all(quadrant_used):
378 | current_canvas_index += 1
379 | quadrant_used = [False, False, False, False]
380 | return layouts, current_canvas_index + 1
381 | def group_images_by_height(images):
382 |
383 | sorted_images = sorted(images, key=lambda img: img.size[1], reverse=True)
384 | groups = []
385 | if len(sorted_images) % 2 != 0:
386 | groups.append((sorted_images.pop(0),))
387 | while len(sorted_images) > 1:
388 | groups.append((sorted_images.pop(0), sorted_images.pop(-1)))
389 | return groups
390 | def draw_final_groups(final_groups):
391 |
392 | binary_resources = []
393 | for group_list in final_groups:
394 | total_width = sum(group['canvas_width'] for group in group_list)
395 | max_height = max(group['canvas_height'] for group in group_list)
396 | canvas = Image.new('RGB', (total_width, max_height), (255, 255, 255))
397 | current_x = 0
398 | for group in group_list:
399 | images = group['images']
400 | canvas_width = group['canvas_width']
401 | canvas_height = group['canvas_height']
402 | if len(images) == 3:
403 | img1, img2, img3 = images
404 | combined_width = max(img1.size[0], img2.size[0], img3.size[0])
405 | combined_height = img1.size[1] + img2.size[1] + img3.size[1]
406 | combined_canvas = Image.new('RGB', (combined_width, combined_height), (255, 255, 255))
407 | combined_canvas.paste(img1, (0, 0))
408 | combined_canvas.paste(img2, (0, img1.size[1]))
409 | combined_canvas.paste(img3, (0, img1.size[1] + img2.size[1]))
410 | canvas.paste(combined_canvas, (current_x, 0))
411 | elif len(images) == 2:
412 | img1, img2 = images
413 | combined_height = img1.size[1] + img2.size[1]
414 | combined_canvas = Image.new('RGB', (canvas_width, combined_height), (255, 255, 255))
415 | combined_canvas.paste(img1, (0, 0))
416 | combined_canvas.paste(img2, (0, img1.size[1]))
417 | canvas.paste(combined_canvas, (current_x, 0))
418 | elif len(images) == 1:
419 | img = images[0]
420 | if isinstance(img, tuple) and len(img) == 2:
421 | img1, img2 = img
422 | combined_height = img1.size[1] + img2.size[1]
423 | combined_canvas = Image.new('RGB', (canvas_width, combined_height), (255, 255, 255))
424 | combined_canvas.paste(img1, (0, 0))
425 | combined_canvas.paste(img2, (0, img1.size[1]))
426 | canvas.paste(combined_canvas, (current_x, 0))
427 | else:
428 | canvas.paste(img, (current_x, 0))
429 | current_x += canvas_width
430 | img_byte_arr = BytesIO()
431 | canvas.save(img_byte_arr, format='JPEG', quality=70)
432 | img_byte_arr = img_byte_arr.getvalue()
433 | binary_resources.append(img_byte_arr)
434 | return binary_resources
435 | def combine_images(image_info_list, target_width=600, _format='PNG'):
436 | images = resize_images(image_info_list, target_width)
437 | grouped_images = group_images_by_height(images)
438 | groups = grouped_images[:]
439 | final_groups = []
440 | final_group = None
441 | if len(groups) % 3 != 0:
442 | final_group = groups[-(len(groups) % 3):]
443 | groups_p = groups[:-(len(groups) % 3)]
444 | else:
445 | groups_p = groups
446 | final_groups_arr = [{'images': groups_p[i:i + 3]} for i in range(0, len(groups_p), 3)]
447 | for key, group2 in enumerate(final_groups_arr):
448 | canvas_info = []
449 | for index, group in enumerate(group2['images']):
450 | max_height = []
451 | for img in group:
452 | img_width, img_height = img.size
453 | max_height.append(img_height)
454 | canvas_info.append({
455 | 'canvas_width': 600,
456 | 'canvas_height': sum(max_height),
457 | 'images': group,
458 | })
459 | final_groups.append(canvas_info)
460 | if final_group:
461 | canvas_info = []
462 | for index, group in enumerate(final_group):
463 | max_height = []
464 | max_width = 0
465 | for img in group:
466 | max_width += 1
467 | img_width, img_height = img.size
468 | max_height.append(img_height)
469 | canvas_info.append({
470 | 'canvas_width': 600,
471 | 'canvas_height': sum(max_height),
472 | 'images': group,
473 | })
474 | final_groups.append(canvas_info)
475 | binary_canvases = draw_final_groups(final_groups)
476 | return binary_canvases
477 | def is_aspect_ratio_within_limit(width, height, limit=4):
478 |
479 | long_side = max(width, height)
480 | short_side = min(width, height)
481 | return (long_side / short_side) <= limit
482 | async def get_upload_url(data, techsid, session, path=1):
483 | form_data = aiohttp.FormData()
484 | form_data.add_field('json_data', json.dumps(data))
485 | if path == 1:
486 | upload_url = get_base_url() + 'upload.tencent.generateSignedUrl&techsid=we7sid-' + techsid
487 | else:
488 | upload_url = get_base_url() + 'upload.tencent.getPresign&cid=' + techsid
489 | async with session.post(upload_url, data=form_data) as response:
490 | try:
491 | response_result = await response.text()
492 | result = json.loads(response_result)
493 | return result
494 | except json.JSONDecodeError as e:
495 | return None
496 | async def send_binary_data(session, upload_url, file_path, is_binary=False, mime_type='image/png'):
497 | headers = {
498 | 'Content-Type': mime_type,
499 | }
500 | if is_binary:
501 | binary_data = file_path
502 | else:
503 | with open(file_path, 'rb') as file:
504 | binary_data = file.read()
505 | async with session.put(upload_url, data=binary_data, headers=headers) as response:
506 | if response.status == 200:
507 | return True
508 | else:
509 | return False
510 | def send_binary_data_async(upload_url, file_path, is_binary=False, mime_type='image/png'):
511 | headers = {
512 | 'Content-Type': mime_type,
513 | }
514 | try:
515 | if is_binary:
516 | binary_data = file_path
517 | else:
518 | with open(file_path, 'rb') as file:
519 | binary_data = file.read()
520 | session = requests.Session()
521 | session.trust_env = False
522 | session.proxies = {'http': None, 'https': None}
523 | response = session.put(upload_url, data=binary_data, headers=headers)
524 | if response.status_code == 200:
525 | return True, ''
526 | else:
527 | error_message = f"Error uploading file. Status code: {response.status_code}, Reason: {response.reason}, Response text: {response.text}"
528 | return False, error_message
529 | except Exception as e:
530 | error_message = f"An error occurred while uploading the file: {str(e)}"
531 | return False, error_message
532 | def merge_alpha_channels(a_img_path, b_img_path):
533 | a_img = Image.open(a_img_path).convert("RGBA")
534 | b_img = Image.open(b_img_path).convert("RGBA")
535 | b_a_channel = np.array(b_img)[:, :, 3]
536 | a_img = a_img.resize(b_img.size, resample_filter)
537 | a_img_data = np.array(a_img)
538 | a_img_data[:, :, 3] = np.where(b_a_channel == 255, 0, a_img_data[:, :, 3])
539 | base_name = os.path.basename(a_img_path)
540 | c_img_path = os.path.join(os.path.dirname(a_img_path), f"new_{base_name}.png")
541 | new_a_img = Image.fromarray(a_img_data)
542 | new_a_img.save(c_img_path, format="PNG")
543 | return c_img_path
544 |
--------------------------------------------------------------------------------
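Among the utilities in public.py, the StringIO-backed JSON store (`write_key_value` / `get_value_by_key` / `delete_key`) is easy to misread because the buffer must be rewound and truncated on every rewrite. A standalone sketch of the same round-trip pattern, pure stdlib so it runs outside ComfyUI:

```python
import json
from io import StringIO

def write_key_value(key, value, string_io=None):
    # First write creates the buffer; later writes rewind, merge the new
    # key into the parsed dict, truncate, and re-serialize the whole object.
    if string_io is None:
        string_io = StringIO()
        json.dump({key: value}, string_io)
    else:
        string_io.seek(0)
        data = json.load(string_io)
        data[key] = value
        string_io.seek(0)
        string_io.truncate()
        json.dump(data, string_io)
    return string_io

def get_value_by_key(key, string_io):
    string_io.seek(0)
    return json.load(string_io).get(key)

buf = write_key_value("techsid", "abc123")    # creates the store
buf = write_key_value("openid", "o_42", buf)  # merges a second key
print(get_value_by_key("techsid", buf))       # abc123
```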
/web/web.js:
--------------------------------------------------------------------------------
1 | import {app} from '../../../scripts/app.js'
2 | import {api} from '../../../scripts/api.js'
3 | import {ComfyWidgets} from '../../../scripts/widgets.js'
4 | import {ComfyDialog,$el} from '../../../scripts/ui.js'
5 | // Add styles
6 | const styleElement = document.createElement("style");
7 | const cssCode = `
8 | #msgDiv{
9 | width:800px;
10 | height: 200px;
11 | text-align: center;
12 | font-size: 30px;
13 | display: flex;
14 | align-items: center;
15 | justify-content: center;
16 | padding-bottom: 40px;
17 | color: var(--fg-color);
18 | }
19 | #qrCode{
20 | display: block;
21 | width:256px;
22 | height:256px;
23 | border-radius: 10px;
24 | }
25 | #qrBox{
26 | display: block;
27 | text-align: center;
28 | display:flex;
29 | flex-wrap: wrap;
30 | justify-content: center;
31 | width: 360px;
32 | }
33 | #qrDesc{
34 | display: block;
35 | text-align: center;
36 | margin-top: 20px;
37 | color: #ffffff;
38 | width: 360px;
39 | }
40 | .codeImg {
41 | // display: block;
42 | width:256px;
43 | height:256px;
44 | border-radius: 10px;
45 | padding: 10px;
46 | border: 2px solid #ffffff;
47 | }
48 | .codeDesc {
49 | display: block;
50 | text-align: center;
51 | margin-top: 20px;
52 | color: #ffffff;
53 | width: 360px;
54 | font-size: 16px;
55 | }
56 | .codeDiv {
57 | color: #ffffff;
58 | }
59 | .codeBox {
60 | display: flex;
61 | text-align: center;
62 | }
63 | #directions{
64 | margin-top: 10px;
65 | width: 100%;
66 | text-align: left;
67 | color: #ffffff;
68 | font-size: 8px;
69 | }
70 | .tech_button {
71 | flex:1;
72 | height:30px;
73 | border-radius: 8px;
74 | border: 2px solid var(--border-color);
75 | font-size:11px;
76 | background:var(--comfy-input-bg);
77 | color:var(--error-text);
78 | box-shadow:none;
79 | cursor:pointer;
80 | width: 1rem;
81 | }
82 | #tech_box {
83 | max-height: 80px;
84 | display:flex;
85 | flex-wrap: wrap;
86 | align-items: flex-start;
87 | }
88 | .uniqueid {
89 | display: none;
90 | }
91 | #showMsgDiv {
92 | width:800px;
93 | padding: 60px 0;
94 | text-align: center;
95 | font-size: 30px;
96 | color: var(--fg-color);
97 | }
98 | `
99 | styleElement.innerHTML = cssCode
100 | document.head.appendChild(styleElement);
101 |
102 | var techsidkey = 'techsid' + window.location.port;
103 | var loading = false;
104 | const msgBox = $el("div.comfy-modal", {parent: document.body}, []);
105 | const msgDiv = $el('div', {id: 'msgDiv'}, '');
106 | msgBox.appendChild(msgDiv);
107 | msgBox.style.display = "none";
108 | msgBox.style.zIndex = 10001;
109 | let manager_instance = null;
110 |
111 | function setCookie(name, value, days = 1) {
112 | var data = {
113 | value: value,
114 | expires: new Date(new Date().getTime() + (days * 24 * 60 * 60 * 1000))
115 | };
116 | localStorage.setItem(name, JSON.stringify(data));
117 | }
118 |
119 | function getCookie(name) {
120 | var data = localStorage.getItem(name);
121 | if (!data) {
122 | return ''; // nothing stored under this name, return an empty string
123 | }
124 | data = JSON.parse(data);
125 | if (new Date(data.expires) > new Date()) {
126 | return data.value; // still valid, return the stored value
127 | } else {
128 | localStorage.removeItem(name); // expired, remove the entry
129 | return ''; // expired, return an empty string
130 | }
131 | }
132 |
133 |
134 | function generateTimestampedRandomString() {
135 | const timestamp = Date.now().toString(36);
136 | const randomString = Array(3).fill(0).map(() => Math.random().toString(36).substring(2)).join('').substring(0, 18);
137 | const timestampedRandomString = (timestamp + randomString).substring(0, 32);
138 | return timestampedRandomString;
139 | }
140 |
141 |
142 | function showLoading(msg = '') {
143 | hideLoading();
144 | msgDiv.innerText = msg ? msg : '请稍后...';
145 | msgBox.style.display = "block";
146 | loading = true;
147 | }
148 |
149 | function hideLoading() {
150 | msgBox.style.display = "none";
151 | loading = false;
152 | }
153 |
154 | function showToast(msg = '', t = 0) {
155 | t = t > 0 ? t : 2000;
156 | msgDiv.innerText = msg ? msg : '谢谢';
157 | msgBox.style.display = "block";
158 | setTimeout(() => {
159 | msgBox.style.display = "none";
160 | }, t);
161 | }
162 |
163 | var serverUrl = window.location.href;
164 | const qrCode = $el('img', {
165 | id: 'qrCode', src: ``,
166 | onerror: () => {
167 | // console.log('parameter error');
168 | }
169 | })
170 | const qrDesc = $el('div', {id: 'qrDesc'}, '请用微信扫码,验证身份...')
171 | const qrBox = $el('div', {id: 'qrBox'}, [qrCode, qrDesc])
172 | app.ui.dialog.element.style.zIndex = 10010;
173 |
174 | const showMsgDiv = $el('div', {id: 'showMsgDiv'}, '请稍后...')
175 |
176 | function showCodeBox(list) {
177 | app.ui.dialog.close();
178 | let listn = [];
179 | for (let i = 0; i < list.length; i++) {
180 | listn.push($el('div.codeDiv', {}, [$el('img.codeImg', {src: list[i].code}), $el('div.codeDesc', {}, list[i].desc)]))
181 | }
182 | const codeBox = $el('div.codeBox', {}, listn)
183 | app.ui.dialog.show(codeBox);
184 | }
185 |
186 |
187 | function showQrBox(img, desc) {
188 | app.ui.dialog.close();
189 | qrDesc.innerText = desc;
190 | qrCode.src = img;
191 | app.ui.dialog.show(qrBox);
192 | }
193 |
194 | function hideCodeBox() {
195 | app.ui.dialog.close();
196 | }
197 |
198 | function showMsg(msg) {
199 | app.ui.dialog.close();
200 | showMsgDiv.innerText = msg;
201 | app.ui.dialog.show(showMsgDiv);
202 | }
203 |
204 |
205 | function hideMsg() {
206 | app.ui.dialog.close();
207 | }
208 |
209 | function tech_alert(text) {
210 | loading = false;
211 | // alert(text);
212 | showMsg(text);
213 | }
214 |
215 | function getPostData(prompt) {
216 | const output = prompt['output'];
217 | let HuiseNum = 0;
218 | let HuiseO = {};
219 | let HuiseN = {};
220 | let postData = {};
221 | let saveImageNodes = [];
222 | for (const key in output) {
223 | if (output[key].class_type == 'sdBxb') {
224 | HuiseO = output[key].inputs;
225 | HuiseNum++;
226 | }
227 | if (output[key].class_type == 'SaveImage' || output[key].class_type == 'VHS_VideoCombine' || output[key].class_type == 'sdBxb_saveImage') {
228 | output[key].res_node = key;
229 | saveImageNodes.push(output[key]);
230 | }
231 | }
232 | if (HuiseNum > 1) {
233 | return ('工作流中只可以存在1个“SD变现宝”节点');
234 | }
235 | if (saveImageNodes.length < 1) {
236 | return ('请确保工作流中有且仅有1个“SaveImage”、“sdBxb_saveImage”或“VHS_VideoCombine”节点,目前有' + saveImageNodes.length + '个');
237 | } else if (saveImageNodes.length > 1) {
238 | return ('请确保工作流中有且仅有1个“SaveImage”、“sdBxb_saveImage”或“VHS_VideoCombine”节点,目前有' + saveImageNodes.length + '个');
239 | } else {
240 | postData['res_node'] = saveImageNodes[0].res_node;
241 | }
242 | if (HuiseO) {
243 | HuiseN['zhutu1'] = HuiseO['app_img1(optional)'];
244 | HuiseN['zhutu2'] = HuiseO['app_img2(optional)'];
245 | HuiseN['zhutu3'] = HuiseO['app_img3(optional)'];
246 |
247 | HuiseN['cs_img1'] = HuiseO['custom_img1(optional)'];
248 | HuiseN['cs_img2'] = HuiseO['custom_img2(optional)'];
249 | HuiseN['cs_img3'] = HuiseO['custom_img3(optional)'];
250 | HuiseN['cs_video1'] = HuiseO['custom_video1(optional)'];
251 | HuiseN['cs_video2'] = HuiseO['custom_video2(optional)'];
252 | HuiseN['cs_video3'] = HuiseO['custom_video3(optional)'];
253 | HuiseN['cs_text1'] = HuiseO['custom_text1(optional)'];
254 | HuiseN['cs_text2'] = HuiseO['custom_text2(optional)'];
255 | HuiseN['cs_text3'] = HuiseO['custom_text3(optional)'];
256 | HuiseN['title'] = HuiseO['app_title'];
257 | HuiseN['gn_desc'] = HuiseO['app_desc'];
258 | HuiseN['sy_desc'] = '作品使用说明';
259 | HuiseN['server'] = serverUrl;
260 | HuiseN['fee'] = HuiseO['app_fee'];
261 | HuiseN['free_times'] = HuiseO['free_times'];
262 | HuiseN['cs_img1_desc'] = HuiseO['custom_img1_desc'];
263 | HuiseN['cs_img2_desc'] = HuiseO['custom_img2_desc'];
264 | HuiseN['cs_img3_desc'] = HuiseO['custom_img3_desc'];
265 | HuiseN['cs_video1_desc'] = HuiseO['custom_video1_desc'];
266 | HuiseN['cs_video2_desc'] = HuiseO['custom_video2_desc'];
267 | HuiseN['cs_video3_desc'] = HuiseO['custom_video3_desc'];
268 | HuiseN['cs_text1_desc'] = HuiseO['custom_text1_desc'];
269 | HuiseN['cs_text2_desc'] = HuiseO['custom_text2_desc'];
270 | HuiseN['cs_text3_desc'] = HuiseO['custom_text3_desc'];
271 | HuiseN['uniqueid'] = HuiseO['uniqueid'];
272 | postData['zhutus'] = [];
273 | if (HuiseN['zhutu1']) {
274 | if (output[HuiseN['zhutu1'][0]].class_type == 'LoadImage') {
275 | if (output[HuiseN['zhutu1'][0]].inputs.image) {
276 | postData['zhutus'].push(output[HuiseN['zhutu1'][0]].inputs.image);
277 | }
278 | } else {
279 | return ('“app_img1”只可以连接“LoadImage”节点');
280 | }
281 | }
282 | if (HuiseN['zhutu2']) {
283 | if (output[HuiseN['zhutu2'][0]].class_type == 'LoadImage') {
284 | if (output[HuiseN['zhutu2'][0]].inputs.image) {
285 | postData['zhutus'].push(output[HuiseN['zhutu2'][0]].inputs.image);
286 | }
287 | } else {
288 |                 return ('“app_img2” may only be connected to a “LoadImage” node');
289 | }
290 | }
291 | if (HuiseN['zhutu3']) {
292 | if (output[HuiseN['zhutu3'][0]].class_type == 'LoadImage') {
293 | if (output[HuiseN['zhutu3'][0]].inputs.image) {
294 | postData['zhutus'].push(output[HuiseN['zhutu3'][0]].inputs.image);
295 | }
296 | } else {
297 |                 return ('“app_img3” may only be connected to a “LoadImage” node');
298 | }
299 | }
300 |
301 | postData['cs_img_nodes'] = [];
302 | if (HuiseN['cs_img1']) {
303 | if (output[HuiseN['cs_img1'][0]].class_type == 'LoadImage') {
304 | postData['cs_img_nodes'].push({node: HuiseN['cs_img1'][0], desc: HuiseN['cs_img1_desc']});
305 | } else {
306 |                 return ('“custom_img1” may only be connected to a “LoadImage” node');
307 | }
308 | }
309 | if (HuiseN['cs_img2']) {
310 | if (output[HuiseN['cs_img2'][0]].class_type == 'LoadImage') {
311 | postData['cs_img_nodes'].push({node: HuiseN['cs_img2'][0], desc: HuiseN['cs_img2_desc']});
312 | } else {
313 |                 return ('“custom_img2” may only be connected to a “LoadImage” node');
314 | }
315 | }
316 | if (HuiseN['cs_img3']) {
317 | if (output[HuiseN['cs_img3'][0]].class_type == 'LoadImage') {
318 | postData['cs_img_nodes'].push({node: HuiseN['cs_img3'][0], desc: HuiseN['cs_img3_desc']});
319 | } else {
320 |                 return ('“custom_img3” may only be connected to a “LoadImage” node');
321 | }
322 | }
323 |
324 | postData['cs_video_nodes'] = [];
325 | if (HuiseN['cs_video1']) {
326 | if (output[HuiseN['cs_video1'][0]].class_type == 'VHS_LoadVideo') {
327 | postData['cs_video_nodes'].push({node: HuiseN['cs_video1'][0], desc: HuiseN['cs_video1_desc']});
328 | } else {
329 |                 return ('“custom_video1” may only be connected to a “Load Video (Upload) 🎥🅥🅗🅢” node');
330 | }
331 | }
332 | if (HuiseN['cs_video2']) {
333 | if (output[HuiseN['cs_video2'][0]].class_type == 'VHS_LoadVideo') {
334 | postData['cs_video_nodes'].push({node: HuiseN['cs_video2'][0], desc: HuiseN['cs_video2_desc']});
335 | } else {
336 |                 return ('“custom_video2” may only be connected to a “Load Video (Upload) 🎥🅥🅗🅢” node');
337 | }
338 | }
339 | if (HuiseN['cs_video3']) {
340 | if (output[HuiseN['cs_video3'][0]].class_type == 'VHS_LoadVideo') {
341 | postData['cs_video_nodes'].push({node: HuiseN['cs_video3'][0], desc: HuiseN['cs_video3_desc']});
342 | } else {
343 |                 return ('“custom_video3” may only be connected to a “Load Video (Upload) 🎥🅥🅗🅢” node');
344 | }
345 | }
346 |
347 | postData['cs_text_nodes'] = [];
348 | if (HuiseN['cs_text1']) {
349 | if (output[HuiseN['cs_text1'][0]] && typeof output[HuiseN['cs_text1'][0]].inputs !== 'undefined' && typeof output[HuiseN['cs_text1'][0]].inputs.text !== 'undefined') {
350 | postData['cs_text_nodes'].push({node: HuiseN['cs_text1'][0], desc: HuiseN['cs_text1_desc']});
351 | } else {
352 |                 return ('“custom_text1” may only be connected to a “textInput” node');
353 | }
354 | }
355 | if (HuiseN['cs_text2']) {
356 | if (output[HuiseN['cs_text2'][0]] && typeof output[HuiseN['cs_text2'][0]].inputs !== 'undefined' && typeof output[HuiseN['cs_text2'][0]].inputs.text !== 'undefined') {
357 | postData['cs_text_nodes'].push({node: HuiseN['cs_text2'][0], desc: HuiseN['cs_text2_desc']});
358 | } else {
359 |                 return ('“custom_text2” may only be connected to a “textInput” node');
360 | }
361 | }
362 | if (HuiseN['cs_text3']) {
363 | if (output[HuiseN['cs_text3'][0]] && typeof output[HuiseN['cs_text3'][0]].inputs !== 'undefined' && typeof output[HuiseN['cs_text3'][0]].inputs.text !== 'undefined') {
364 | postData['cs_text_nodes'].push({node: HuiseN['cs_text3'][0], desc: HuiseN['cs_text3_desc']});
365 | } else {
366 |                 return ('“custom_text3” may only be connected to a “textInput” node');
367 | }
368 | }
369 | if (HuiseN['title']) {
370 | postData['title'] = HuiseN['title'];
371 | } else {
372 |             return ('“app_title” must not be empty; please enter a title for the work');
373 | }
374 | if (HuiseN['gn_desc']) {
375 | postData['gn_desc'] = HuiseN['gn_desc'];
376 | } else {
377 |             return ('“app_desc” must not be empty; please describe what the work does');
378 | }
379 | if (HuiseN['sy_desc']) {
380 | postData['sy_desc'] = HuiseN['sy_desc'];
381 | } else {
382 |             return ('Please provide usage instructions for the work');
383 | }
384 |
385 | if (HuiseN['fee'] >= 0) {
386 | postData['fee'] = HuiseN['fee'];
387 | } else {
388 |             return ('“app_fee” must not be less than 0 cents (i.e. 0 yuan)');
389 | }
390 | if (HuiseN['free_times'] >= 0) {
391 | postData['free_times'] = HuiseN['free_times'];
392 | } else {
393 |             return ('“free_times” must not be less than 0');
394 | }
395 | postData['uniqueid'] = HuiseN['uniqueid'];
396 | postData['output'] = output;
397 |         postData['workflow'] = prompt['workflow'];
398 | return postData;
399 | }
400 | }
401 |
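402 | // For reference (derived from the assignments above): on success, getPostData
403 | // returns an object shaped like
404 | //   { res_node, zhutus: [...], cs_img_nodes: [{node, desc}, ...],
405 | //     cs_video_nodes: [...], cs_text_nodes: [...], title, gn_desc, sy_desc,
406 | //     fee, free_times, uniqueid, output, workflow }
407 | // and on any validation failure it returns a human-readable error string;
408 | // the (now disabled) caller below distinguished the two via postData['output'].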
402 | async function requestExe(r, postData) {
403 | var comfyuitid = localStorage.getItem('comfyuitid')??'';
404 | const response = await api.fetchApi(`/manager/tech_zhulu`, {
405 | method: 'POST',
406 | headers: {'Content-Type': 'application/json'},
407 | body: JSON.stringify({
408 | r: r,
409 | comfyui_tid: comfyuitid,
410 | postData: postData
411 | })
412 | });
413 | if (!response.ok) {
414 | setTimeout(() => {
415 |             showToast('Network error; please keep this computer connected to the internet', 3000);
416 | }, 300);
417 | return;
418 | }
419 | const resdata = await response.json();
420 | return resdata;
421 | }
422 |
423 | async function login(s_key) {
424 | let res = await requestExe('comfyui.apiv2.code', {s_key: s_key});
425 | if (app.ui.dialog.element.style.display != 'none') {
426 | if (res.data.data.data.techsid.length > 5) {
427 | return res.data.data.data.techsid;
428 | } else {
429 | await new Promise(resolve => setTimeout(resolve, 800));
430 | return await login(s_key);
431 | }
432 | } else {
433 | return;
434 | }
435 | }
436 |
437 |
438 | async function request(r, postData) {
439 |     showLoading('Processing, please wait...');
440 | let resdata = await requestExe(r, postData);
441 |     if (resdata && resdata.errno == 41009) {
442 | let resdata = await requestExe('comfyui.apiv2.code', {s_key: ''});
443 | if (resdata) {
444 | if (resdata.data.data.code == 1) {
445 | hideLoading();
446 | showQrBox(resdata.data.data.data, resdata.data.data.desc);
447 | let techsid = await login(resdata.data.data.s_key);
448 | hideCodeBox();
449 | if (techsid) {
450 | localStorage.setItem('comfyuitid', techsid);
451 | return await request(r, postData);
452 | } else {
453 | return;
454 | }
455 | } else {
456 | showToast(resdata.data.data.message);
457 | return;
458 | }
459 | }
460 | } else {
461 | hideLoading();
462 | return resdata;
463 | }
464 | }
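465 | // Note on the flow above: errno 41009 means there is no valid login session.
466 | // request() then fetches a QR code (comfyui.apiv2.code), shows it, polls
467 | // login() every 800 ms until a techsid arrives (or the dialog is closed),
468 | // caches it in localStorage as 'comfyuitid', and replays the original call.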
465 |
466 | function backWeiget(info) {
467 | let widget = window.huise_widget??{}
468 | if(info.widgets) {
469 | let item_widget = {
470 | name: info.type,
471 | widgets: {}
472 | };
473 | let widgets = info.widgets;
474 | widgets.forEach((item,index) => {
475 | item_widget.widgets[item.name] = {
476 | type: item.type,
477 | name: item.name,
478 | value: item.value,
479 | options: item.options,
480 | };
481 | })
482 | widget[info.type] = item_widget;
483 | }
484 | window.huise_widget = widget
485 | }
486 |
487 | function chainCallback(object, property, callback) {
488 |
489 | if (object == undefined) {
490 | //This should not happen.
491 |         console.error("Tried to add a callback to a non-existent object")
492 | return;
493 | }
494 | if (property in object) {
495 | const callback_orig = object[property]
496 | object[property] = function () {
497 | const r = callback_orig.apply(this, arguments);
498 | callback.apply(this, arguments);
499 | return r
500 | };
501 | } else {
502 | object[property] = callback;
503 | }
504 | }
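505 | // Usage sketch: chainCallback composes handlers instead of overwriting them,
506 | // so any previously registered handler still runs first, e.g.:
507 | //   chainCallback(nodeType.prototype, "onNodeCreated", function () { /* ... */ });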
505 |
506 | app.registerExtension({
507 | name: 'sdBxb',
508 | async beforeRegisterNodeDef(nodeType, nodeData, app) {
509 |
510 | const onNodeCreated = nodeType.prototype.onNodeCreated;
511 | chainCallback(nodeType.prototype, "onNodeCreated", function (){
512 | backWeiget(this);
513 | if (nodeData.name === 'sdBxb') {
514 | const r = onNodeCreated ? onNodeCreated?.apply(this, arguments) : undefined;
515 | const that = this;
516 | const zhanweiIndex = this.widgets.findIndex((w) => w.name === "zhanwei");
517 | const tech_button = $el('button.tech_button', {
518 |                     textContent: 'This node is deprecated. Please click “Package App” in the top-right corner of the screen', style: {},
519 | onclick: async () => {
520 | hideCodeBox();
521 |                         tech_alert('Please click “Package App” in the top-right corner of the screen');
522 | return;
523 |                         if (loading) return; // unreachable: the legacy packaging flow below is intentionally disabled
524 | // try {
525 | // const prompt = await app.graphToPrompt()
526 | // let postData = getPostData(prompt);
527 | // if (postData['output']) {
528 | // try {
529 | // let resdata = await request('comfyui.apiv2.upload', postData);
530 | // if (resdata) {
531 | // if (resdata.data.data.code == 1) {
532 | // showCodeBox(resdata.data.data.list);
533 | // } else {
534 | // // showToast(resdata.data.data.message);
535 | // showMsg(resdata.data.data.message);
536 | // }
537 | // }
538 | // } catch (error) {
539 | // console.log(error);
540 | // hideLoading();
541 | // }
542 | //
543 | // } else {
544 | // tech_alert(postData);
545 | // return;
546 | // }
547 | // } catch (error) {
548 | // tech_alert('获取api数据失败');
549 | // return;
550 | // }
551 | }
552 | }
553 | )
554 |                 const dstr1 = '1. Each new “SD变现宝” node you create corresponds to a new work;';
555 |                 const dstr2 = '2. If you run into problems, join the official QQ group 967073981 and contact the author.';
556 |                 const dstr3 = '3. Video tutorial: https://www.bilibili.com/video/BV1Bsg8eeEjv';
557 |                 const directions = $el('div', {id: 'directions'}, ['Special notes:', $el('br'), dstr1, $el('br'), dstr2, $el('br'), dstr3])
558 | const tech_box = $el('div', {id: 'tech_box'}, [tech_button, directions])
559 | this.addDOMWidget('select_styles', "btn", tech_box);
560 |
561 | const inputEl = document.createElement("input");
562 | inputEl.setAttribute("type", "text");
563 | inputEl.setAttribute("list", "uedynamiclist");
564 | inputEl.setAttribute("value", generateTimestampedRandomString());
565 | inputEl.className = "uniqueid";
566 | this.addDOMWidget('uniqueid', "input", inputEl, {
567 | getValue() {
568 | return inputEl.value;
569 | },
570 | setValue(v) {
571 | inputEl.value = v;
572 | },
573 | });
574 | setTimeout(() => {
575 | this.setSize([420, 500]);
576 | // if(serverUrl) {
577 | // this.widgets[3].value = serverUrl;
578 | // }
579 | // if(this.widgets[16].value == '.*') {
580 | // this.widgets[16].value = generateTimestampedRandomString();
581 | // }
582 | // console.log(this.widgets[16].value);
583 | }, 200)
584 | // console.log(that);
585 |
586 | return r;
587 | }
588 | })
589 |
590 | if (nodeData.name === 'sdBxb') {
591 |             this.serialize_widgets = true; // widget values need to be saved with the workflow
592 | }
593 | },
594 | })
595 |
596 |
597 | setTimeout(() => {
598 | window.comfyui_app = app
599 | window.comfyui_api = api
600 | window.comfyui_ui = {ComfyDialog,$el}
601 | import('/huise_admin/input.js')
602 | }, 500)
603 |
604 |
605 | app.registerExtension({
606 | name: "Huise.menu",
607 | async setup() {
608 | // const menu = document.querySelector(".comfy-menu");
609 | // const huiseButton = document.createElement("button");
610 | // huiseButton.textContent = "绘色管理";
611 | // huiseButton.style.background = "linear-gradient(90deg, #00C9FF 0%, #92FE9D 100%)";
612 | // huiseButton.style.color = "black";
613 | // huiseButton.onclick = () => {
614 | // // console.dir(app)
615 | // let app_huise = document.getElementById('admin-app-huise')
616 | // console.log(app_huise)
617 | // if (!app_huise) {
618 | // setTimeout(() => {
619 | // import('/huise_admin/input.js')
620 | // }, 500)
621 | // } else {
622 | // if (app_huise.style.display == 'block') {
623 | // app_huise.style.display = 'none';
624 | // } else {
625 | // app_huise.style.display = 'block';
626 | // }
627 | // }
628 | // }
629 | // menu.append(huiseButton);
630 |
631 | },
632 | });
633 |
--------------------------------------------------------------------------------
/utils.py:
--------------------------------------------------------------------------------
1 | import base64
2 | import sys
3 | import time
4 | import urllib.request
5 | import subprocess
6 | import os
7 | import platform
8 | import tarfile
9 | import zipfile
10 | from functools import lru_cache
11 | import shutil
12 | from PIL import Image, PngImagePlugin
13 | import json
14 | from tqdm import tqdm
15 | import io
16 | from pathlib import Path
17 | import ssl
18 | import mimetypes
19 | import imghdr
20 | import piexif
21 | from .public import args, find_project_root, generate_md5_uid_timestamp, determine_file_type, find_project_custiom_nodes_path
22 | import folder_paths
23 | from concurrent.futures import ThreadPoolExecutor
24 | input_directory = folder_paths.get_input_directory()
25 | os.makedirs(input_directory, exist_ok=True)
26 | save_input_directory = input_directory + '/temp'
27 | os.makedirs(save_input_directory, exist_ok=True)
28 | FFMPEG_URLS = {
29 | "amd64-static": "https://tt-1254127940.cos.ap-guangzhou.myqcloud.com/ffmpeg/ffmpeg-release-amd64-static.tar.xz",
30 | "i686-static": "https://tt-1254127940.cos.ap-guangzhou.myqcloud.com/ffmpeg/ffmpeg-release-i686-static.tar.xz",
31 | "arm64-static": "https://tt-1254127940.cos.ap-guangzhou.myqcloud.com/ffmpeg/ffmpeg-release-arm64-static.tar.xz",
32 | "armhf-static": "https://tt-1254127940.cos.ap-guangzhou.myqcloud.com/ffmpeg/ffmpeg-release-armhf-static.tar.xz",
33 | "armel-static": "https://tt-1254127940.cos.ap-guangzhou.myqcloud.com/ffmpeg/ffmpeg-release-armel-static.tar.xz",
34 | "osx64": "https://tt-1254127940.cos.ap-guangzhou.myqcloud.com/ffmpeg/ffmpeg-116599-g43cde54fc1.zip",
35 | "win64-full": "https://tt-1254127940.cos.ap-guangzhou.myqcloud.com/ffmpeg/ffmpeg-release-essentials.zip"
36 | }
37 | ssl._create_default_https_context = ssl._create_unverified_context
38 | ffmpeg_path_exe = ''
39 | ffprobe_exe_path = ''
40 | temp_path = find_project_custiom_nodes_path() + 'ComfyUI_Bxb/temp_bxb/'
41 | try:
42 | resample_filter = Image.Resampling.LANCZOS
43 | except AttributeError:
44 | resample_filter = Image.LANCZOS
45 | def file_to_base64(file_path):
46 | mime_type, _ = mimetypes.guess_type(file_path)
47 | if mime_type is None:
48 |         raise ValueError("Unrecognized file type")
49 | with open(file_path, "rb") as file:
50 | base64_encoded = base64.b64encode(file.read()).decode('utf-8')
51 | base64_prefix = f"data:{mime_type};base64,"
52 | return base64_prefix + base64_encoded
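53 | # e.g. file_to_base64('cover.png') -> 'data:image/png;base64,iVBORw0KGgo...'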
53 | def compress_image(input_image_path, output_image_path=None, target_width=None, quality=85):
54 |     """Resize the image to target_width (aspect ratio preserved) and re-save it with the given quality; overwrites the input when output_image_path is None."""
55 | img = Image.open(input_image_path)
56 | if target_width is None:
57 | target_width = img.size[0]
58 | width_percent = target_width / float(img.size[0])
59 | target_height = int((float(img.size[1]) * float(width_percent)))
60 |     img_resized = img.resize((target_width, target_height), resample_filter)
61 | if output_image_path is None:
62 | output_image_path = input_image_path
63 | if output_image_path == input_image_path:
64 | os.remove(input_image_path)
65 | img_resized.save(output_image_path, quality=quality, optimize=True)
66 | def cut_and_compress_video(input_video_path, output_video_path=None, target_width=None, duration=None):
67 |     """Probe the source dimensions, then re-encode to target_width at 24 fps (H.264/AAC, CRF 28), optionally truncated to duration seconds."""
68 | probe_command = [
69 |         ffprobe_exe_path, '-v', 'error', '-select_streams', 'v:0', '-show_entries',
70 | 'stream=width,height', '-of', 'csv=p=0:s=x', input_video_path
71 | ]
72 | result = subprocess.run(probe_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
73 | original_width, original_height = map(int, result.stdout.strip().split('x'))
74 | if target_width is None:
75 | target_width = original_width
76 | width_ratio = target_width / float(original_width)
77 | target_height = int(original_height * width_ratio)
78 | if output_video_path is None:
79 | output_video_path = input_video_path
80 | command = [
81 | ffmpeg_path_exe,
82 | '-i', input_video_path,
83 | '-vf', f'scale={target_width}:{target_height}',
84 | '-r', '24',
85 | '-c:v', 'libx264',
86 | '-preset', 'medium',
87 | '-crf', '28',
88 | '-c:a', 'aac',
89 | '-b:a', '128k',
90 | '-strict', 'experimental',
91 | '-y',
92 | output_video_path
93 | ]
94 | if duration is not None:
95 | command.insert(1, '-t')
96 | command.insert(2, str(duration))
97 | subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True)
98 | def get_platform():
99 | architecture = platform.machine()
100 | if sys.platform.startswith("linux"):
101 | if architecture == "x86_64":
102 | return "amd64-static"
103 | elif architecture == "i686":
104 | return "i686-static"
105 | elif architecture == "aarch64":
106 | return "arm64-static"
107 | elif architecture == "armv7l":
108 | return "armhf-static"
109 | elif architecture == "armel":
110 | return "armel-static"
111 | elif sys.platform.startswith("darwin"):
112 | return "osx64"
113 | elif sys.platform.startswith("win"):
114 | return "win64-full"
115 | else:
116 | raise RuntimeError("Unsupported platform")
117 | def download_ffmpeg(target_dir):
118 | def reporthook(block_num, block_size, total_size):
119 | if reporthook.tbar is None:
120 | reporthook.tbar = tqdm(total=total_size, unit='B', unit_scale=True)
121 | downloaded = block_num * block_size
122 | if downloaded < total_size:
123 | reporthook.tbar.update(block_size)
124 | else:
125 | reporthook.tbar.close()
126 | reporthook.tbar = None
127 | reporthook.tbar = None
128 | plat = get_platform()
129 | url = FFMPEG_URLS.get(plat)
130 | if not url:
131 |         raise RuntimeError(f"Unsupported platform or configuration: {plat}")
132 | target_dir = Path(target_dir).resolve()
133 | target_dir.mkdir(parents=True, exist_ok=True)
134 | file_extension = url.split('.')[-1]
135 | archive_path = target_dir / f"ffmpeg-{plat}.{file_extension}"
136 |     print(f"Downloading FFmpeg for {plat}...")
137 | no_proxy_handler = urllib.request.ProxyHandler({})
138 | opener = urllib.request.build_opener(no_proxy_handler)
139 | with opener.open(url) as response:
140 | with open(archive_path, 'wb') as out_file:
141 | block_size = 8192
142 | total_size = int(response.headers.get('Content-Length', 0))
143 | reporthook(0, block_size, total_size)
144 | while True:
145 | buffer = response.read(block_size)
146 | if not buffer:
147 | break
148 | out_file.write(buffer)
149 | reporthook(len(buffer) // block_size, block_size, total_size)
150 |     print("Extracting FFmpeg...")
151 | extracted_dir = target_dir / "ffmpeg"
152 | if file_extension == "zip":
153 | with zipfile.ZipFile(archive_path, 'r') as zip_ref:
154 | zip_ref.extractall(extracted_dir)
155 | elif file_extension == "xz":
156 | with tarfile.open(archive_path, 'r:xz') as tar_ref:
157 | tar_ref.extractall(extracted_dir)
158 | ffmpeg_exe = find_ffmpeg_executable(extracted_dir)
159 | os.remove(archive_path)
160 | if not ffmpeg_exe or not ffmpeg_exe.exists():
161 |         raise RuntimeError(f"Failed to extract the FFmpeg executable from {archive_path}")
162 | return str(ffmpeg_exe)
163 | @lru_cache()
164 | def find_ffmpeg_executable(search_dir):
165 |     """Walk search_dir and return the path of the ffmpeg binary, or None if it is not found."""
166 | if not os.path.exists(search_dir):
167 | return None
168 | os.chmod(search_dir, 0o755)
169 | for root, dirs, files in os.walk(search_dir):
170 | for file in files:
171 | if file.lower() == "ffmpeg.exe" or file.lower() == "ffmpeg":
172 | return Path(root) / file
173 | return None
174 | @lru_cache()
175 | def find_ffprobe_executable(search_dir):
176 |     """Walk search_dir and return the path of the ffprobe binary, or None if it is not found."""
177 | for root, dirs, files in os.walk(search_dir):
178 | for file in files:
179 | if file.lower() == "ffprobe.exe" or file.lower() == "ffprobe":
180 | return Path(root) / file
181 | return None
182 | @lru_cache()
183 | def get_ffmpeg_executable():
184 | global ffmpeg_path_exe, ffprobe_exe_path
185 | target_dir = find_project_custiom_nodes_path() + 'ComfyUI_Bxb/tools/'
186 | ffmpeg_exe = find_ffmpeg_executable(target_dir + "/ffmpeg")
187 | if ffmpeg_exe and ffmpeg_exe.exists() and is_valid_exe(ffmpeg_exe):
188 | ffmpeg_path_exe = str(ffmpeg_exe)
189 | ffprobe_exe = find_ffprobe_executable(target_dir + "/ffmpeg")
190 | ffprobe_exe_path = str(ffprobe_exe)
191 | os.chmod(ffprobe_exe_path, 0o755)
192 | return str(ffmpeg_exe)
193 | remove_target_dir = target_dir + "/ffmpeg"
194 | if os.path.exists(remove_target_dir):
195 | shutil.rmtree(remove_target_dir)
196 | exe = download_ffmpeg(target_dir)
197 | if exe and os.path.isfile(exe) and is_valid_exe(exe):
198 | ffmpeg_path_exe = exe
199 | ffprobe_exe = find_ffprobe_executable(target_dir + "/ffmpeg")
200 | ffprobe_exe_path = str(ffprobe_exe)
201 | os.chmod(ffprobe_exe_path, 0o755)
202 | return exe
203 | raise RuntimeError("FFmpeg not found or is invalid.")
204 | def is_valid_exe(exe):
205 |     """Make the candidate executable and verify that `exe -version` exits with code 0."""
206 | try:
207 | os.chmod(exe, 0o755)
208 | result = subprocess.run([exe, '-version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
209 | return result.returncode == 0
210 | except Exception:
211 | return False
212 | from io import BytesIO
213 | def get_image_dimensions(input_file, custom_data=None):
214 | with Image.open(input_file) as img:
215 | width, height = img.size
216 | img_format = img.format
217 | if not custom_data or (img_format != 'PNG' and img_format != 'JPEG'):
218 | file_size_bytes = os.path.getsize(input_file)
219 | file_size_mb = file_size_bytes / (1024 * 1024)
220 | return width, height, file_size_mb
221 | img_byte_arr = BytesIO()
222 | if img_format == 'PNG':
223 | meta = PngImagePlugin.PngInfo()
224 | for key, value in custom_data.items():
225 | if isinstance(value, bytes):
226 | value = base64.b64encode(value).decode('utf-8')
227 | meta.add_text(key, value)
228 | img.save(img_byte_arr, format="PNG", pnginfo=meta)
229 | elif img_format == 'JPEG':
230 | img = img.convert("RGB")
231 | exif_dict = {"0th": {}, "Exif": {}, "GPS": {}, "1st": {}, "thumbnail": None}
232 |         for key, value in custom_data.items():
233 |             exif_dict["0th"][piexif.ImageIFD.Make] = str(value)  # every value is written into the Make tag: the key is ignored and the last value wins
234 | exif_bytes = piexif.dump(exif_dict)
235 | img.save(img_byte_arr, format="JPEG", exif=exif_bytes)
236 | img_byte_arr.seek(0)
237 | image_bytes = img_byte_arr.getvalue()
238 | with open(input_file, "wb") as f:
239 | f.write(image_bytes)
240 | file_size_bytes = len(image_bytes)
241 | file_size_mb = file_size_bytes / (1024 * 1024)
242 | return width, height, file_size_mb
243 | def get_video_dimensions(input_file):
244 | command = [
245 | ffprobe_exe_path,
246 | '-v', 'error',
247 | '-select_streams', 'v:0',
248 | '-show_entries', 'stream=width,height',
249 | '-of', 'json',
250 | input_file
251 | ]
252 | result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
253 | result_json = json.loads(result.stdout)
254 | width = result_json['streams'][0]['width']
255 | height = result_json['streams'][0]['height']
256 | file_size_bytes = os.path.getsize(input_file)
257 | file_size_mb = file_size_bytes / (1024 * 1024)
258 | return width, height, file_size_mb
259 | def cut_video(input_file, start_seconds, end_seconds, output_file, width, height, fps=24, threads=4):
260 |     """Cut [start_seconds, end_seconds) from input_file, scale-and-center-crop it to width x height at fps, and return the clip duration."""
261 | duration = end_seconds - start_seconds
262 | if duration <= 0:
263 |         raise ValueError("End time must be greater than start time")
264 | scale_filter = f'scale={width}:{height}:force_original_aspect_ratio=increase'
265 | crop_filter = f'crop={width}:{height}'
266 | filter_complex = f'{scale_filter},{crop_filter}'
267 | command = [
268 | ffmpeg_path_exe,
269 | '-ss', str(start_seconds),
270 | '-i', input_file,
271 | '-t', str(duration),
272 | '-r', str(fps),
273 | '-vf', filter_complex,
274 | '-c:v', 'libx264',
275 | '-c:a', 'aac',
276 | '-threads', str(threads),
277 | '-strict', 'experimental',
278 | '-y',
279 | output_file
280 | ]
281 | subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True)
282 | return duration
283 | def loop_video_to_duration(input_file, output_file, target_duration):
284 |     """Loop input_file endlessly and cut the result at target_duration seconds."""
285 | command = [
286 | ffmpeg_path_exe,
287 | '-stream_loop', '-1',
288 | '-i', input_file,
289 | '-t', str(target_duration),
290 | '-c:v', 'libx264',
291 | '-c:a', 'aac',
292 | '-threads', '8',
293 | '-y',
294 | output_file
295 | ]
296 | subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True)
297 | def merge_videos_horizontally(input_file1, input_file2, output_file='temp_frames'):
298 |     """Stack two videos side by side (ffmpeg hstack) into output_file; only the video stream is mapped."""
299 | command = [
300 | ffmpeg_path_exe,
301 | '-i', input_file1,
302 | '-i', input_file2,
303 | '-filter_complex', '[0:v][1:v]hstack=inputs=2[v]',
304 | '-map', '[v]',
305 | '-c:v', 'libx264',
306 | '-threads', '8',
307 | '-y',
308 | output_file
309 | ]
310 | subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True)
311 | def get_file_count_in_directory():
312 | directory = Path(temp_path)
313 | if not directory.exists():
314 | directory.mkdir(parents=True, exist_ok=True)
315 | file_count = len([file for file in directory.iterdir() if file.is_file()])
316 | return file_count
317 | def resize_and_crop_image(image_path, output_path, width, height):
318 |     """Scale an image so it covers width x height, then center-crop it to exactly that size (via ffmpeg)."""
319 | command = [
320 | ffmpeg_path_exe,
321 | '-i', str(image_path),
322 | '-vf', f"scale={width}:{height}:force_original_aspect_ratio=increase,crop={width}:{height}",
323 | '-q:v', '2',
324 | str(output_path)
325 | ]
326 | try:
327 | subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True)
328 | except subprocess.CalledProcessError as e:
329 | print(f"Error during resizing and cropping image: {e}")
330 | def process_image(input_image_path_left=None, input_image_path_right=None, output_image_path="output.png",
331 | canvas_size=(800, 600), overlay_image_path=None):
332 | canvas = Image.new('RGBA', canvas_size, (0, 0, 0, 0))
333 | half_width = canvas_size[0] // 2
334 | target_height = canvas_size[1]
335 | if input_image_path_left:
336 | img_left = Image.open(input_image_path_left).convert("RGBA")
337 | img_width, img_height = img_left.size
338 | scale = max(half_width / img_width, target_height / img_height)
339 | new_size = (int(img_width * scale), int(img_height * scale))
340 | img_resized = img_left.resize(new_size, resample_filter)
341 | img_cropped = img_resized.crop((
342 | (img_resized.width - half_width) // 2,
343 | (img_resized.height - target_height) // 2,
344 | (img_resized.width + half_width) // 2,
345 | (img_resized.height + target_height) // 2
346 | ))
347 | canvas.paste(img_cropped, (0, 0), img_cropped)
348 | if input_image_path_right:
349 | img_right = Image.open(input_image_path_right).convert("RGBA")
350 | img_width, img_height = img_right.size
351 | scale = max(half_width / img_width, target_height / img_height)
352 | new_size = (int(img_width * scale), int(img_height * scale))
353 | img_resized = img_right.resize(new_size, resample_filter)
354 | img_cropped = img_resized.crop((
355 | (img_resized.width - half_width) // 2,
356 | (img_resized.height - target_height) // 2,
357 | (img_resized.width + half_width) // 2,
358 | (img_resized.height + target_height) // 2
359 | ))
360 | canvas.paste(img_cropped, (half_width, 0), img_cropped)
361 | if overlay_image_path and os.path.exists(overlay_image_path):
362 | overlay_img = Image.open(overlay_image_path).convert("RGBA")
363 | overlay_scale = min(canvas_size[0] / 2 / overlay_img.width, canvas_size[1] / 2 / overlay_img.height)
364 | overlay_new_size = (int(overlay_img.width * overlay_scale), int(overlay_img.height * overlay_scale))
365 | if input_image_path_left is not None and input_image_path_right is not None and os.path.exists(input_image_path_left) and os.path.exists(input_image_path_right):
366 | overlay_new_size = (int(canvas_size[0] / 15), int(canvas_size[0] / 15))
367 | overlay_img_resized = overlay_img.resize(overlay_new_size, resample_filter)
368 | overlay_x = (canvas_size[0] - overlay_new_size[0]) // 2
369 | overlay_y = (canvas_size[1] - overlay_new_size[1]) // 2
370 | canvas.paste(overlay_img_resized, (overlay_x, overlay_y), overlay_img_resized)
371 | canvas.save(output_image_path, format='PNG', optimize=True)
372 | def resize_and_crop(image, target_width, target_height):
373 |     """Scale a PIL image so it covers target_width x target_height, then center-crop it to that size."""
374 | scale = max(target_width / image.width, target_height / image.height)
375 | new_size = (int(image.width * scale), int(image.height * scale))
376 | resized_image = image.resize(new_size, resample_filter)
377 | left = (resized_image.width - target_width) // 2
378 | top = (resized_image.height - target_height) // 2
379 | right = (resized_image.width + target_width) // 2
380 | bottom = (resized_image.height + target_height) // 2
381 | cropped_image = resized_image.crop((left, top, right, bottom))
382 | return cropped_image
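383 | # Worked example: a 1920x1080 frame with target 330x480 gives
384 | # scale = max(330/1920, 480/1080) = 0.444..., so it is resized to 853x480
385 | # and then center-cropped to 330x480.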
383 | def process_and_merge_image_video(image_file, video_file, output_file, overlay_image, side='left', target_width=330, target_height=480,
384 | start_seconds=0, end_seconds=None, fps=24):
385 |     """Place a still image beside a video clip (image on `side`), overlay a centered watermark piped via stdin, and write a silent H.264 file."""
386 | temp_black_background = ''
387 | if not image_file:
388 | img = Image.new('RGBA', (target_width, target_height), (0, 0, 0, 255))
389 | temp_black_background = temp_path + generate_md5_uid_timestamp(
390 | 'temp_black_background') + 'temp_black_background.png'
391 | img.save(temp_black_background)
392 | image_file = temp_black_background
393 | image = Image.open(image_file).convert("RGBA")
394 | image_resized = resize_and_crop(image, target_width, target_height)
395 | video_resized_png = temp_path + generate_md5_uid_timestamp(str(time.process_time())) + 'resized_video.png'
396 | image_resized.save(video_resized_png)
397 | video_file_temp = ''
398 | if end_seconds is not None:
399 | video_file_temp = temp_path + generate_md5_uid_timestamp(str(time.process_time())) + 'cut_video.mp4'
400 | cut_video(video_file, start_seconds, end_seconds, video_file_temp, target_width, target_height, fps)
401 | video_file = video_file_temp
402 | scale_filter = f'scale={target_width}:{target_height}:force_original_aspect_ratio=increase'
403 | crop_filter = f'crop={target_width}:{target_height}'
404 | filter_complex = f'{scale_filter},{crop_filter}'
405 | overlay_new_width = int(target_width * 2 / 15)
406 | overlay_new_height = int(target_width * 2 / 15)
407 | overlay_x = (target_width * 2 - overlay_new_width) // 2
408 | overlay_y = (target_height - overlay_new_height) // 2
409 | if side == 'left':
410 | filter_complex = (
411 | f"[0:v]{scale_filter},{crop_filter}[vid];"
412 | f"[1:v]scale={target_width}:{target_height}[img];"
413 | f"[img][vid]hstack=inputs=2[base];"
414 | f"[2:v]scale={overlay_new_width}:{overlay_new_height}[ovrl];"
415 | f"[base][ovrl]overlay={overlay_x}:{overlay_y}[v]"
416 | )
417 | else:
418 | filter_complex = (
419 | f"[0:v]{scale_filter},{crop_filter}[vid];"
420 | f"[1:v]scale={target_width}:{target_height}[img];"
421 | f"[vid][img]hstack=inputs=2[base];"
422 | f"[2:v]scale={overlay_new_width}:{overlay_new_height}[ovrl];"
423 | f"[base][ovrl]overlay={overlay_x}:{overlay_y}[v]"
424 | )
425 | command = [
426 | ffmpeg_path_exe,
427 | '-i', video_file,
428 | '-i', video_resized_png,
429 | '-i', 'pipe:0',
430 | '-filter_complex', filter_complex,
431 | '-map', '[v]',
432 | '-c:v', 'libx264',
433 | '-pix_fmt', 'yuv420p',
434 | '-threads', '4',
435 | '-an',
436 | '-y',
437 | output_file
438 | ]
439 | with open(overlay_image, 'rb') as overlay_img_file:
440 | subprocess.run(command, stdin=overlay_img_file, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True)
441 | os.remove(video_resized_png)
442 | if temp_black_background:
443 | os.remove(temp_black_background)
444 | if video_file_temp:
445 | os.remove(video_file_temp)
446 | def process_and_merge_videos(input_file1, input_file2, start_seconds1, end_seconds1, start_seconds2, end_seconds2,
447 | output_file, overlay_image, fps=24):
448 | global CANVAS_MAX_HEIGHT, CANVAS_MAX_WIDTH
449 | video_width1, video_height1, video_size = get_video_dimensions(input_file2)
450 | get_file_count_in_directory()
451 | cut1 = temp_path + generate_md5_uid_timestamp('cut1.mp4') + 'cut1.mp4'
452 | cut2 = temp_path + generate_md5_uid_timestamp('cut1.mp4') + 'cut2.mp4'
453 | cut1_looped = temp_path + generate_md5_uid_timestamp('cut1.mp4') + 'cut1_looped.mp4'
454 | cut2_looped = temp_path + generate_md5_uid_timestamp('cut1.mp4') + 'cut2_looped.mp4'
455 | max_width = CANVAS_MAX_WIDTH / 2
456 | max_height = CANVAS_MAX_HEIGHT
457 | max_width = video_width1
458 | max_height = video_height1
459 | duration1 = cut_video(input_file1, start_seconds1, end_seconds1, cut1, max_width, max_height, fps)
460 | duration2 = cut_video(input_file2, start_seconds2, end_seconds2, cut2, max_width, max_height, fps)
461 | max_duration = max(duration1, duration2)
462 | if duration1 < max_duration:
463 | loop_video_to_duration(cut1, cut1_looped, max_duration)
464 | video1_file = cut1_looped
465 | else:
466 | video1_file = cut1
467 | if duration2 < max_duration:
468 | loop_video_to_duration(cut2, cut2_looped, max_duration)
469 | video2_file = cut2_looped
470 | else:
471 | video2_file = cut2
472 | overlay_new_width = int(max_width * 2 / 15)
473 | overlay_new_height = int(max_width * 2 / 15)
474 | overlay_x = (max_width * 2 - overlay_new_width) // 2
475 | overlay_y = (max_height - overlay_new_height) // 2
476 | process_videos_with_overlay(video1_file, video2_file, overlay_image, output_file,
477 | overlay_new_width=overlay_new_width,
478 | overlay_new_height=overlay_new_height,
479 | overlay_x=overlay_x,
480 | overlay_y=overlay_y,
481 | )
482 | if os.path.exists(cut1):
483 | os.remove(cut1)
484 | if os.path.exists(cut2):
485 | os.remove(cut2)
486 | if os.path.exists(cut1_looped):
487 | os.remove(cut1_looped)
488 | if os.path.exists(cut2_looped):
489 | os.remove(cut2_looped)
490 | def process_videos_with_overlay(input_file1, input_file2, overlay_image, output_file,
491 | overlay_new_width=100, overlay_new_height=100,
492 | overlay_x=0, overlay_y=0, fps=24):
493 |     """hstack two same-sized clips and overlay the watermark, scaled to overlay_new_width x overlay_new_height, at (overlay_x, overlay_y); fps is currently unused."""
494 | command = [
495 | ffmpeg_path_exe,
496 | '-i', input_file1,
497 | '-i', input_file2,
498 | '-i', overlay_image,
499 | '-filter_complex',
500 | f"[0:v][1:v]hstack=inputs=2[base];"
501 | f"[2:v]scale={overlay_new_width}:{overlay_new_height}[ovrl];"
502 | f"[base][ovrl]overlay={overlay_x}:{overlay_y}",
503 | '-c:v', 'libx264',
504 | '-pix_fmt', 'yuv420p',
505 | '-threads', '8',
506 | '-y',
507 | output_file
508 | ]
509 | try:
510 | subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True)
511 | except subprocess.CalledProcessError as e:
512 |         print(f"FFmpeg command failed: {e.stderr.decode()}")
513 | raise
514 | def apply_overlay_to_video(video_file, overlay_image_file, output_file, file_num=1):
515 |     """Overlay a centered, scaled watermark onto video_file, piping the PNG overlay through stdin."""
516 | video_width, video_height, size_mb = get_video_dimensions(video_file)
517 | overlay_image = Image.open(overlay_image_file).convert("RGBA")
518 | overlay_ratio = overlay_image.width / overlay_image.height
519 | if file_num == 1:
520 | if video_width < video_height:
521 | overlay_new_width = video_width // 2
522 | overlay_new_height = int(overlay_new_width / overlay_ratio)
523 | else:
524 | overlay_new_height = video_height // 2
525 | overlay_new_width = int(overlay_new_height * overlay_ratio)
526 | else:
527 | overlay_new_width = int(video_width / 15)
528 | overlay_new_height = int(video_width / 15)
529 | overlay_resized = overlay_image.resize((overlay_new_width, overlay_new_height), resample_filter)
530 | img_byte_arr = io.BytesIO()
531 | overlay_resized.save(img_byte_arr, format='PNG')
532 | img_byte_arr = img_byte_arr.getvalue()
533 | overlay_x = (video_width - overlay_new_width) // 2
534 | overlay_y = (video_height - overlay_new_height) // 2
535 | command = [
536 | ffmpeg_path_exe,
537 | '-i', video_file,
538 | '-i', 'pipe:0',
539 | '-filter_complex',
540 | f'[1:v]scale={overlay_new_width}:{overlay_new_height}[ovrl];[0:v][ovrl]overlay={overlay_x}:{overlay_y}',
541 | '-c:v', 'libx264',
542 | '-pix_fmt', 'yuv420p',
543 | '-y',
544 | output_file
545 | ]
546 | process = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
547 | stdout, stderr = process.communicate(input=img_byte_arr)
548 | if process.returncode != 0:
549 |         raise Exception(f"FFmpeg command failed: {stderr.decode()}")
550 | def is_image(file_path):
551 |     """Return a truthy value (True or a mime type) when file_path looks like an image, else False."""
552 | if imghdr.what(file_path):
553 | return True
554 | mime_type, _ = mimetypes.guess_type(file_path)
555 | if mime_type and mime_type.startswith('image'):
556 | return mime_type
557 | return False
558 | def is_video(file_path):
559 |     """Return the mime type when file_path looks like a video (by extension, else by codec probe), else False."""
560 | mime_type, _ = mimetypes.guess_type(file_path)
561 | if mime_type and mime_type.startswith('video'):
562 | return mime_type
563 | try:
564 |         command = [
565 |             ffprobe_exe_path,
566 |             '-v', 'error',
567 |             '-select_streams', 'v:0',
568 |             '-show_entries', 'stream=codec_name',
569 |             '-of', 'csv=p=0',
570 |             file_path
571 |         ]
572 | result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
573 | codec_name = result.stdout.strip()
574 | if codec_name:
575 | mime_types = {
576 | 'h264': 'video/mp4',
577 | 'hevc': 'video/mp4',
578 | 'vp8': 'video/webm',
579 | 'vp9': 'video/webm',
580 | 'av1': 'video/mp4',
581 | 'mpeg4': 'video/mp4',
582 | 'theora': 'video/ogg',
583 | 'wmv3': 'video/x-ms-wmv',
584 | 'vp6f': 'video/x-flv',
585 | }
586 | return mime_types.get(codec_name, 'video/unknown')
587 | except FileNotFoundError:
588 |         print(f"{ffprobe_exe_path} not found, unable to confirm if the file is a video.")
589 | return False
590 | CANVAS_MAX_WIDTH = 1000
591 | CANVAS_MAX_HEIGHT = 1000
592 | END_SECONDS = 3
593 | def optimize_dimensions(w1, h1, w2, h2):
594 |     """Derive the side-by-side canvas size from the right-hand media (w2 x h2): the half-width is capped at CANVAS_MAX_WIDTH // 2 and both results are rounded down to multiples of 4 (w1 and h1 are currently unused)."""
595 | def adjust(value):
596 | return value - (value % 4)
597 | half_width = min(w2, CANVAS_MAX_WIDTH // 2)
598 | height = int(half_width * h2 / w2)
599 | return adjust(half_width * 2), adjust(height)
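600 | # Worked example: w2=720, h2=1280 -> half_width = min(720, 500) = 500,
601 | # height = int(500 * 1280 / 720) = 888 -> returns (1000, 888),
602 | # both already multiples of 4.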
600 | def process_file(file_path, side, target_width, target_height):
601 |     """Probe an image/video, compress it when oversized (> 5 MB image, > 20 MB video), and return (file_path, info) with dimensions and a base64 data URI."""
602 | file_info = {
603 | 'file_type': '',
604 | 'mime_type': '',
605 | 'width': 0,
606 | 'height': 0,
607 | 'base_64': ''
608 | }
609 | file_mime_type, file_type = determine_file_type(file_path)
610 | file_info['file_type'] = file_type
611 | file_info['mime_type'] = file_mime_type
612 | if file_type == 'image':
613 | width, height, size_mb = get_image_dimensions(file_path)
614 | if size_mb > 5:
615 | compress_image(file_path, target_width=target_width)
616 | width, height, _ = get_image_dimensions(file_path)
617 | file_info['width'] = width
618 | file_info['height'] = height
619 | file_info['base_64'] = file_to_base64(file_path)
620 | return file_path, file_info
621 | elif file_type == 'video':
622 | width, height, size_mb = get_video_dimensions(file_path)
623 | if size_mb > 20:
624 | cut_and_compress_video(file_path, target_width=target_width, duration=5)
625 | width, height, _ = get_video_dimensions(file_path)
626 | file_info['width'] = width
627 | file_info['height'] = height
628 | file_info['base_64'] = file_to_base64(file_path)
629 | return file_path, file_info
630 | else:
631 | raise ValueError(f"Unsupported file type for {side} side.")
632 | def do_zhutu(file_left='', file_right='', overlay_path=''):
633 |     """Compose the cover ("zhutu") from up to two inputs plus a watermark: two images yield a PNG, anything involving video yields a short MP4; returns base64 data and sizing info."""
634 | if not file_left and not file_right:
635 |         return {'error': 'Please upload at least one image or video', 'code': 1}
636 | out_put_name = generate_md5_uid_timestamp(overlay_path)
637 | out_put_png = out_put_name + '.png'
638 | out_put_mp4 = out_put_name + '.mp4'
639 | out_put_mp4_temp = out_put_name + 'temp.mp4'
640 | out_put_name_mp4_temp = folder_paths.get_input_directory() + '/' + out_put_mp4_temp
641 | out_put_name_mp4 = folder_paths.get_input_directory() + '/' + out_put_mp4
642 | out_put_name_png = folder_paths.get_input_directory() + '/' + out_put_png
643 | base_size = {}
644 | with ThreadPoolExecutor() as executor:
645 | futures = {}
646 | if file_left:
647 | futures['left'] = executor.submit(process_file, file_left, 'left', CANVAS_MAX_WIDTH // 2, CANVAS_MAX_HEIGHT)
648 | if file_right:
649 | futures['right'] = executor.submit(process_file, file_right, 'right', CANVAS_MAX_WIDTH // 2, CANVAS_MAX_HEIGHT)
650 | for side, future in futures.items():
651 | try:
652 | file_path, file_info = future.result()
653 | base_size[side] = file_info
654 | except Exception as e:
655 | return {'error': str(e), 'code': 1}
656 | if 'left' in base_size and 'right' in base_size:
657 | canvas_width, canvas_height = optimize_dimensions(
658 | base_size['left']['width'], base_size['left']['height'],
659 | base_size['right']['width'], base_size['right']['height']
660 | )
661 | else:
662 | canvas_width, canvas_height = CANVAS_MAX_WIDTH, CANVAS_MAX_HEIGHT
663 | try:
664 | if 'left' in base_size and 'right' in base_size:
665 | if base_size['left']['file_type'] == 'image' and base_size['right']['file_type'] == 'image':
666 | process_image(
667 | input_image_path_left=file_left,
668 | input_image_path_right=file_right,
669 | output_image_path=out_put_name_png,
670 | canvas_size=(canvas_width, canvas_height),
671 | overlay_image_path=overlay_path
672 | )
673 | result_type = 'image'
674 | elif base_size['left']['file_type'] == 'video' and base_size['right']['file_type'] == 'video':
675 | process_and_merge_videos(
676 | file_left, file_right, 0, END_SECONDS, 0, END_SECONDS,
677 | output_file=out_put_name_mp4,
678 | overlay_image=overlay_path
679 | )
680 | result_type = 'video'
681 | else:
682 | if base_size['left']['file_type'] == 'video':
683 | process_and_merge_image_video(
684 | file_right, file_left, out_put_name_mp4,
685 | overlay_path,
686 | side='right', target_width=canvas_width // 2, target_height=canvas_height,
687 | start_seconds=0, end_seconds=END_SECONDS, fps=24
688 | )
689 | else:
690 | process_and_merge_image_video(
691 | file_left, file_right, out_put_name_mp4,
692 | overlay_path,
693 | side='left', target_width=canvas_width // 2, target_height=canvas_height,
694 | start_seconds=0, end_seconds=END_SECONDS, fps=24
695 | )
696 | result_type = 'video'
697 | else:
698 | if 'left' in base_size:
699 | single_file = file_left
700 | side = 'left'
701 | file_info = base_size['left']
702 | else:
703 | single_file = file_right
704 | side = 'right'
705 | file_info = base_size['right']
706 | if file_info['file_type'] == 'image':
707 | process_image(
708 | **{f"input_image_path_{side}": single_file},
709 | output_image_path=out_put_name_png,
710 | canvas_size=(canvas_width, canvas_height),
711 | overlay_image_path=overlay_path
712 | )
713 | result_type = 'image'
714 | else:
715 | process_and_merge_image_video(
716 | None, single_file, out_put_name_mp4,
717 | overlay_path,
718 | side='right' if side == 'left' else 'left',
719 | target_width=canvas_width // 2, target_height=canvas_height,
720 | start_seconds=0, end_seconds=END_SECONDS, fps=24
721 | )
722 | result_type = 'video'
723 | if result_type == 'image':
724 | result_data = file_to_base64(out_put_name_png)
725 | filename = out_put_png
726 | else:
727 | result_data = file_to_base64(out_put_name_mp4)
728 | filename = out_put_mp4
729 | return {
730 | 'data': result_data,
731 | 'filename': filename,
732 | 'code': 0,
733 | 'type': result_type,
734 | 'size': {'width': canvas_width, 'height': canvas_height},
735 | 'base_size': base_size
736 | }
737 | finally:
738 | if os.path.exists(out_put_name_mp4_temp):
739 | os.remove(out_put_name_mp4_temp)
740 | def get_video_duration(video_path):
741 | result = subprocess.run(
742 | [ffprobe_exe_path, '-v', 'error', '-show_entries', 'format=duration',
743 | '-of', 'default=noprint_wrappers=1:nokey=1', video_path],
744 | stdout=subprocess.PIPE,
745 | stderr=subprocess.PIPE,
746 | text=True
747 | )
748 | return float(result.stdout.strip())
749 | def extract_frames(video_path, num_frames=3):
750 | duration = get_video_duration(video_path)
751 | frame_times = []
752 | if duration < 1.0:
753 | frame_times = [0]
754 | else:
755 | frame_times = [(duration / (num_frames + 1)) * (i + 1) for i in range(num_frames)]
756 | frame_contents = []
757 | for time in frame_times:
758 | result = subprocess.run(
759 | [ffmpeg_path_exe, '-ss', str(time), '-i', video_path, '-vframes', '1',
760 | '-f', 'image2pipe', '-vcodec', 'png', '-'],
761 | stdout=subprocess.PIPE,
762 | stderr=subprocess.PIPE
763 | )
764 | frame_contents.append(result.stdout)
765 | return frame_contents
766 |
--------------------------------------------------------------------------------
/wss.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import json
3 | import os
4 | import queue
5 | import time
6 | import urllib
7 | import uuid
8 | import aiohttp
9 | import urllib.request
10 | import urllib.parse
11 | import collections
12 | from concurrent.futures import ThreadPoolExecutor, as_completed
13 | from threading import Lock, Condition
14 | import websockets
15 | WEBSOCKETS_VERSION = tuple(map(int, websockets.__version__.split('.')))
16 | def is_websocket_connected(websocket_conn):
17 |     """Return True when the websocket is open, handling the websockets>=14 API change from .open to .state."""
18 | if websocket_conn is None:
19 | return False
20 | if WEBSOCKETS_VERSION < (14, 0):
21 | return websocket_conn.open
22 | else:
23 | return websocket_conn.state == 1
24 | import threading
25 | from .public import (get_output, write_json_to_file, get_address, get_port, get_port_from_cmdline, args, \
26 | find_project_root, get_workflow, get_base_url, get_filenames, read_json_file,merge_alpha_channels,
27 | generate_large_random_number, generate_md5_uid_timestamp_filename, loca_download_image, print_exception_in_chinese, determine_file_type, get_upload_url, send_binary_data, remove_query_parameters, combine_images, send_binary_data_async, find_project_custiom_nodes_path)
28 | from .utils import get_video_dimensions, get_image_dimensions, extract_frames
29 | import folder_paths
30 | output_directory = folder_paths.get_output_directory()
31 | SERVER_1_URI = "wss://tt.9syun.com/wss"
32 | ADDRESS = get_address()
33 | PORT = get_port_from_cmdline()
34 | HTTP_ADDRESS = "http://{}:{}/".format(ADDRESS, PORT)
35 | new_client_w_id = f"{str(uuid.uuid4())}:{get_port()}"
36 | SERVER_2_URI = "ws://{}:{}/ws?clientId={}".format(ADDRESS, PORT, new_client_w_id)
37 | RECONNECT_DELAY = 1
38 | MAX_RECONNECT_DELAY = 3
39 | task_queue_1 = queue.Queue()
40 | task_queue_2 = queue.Queue()
41 | task_queue_3 = {}
42 | websocket_queue = collections.deque()
43 | websocket_conn1 = None
44 | websocket_conn2 = None
45 | websocket_conn3 = None
46 | history_data = {
47 | 'queue_running': [],
48 | 'queue_pending': []
49 | }
50 | history_prompt_ids = []
51 | class MonitoredThreadPoolExecutor(ThreadPoolExecutor):
52 | def __init__(self, max_workers=None, thread_name_prefix=''):
53 | super().__init__(max_workers=max_workers, thread_name_prefix=thread_name_prefix)
54 | self._lock = Lock()
55 | self._condition = Condition(self._lock)
56 | self._active_tasks = 0
57 | self._max_workers = max_workers
58 | def submit(self, fn, *args, **kwargs):
59 | with self._lock:
60 | while self._active_tasks >= self._max_workers:
61 | self._condition.wait()
62 | self._active_tasks += 1
63 | future = super().submit(self._wrap_task(fn), *args, **kwargs)
64 | return future
65 | def _wrap_task(self, fn):
66 | def wrapped_fn(*args, **kwargs):
67 | try:
68 | return fn(*args, **kwargs)
69 | finally:
70 | with self._lock:
71 | self._active_tasks -= 1
72 | self._condition.notify_all()
73 | return wrapped_fn
74 | def active_tasks(self):
75 | with self._lock:
76 | return self._active_tasks
77 | executor = MonitoredThreadPoolExecutor(max_workers=40)
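78 | # Unlike a plain ThreadPoolExecutor (which queues excess work), submit() above
79 | # blocks the caller once max_workers tasks are active, providing backpressure, e.g.:
80 | #   executor.submit(run_async_task, output, prompt_data, workflow, jilu_id)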
78 | class UploadManager:
79 | def __init__(self, session, url_result, post_file_arr, post_uir_arr, base_url):
80 | self.session = session
81 | self.url_result = url_result
82 | self.post_file_arr = post_file_arr
83 | self.post_uir_arr = post_uir_arr
84 | self.binary_arr = ['frame', 'auth']
85 | self.base_url = base_url
86 | self.json_arr = []
87 | self.auth_arr = []
88 | def prepare_tasks(self):
89 | tasks = []
90 | for index, item1 in enumerate(self.url_result['data']['data']):
91 | main_url = item1['url']
92 | main_post_file = self.post_file_arr[index]['url']
93 | main_post_uir = self.post_uir_arr[index]['url']
94 | is_binary = item1['type'] in self.binary_arr
95 | mime_type = self.post_uir_arr[index]['url']
96 | if not is_binary:
97 | main_post_file = f"{self.base_url}/{main_post_file}"
98 | tasks.append((main_url, main_post_file, is_binary, mime_type, index, False, 0))
99 | for key, value in enumerate(item1.get('urls', [])):
100 | url = value['url']
101 | post_file = self.post_file_arr[index]['urls'][key]['url']
102 | post_uir = self.post_uir_arr[index]['urls'][key]['url']
103 | file_type = self.post_uir_arr[index]['urls'][key]['type']
104 | is_binary = file_type in self.binary_arr
105 | if not is_binary:
106 | post_file = f"{self.base_url}/{post_file}"
107 | tasks.append((url, post_file, is_binary, post_uir, index, True, key))
108 | return tasks
109 | def upload_task(self, *args):
110 | import time
111 | start_time = time.time()
112 | upload_url, post_file, is_binary, mime_type, index, is_sub_url, key = args
113 |         upload_status, upload_message = send_binary_data_async(upload_url, post_file, is_binary, mime_type)
114 |         if not upload_status:
115 |             time.sleep(0.5)
116 |             upload_status, upload_message = send_binary_data_async(upload_url, post_file, is_binary, mime_type)
117 |         if not upload_status:
118 |             raise Exception(upload_message)
119 | cleaned_url = remove_query_parameters(upload_url)
120 | elapsed_time = time.time() - start_time
121 | return cleaned_url, index, is_sub_url, key, elapsed_time, is_binary
122 | def start_sync(self):
123 | tasks = self.prepare_tasks()
124 | results = []
125 | with ThreadPoolExecutor(max_workers=15) as executor1:
126 | futures = {executor1.submit(self.upload_task, *task): task for task in tasks}
127 | for future in as_completed(futures):
128 | try:
129 | cleaned_url, index, is_sub_url, key, elapsed_time, is_binary = future.result()
130 | if is_sub_url:
131 | self.url_result['data']['data'][index]['urls'][key]['url'] = cleaned_url
132 | else:
133 | self.url_result['data']['data'][index]['url'] = cleaned_url
134 | results.append((cleaned_url, index, is_sub_url, key))
135 |                 except Exception as e:
136 |                     failed_url = futures[future][0]
137 |                     raise Exception(str(e) + failed_url)
138 | return results
139 | def get(self):
140 | for index, item1 in enumerate(self.url_result['data']['data']):
141 | if item1['type'] == 'auth':
142 | self.auth_arr.append(item1)
143 | else:
144 | self.json_arr.append(item1)
145 | return self.json_arr, self.auth_arr, self.url_result['data']['data']
146 | async def websocket_connect(uri, conn_identifier):
147 | global websocket_conn1, websocket_conn2, send_time
148 | reconnect_delay = RECONNECT_DELAY
149 | while True:
150 | try:
151 | async with websockets.connect(uri) as websocket:
152 |                 print(f"Connection {conn_identifier} established")
153 | if conn_identifier == 1:
154 | websocket_conn1 = websocket
155 | else:
156 | websocket_conn2 = websocket
157 | for key, val in task_queue_3.items():
158 | is_set = key in history_prompt_ids
159 |                         if not is_set:
160 |                             # this prompt has not been finalized yet, so queue
161 |                             # its result for re-sending after reconnecting
162 | task_queue_2.put({
163 | 'type': 'send',
164 | 'prompt_id': key,
165 | })
166 | reconnect_delay = RECONNECT_DELAY
167 | tasks = [
168 | asyncio.create_task(receive_messages(websocket, conn_identifier)),
169 | asyncio.create_task(send_heartbeat())
170 | ]
171 | await asyncio.gather(*tasks)
172 | except websockets.ConnectionClosedError as e:
173 | print_exception_in_chinese(e)
174 | await asyncio.sleep(reconnect_delay)
175 | except websockets.ConnectionClosedOK as e:
176 | print_exception_in_chinese(e)
177 | await asyncio.sleep(reconnect_delay)
178 | except Exception as e:
179 | await asyncio.sleep(reconnect_delay)
180 | reconnect_delay = min(reconnect_delay * 2, MAX_RECONNECT_DELAY)
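181 | # Reconnect policy: wait RECONNECT_DELAY (1 s) after a drop, doubling on
182 | # repeated failures up to MAX_RECONNECT_DELAY (3 s); a successful connect
183 | # resets the delay.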
181 | def get_history_prompt(prompt_id):
182 | try:
183 | if is_websocket_connected(websocket_conn2):
184 | with urllib.request.urlopen(HTTP_ADDRESS + 'history' + '/' + prompt_id) as response:
185 | return json.loads(response.read())
186 | else:
187 | return {}
188 | except Exception as e:
189 |         print(f"\033[91m Service is still connecting {time.strftime('%Y-%m-%d %H:%M:%S')} \033[0m")
190 | return {}
191 | async def getHistoryPrompt(prompt_id, type_a=''):
192 | result_data = [{"type": "str", "k": 'prompt_id', "v": prompt_id}]
193 | result = get_history_prompt(prompt_id)
194 | response_status = None
195 | post_uir_arr = []
196 | post_file_arr = []
197 | image_info_list = []
198 | try:
199 | if prompt_id in result:
200 | result = result[prompt_id]
201 | status = result.get('status', {})
202 | if status.get('completed', False):
203 | file_num = 0
204 | result_data.append({"type": "str", "k": 'ok', "v": '1'})
205 | for index, output in enumerate(result.get('outputs', {}).values()):
206 | for media in ['images', 'gifs', 'videos']:
207 | if media in output:
208 | for item in output[media]:
209 | if 'filename' in item and item['type'] == 'output':
210 | if item['subfolder'] != '':
211 | item['filename'] = item['subfolder'] + '/' + item['filename']
212 | file_num += 1
213 | item_url_info = {}
214 |                                 mime_type, file_type = determine_file_type(folder_paths.get_output_directory() + '/' + item['filename'])
215 | if file_type == 'video':
216 | width1, height1, size_mb = get_video_dimensions(folder_paths.get_output_directory() + '/' + item['filename'])
217 | else:
218 | width1, height1, size_mb = get_image_dimensions(folder_paths.get_output_directory() + '/' + item['filename'], {'TIMESTAMP': str(time.time()), 'PROMPTID': str(prompt_id)})
219 | item_url_info = {
220 |                                 'url': mime_type,
221 | 'file_type': file_type,
222 | 'width': width1,
223 | 'height': height1,
224 | 'ratio': height1 / width1,
225 | 'urls': [],
226 | 'type': 'result'
227 | }
228 | item_url_file = {
229 | 'url': item['filename'],
230 | 'urls': [],
231 | 'type': 'result'
232 | }
233 | if file_type == 'video':
234 | frame_contents = extract_frames(folder_paths.get_output_directory() + '/' + item['filename'])
235 | for k, frame_content in enumerate(frame_contents):
236 | image_info_list.append({
237 | 'type': 'binary',
238 | 'content': frame_content
239 | })
240 | if k == 0:
241 | item_url_info['urls'].append({
242 | 'url': 'image/png',
243 | 'width': width1,
244 | 'height': height1,
245 | 'ratio': height1 / width1,
246 | 'type': 'frame'
247 | })
248 | item_url_file['urls'].append({
249 | 'url': frame_content,
250 | 'type': 'frame'
251 | })
252 | else:
253 | image_info_list.append({
254 | 'type': 'path',
255 | 'content': folder_paths.get_output_directory() + '/' + item['filename']
256 | })
257 | post_uir_arr.append(item_url_info)
258 | post_file_arr.append(item_url_file)
259 | if file_num == 0:
260 | return
261 | pass
262 | else:
263 |                 result_data.append({"type": "str", "k": 'ok', "v": '0', 'text': 'status.completed was not set'})
264 | else:
265 | is_set = prompt_id in history_prompt_ids
266 | if is_set:
267 | return
268 |         result_data.append({"type": "str", "k": 'ok', "v": '0', 'text': 'prompt_id not found'})
269 | response_status = 200
270 | except Exception as e:
271 | print_exception_in_chinese(e)
272 |         result_data.append({"type": "str", "k": 'ok', "v": '0', 'text': 'exception while collecting outputs'})
273 | response_status = 500
274 | if len(image_info_list) > 0:
275 | binary_data_list = combine_images(image_info_list)
276 | for binary_data in binary_data_list:
277 | post_uir_arr.append({
278 | 'url': 'image/png',
279 | 'file_type': 'image',
280 | 'width': '',
281 | 'height': '',
282 | 'ratio': 1,
283 | 'upload_type': 0,
284 | 'urls': [],
285 | 'type': 'auth',
286 | 'index': 0
287 | })
288 | post_file_arr.append({
289 | 'url': binary_data,
290 | 'upload_type': 0,
291 | 'urls': [],
292 | 'type': 'auth',
293 | 'index': 0
294 | })
295 | submit_url = get_base_url() + 'comfyui.resultv2.formSubmitForComfyUi&m=tech_huise'
296 | connector = aiohttp.TCPConnector()
297 | async with aiohttp.ClientSession(connector=connector) as session:
298 | try:
299 | url_result = await get_upload_url(post_uir_arr, new_client_w_id, session, 2)
300 | manager = UploadManager(session, url_result, post_file_arr, post_uir_arr, folder_paths.get_output_directory())
301 | manager.start_sync()
302 | json_arr, auth_arr, post_arr = manager.get()
303 | result_data.append({"type": "str", "k": 'images', "v": json.dumps(json_arr)})
304 | result_data.append({"type": "str", "k": 'auth', "v": json.dumps(auth_arr)})
305 | form_res_data = await send_form_data(session, submit_url, result_data, prompt_id)
306 | except json.JSONDecodeError as e:
307 | print_exception_in_chinese(e)
308 | result_data.append({"type": "str", "k": 'ok', "v": '0', 'text': str(e)})
309 | result_data.append({"type": "str", "k": 'error', "v": str(e)})
310 | response_status = 400
311 | form_res_data = await send_form_data(session, submit_url, result_data, prompt_id)
312 | except Exception as e:
313 | print_exception_in_chinese(e)
314 | result_data.append({"type": "str", "k": 'ok', "v": '0', 'text': 'upload_url:'+str(e)})
315 | result_data.append({"type": "str", "k": 'error', "v": 'upload_url:'+str(e)})
316 | response_status = 500
317 | form_res_data = await send_form_data(session, submit_url, result_data, prompt_id)
318 | finally:
319 | if 'session' in locals():
320 | await session.close()
321 |     return {'status': response_status,
322 |             'message': 'Operation completed.' if response_status == 200 else 'An error occurred.'}
323 | async def send_form_data(session, url, data, prompt_id=None):
324 | global websocket_conn1
325 | form_data = aiohttp.FormData()
326 | try:
327 | for item in data:
328 | if item['type'] == 'str':
329 | form_data.add_field(item['k'], item['v'])
330 | if item['type'] == 'images' or item['type'] == 'gifs' or item['type'] == 'videos' or item[
331 | 'type'] == 'files':
332 | if os.path.exists(item['v']):
333 | with open(item['v'], 'rb') as f:
334 | file_content = f.read()
335 | form_data.add_field(item['k'] + '[]', file_content, filename=os.path.basename(item['v']),
336 | content_type='application/octet-stream')
337 | pass
338 | else:
339 | pass
340 | if item['type'] == 'file':
341 | if os.path.exists(item['v']):
342 | with open(item['v'], 'rb') as f:
343 | file_content = f.read()
344 | form_data.add_field(item['k'], file_content, filename=os.path.basename(item['v']),
345 | content_type='application/octet-stream')
346 | else:
347 | pass
348 | except Exception as e:
349 | print_exception_in_chinese(e)
350 | async with session.post(url, data=form_data) as response:
351 | if response.status == 200:
352 | resp_text = await response.text()
353 | if prompt_id and is_websocket_connected(websocket_conn1):
354 | websocket_queue.append({
355 | "conn_identifier": 1,
356 | "data": {
357 | 'type': 'crystools.executed_success',
358 | 'data': {
359 | 'prompt_id': prompt_id
360 | }
361 | },
362 | })
363 | return resp_text
364 | else:
365 | return None
366 | pass
367 | async def server1_receive_messages(websocket, message_type, message_json):
368 | if message_type == 'init':
369 | await websocket.send(json.dumps({
370 | 'type': 'crystools.bind',
371 | 'data': {
372 | "client_id": new_client_w_id,
373 | }
374 | }))
375 | pass
376 | if message_type == 'prompt':
377 | prompt_data = message_json['data']
378 | jilu_id = prompt_data['jilu_id']
379 | uniqueid = message_json['uniqueid']
380 | output = get_output(uniqueid + '.json')
381 | workflow = get_workflow(uniqueid + '.json')
382 | if output:
383 | executor.submit(run_async_task, output, prompt_data, workflow, jilu_id)
384 | else:
385 | if is_websocket_connected(websocket):
386 | websocket_queue.append({
387 | "conn_identifier": 1,
388 | "data": {
389 | 'type': 'crystools.prompt_error',
390 | 'data': {
391 | 'jilu_id': jilu_id,
392 | 'msg': 'the workflow for this work could not be found'
393 | }
394 | },
395 | })
396 | pass
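# ComfyUI's /queue payload stores each entry as a tuple; index 1 appears to be
# the prompt id and, for pending entries, index 0 their queue number, which is
# why pending items are sorted by int(x[0]) below.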
397 | def optimized_process_history_data(history_data_1):
398 | running = []
399 | pending = []
400 | if history_data_1:
401 | queue_running = history_data_1.get('queue_running', [])
402 | if queue_running:
403 | running.append(queue_running[0][1])
404 | queue_pending = history_data_1.get('queue_pending', [])
405 | if queue_pending:
406 | pending = sorted(queue_pending, key=lambda x: int(x[0]))
407 | pending = [item[1] for item in pending]
408 | return running, pending
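# getMessageHistoryPrompt mirrors getHistoryPrompt for prompts recovered from
# ComfyUI's history: it collects the output media paths and submits them to
# the same formSubmitForComfyUi endpoint.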
409 | async def getMessageHistoryPrompt(result, prompt_id):
410 | result_data = [{"type": "str", "k": 'prompt_id', "v": prompt_id}]
411 | response_status = None
412 | if 'output' not in result:
413 | return
414 | if result['output'] is None:
415 | return
416 | try:
417 | file_num = 0
418 | result_data.append({"type": "str", "k": 'ok', "v": '1'})
419 | for media in ['images', 'gifs', 'videos']:
420 | if media in result['output']:
421 | for item in result['output'][media]:
422 | if 'filename' in item and item['type'] == 'output':
423 | if item['subfolder'] != '':
424 | item['filename'] = item['subfolder'] + '/' + item['filename']
425 | file_num += 1
426 | result_data.append({"type": 'images', "k": 'file', "v": folder_paths.get_output_directory() + '/' + item['filename']})
427 | if file_num == 0:
428 | return
429 | pass
430 | except Exception as e:
431 | print_exception_in_chinese(e)
432 | result_data.append({"type": "str", "k": 'ok', "v": '0', 'text': 'unexpected exception'})
433 | response_status = 500
434 | submit_url = get_base_url() + 'comfyui.resultv2.formSubmitForComfyUi&m=tech_huise'
435 | connector = aiohttp.TCPConnector()
436 | async with aiohttp.ClientSession(connector=connector) as session:
437 | try:
438 | form_res_data = await send_form_data(session, submit_url, result_data, prompt_id)
439 | except json.JSONDecodeError as e:
440 | print_exception_in_chinese(e)
441 | result_data.append({"type": "str", "k": 'ok', "v": '0', 'text': 'JSON decode error'})
442 | response_status = 400
443 | except Exception as e:
444 | print_exception_in_chinese(e)
445 | result_data.append({"type": "str", "k": 'ok', "v": '0', 'text': 'aiohttp request error'})
446 | response_status = 500
447 | # no manual cleanup needed: the async with block closes the session on exit
450 | return {'status': response_status,
451 | 'message': 'Operation completed.' if response_status == 200 else 'An error occurred.'}
452 | async def server2_receive_messages(websocket, message_type, message_json):
453 | global send_time
454 | if message_type and message_type != 'crystools.monitor':
455 | if message_type == 'status' and message_json['data']['status']['exec_info']:
456 | websocket_queue.append({
457 | "conn_identifier": 1,
458 | "data": {
459 | 'type': 'crystools.queue',
460 | 'data': {
461 | "client_id": new_client_w_id,
462 | 'queue_remaining': message_json['data']['status']['exec_info'][
463 | 'queue_remaining']
464 | }
465 | },
466 | })
467 | await send_heartbeat_to_server2()
468 | if message_type == 'execution_start':
469 | pass
470 | if message_type == 'executing':
471 | pass
472 | if message_type == 'execution_error':
473 | task_queue_2.put({
474 | 'type': 'send',
475 | 'prompt_id': message_json['data']['prompt_id'],
476 | })
477 | pass
478 | if message_type == 'executed':
479 | await asyncio.sleep(1)  # time.sleep would block the shared event loop
480 | task_queue_2.put({
481 | 'type': 'send',
482 | 'prompt_id': message_json['data']['prompt_id'],
483 | })
484 | pass
485 | if message_type == 'progress':
486 | pass
487 | if message_type == 'execution_cached' and 'prompt_id' in message_json['data']:
488 | await asyncio.sleep(1)  # time.sleep would block the shared event loop
489 | task_queue_2.put({
490 | 'type': 'send',
491 | 'prompt_id': message_json['data']['prompt_id'],
492 | })
493 | pass
494 | async def receive_messages(websocket, conn_identifier):
495 | # parse each text frame and route it to the handler for this connection
496 | if is_websocket_connected(websocket):
497 | try:
498 | async for message in websocket:
499 | if not isinstance(message, bytes):
500 | message_dict = json.loads(message)
501 | message_type = message_dict.get("type")
502 | if conn_identifier == 1:
503 | await server1_receive_messages(websocket, message_type, message_dict)
504 | elif conn_identifier == 2:
505 | await server2_receive_messages(websocket, message_type, message_dict)
506 | except json.JSONDecodeError as e:
507 | print_exception_in_chinese(e)
508 | finally:
509 | await asyncio.sleep(.5)
510 | async def send_heartbeat():
511 | # while both sockets are alive, push a queue heartbeat upstream every 10 seconds
512 | while True:
513 | try:
514 | if is_websocket_connected(websocket_conn1) and is_websocket_connected(websocket_conn2):
515 | await send_heartbeat_to_server2()
516 | pass
517 | except Exception as e:
518 | print_exception_in_chinese(e)
519 | finally:
520 | await asyncio.sleep(10)
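# get_history polls ComfyUI's HTTP /queue endpoint rather than the websocket,
# falling back to an empty snapshot whenever the backend socket is down.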
521 | def get_history():
522 | global last_get_history_time
523 | try:
524 | if is_websocket_connected(websocket_conn2):
525 | last_get_history_time = time.time()
526 | with urllib.request.urlopen(HTTP_ADDRESS + 'queue') as response:
527 | return json.loads(response.read())
528 | else:
529 | return {
530 | 'queue_running': [],
531 | 'queue_pending': [],
532 | }
533 | except Exception as e:
534 | return {
535 | 'queue_running': [],
536 | 'queue_pending': [],
537 | }
538 | send_time = '0'
539 | def get_time():
540 | return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
541 | async def send_heartbeat_to_server2():
542 | running, pending = optimized_process_history_data(history_data)
543 | try:
544 | file_names = get_filenames(find_project_custiom_nodes_path() + 'ComfyUI_Bxb/config/json/api/')
545 | websocket_queue.append({
546 | "conn_identifier": 1,
547 | "data": {
548 | 'type': 'crystools.monitor',
549 | 'data': {
550 | "files": file_names,
551 | "running": running,
552 | "pending": pending,
553 | "client_id": new_client_w_id,
554 | }
555 | },
556 | })
557 | except Exception as e:
558 | print_exception_in_chinese(e)
559 | pass
560 | def run_task_in_loop(task, *args, **kwargs):
561 | # run a blocking task forever, pausing one second between iterations
562 | while True:
563 | task(*args, **kwargs)
564 | time.sleep(1)
565 | loop_num = 0
566 | async def run_websocket_task_in_loop():
567 | global loop_num
568 | while True:
569 | try:
570 | if len(websocket_queue) > 0:
571 | websocket_info = websocket_queue.popleft()
572 | if 'conn_identifier' in websocket_info:
573 | if is_websocket_connected(websocket_conn3) and is_websocket_connected(websocket_conn1):
574 | websocket_info['data']['zhu_client_id'] = new_client_w_id
575 | if websocket_info['conn_identifier'] == 1:
576 | await websocket_conn3.send(json.dumps(websocket_info['data']))
577 | else:
578 | loop_num = loop_num + 1
579 | if loop_num > 1000:
580 | loop_num = 0
581 | await websocket_conn3.send(json.dumps({
582 | 'time': get_time(),
583 | 'type': 'crystools.line',
584 | 'data': {
585 | 'client_id': new_client_w_id,
586 | }
587 | }))
588 | except Exception as e:
589 | break
590 | finally:
591 | await asyncio.sleep(0.02)
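# queue_prompt submits a job to ComfyUI's HTTP /prompt endpoint, passing the
# shared client_id so that progress events come back on websocket_conn2.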
592 | def queue_prompt(prompt, workflow, new_client_id):
593 | try:
594 | if is_websocket_connected(websocket_conn2):
595 | p = {
596 | "prompt": prompt,
597 | "client_id": new_client_id,
598 | 'extra_data': workflow['extra_data'],
599 | }
600 | data = json.dumps(p).encode('utf-8')
601 | req = urllib.request.Request(HTTP_ADDRESS + 'prompt', data=data)
602 | return json.loads(urllib.request.urlopen(req).read())
603 | else:
604 | return {}
605 | except Exception as e:
606 | print_exception_in_chinese(e)
607 | return {}
608 | def find_element_by_key(array, key):
609 | key_int = key
610 | if ":" not in key_int:
611 | key_int = int(key_int)
612 | for index, element in enumerate(array):
613 | if element.get('id') == key_int:
614 | return element, index
615 | return None, -1
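# process_json_elements rewrites the stored API-format workflow in place:
# user-supplied images, videos and texts from prompt_data are downloaded into
# the matching node inputs before the prompt is queued.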
616 | async def process_json_elements(json_data, prompt_data, workflow, jilu_id):
617 | global websocket_conn1
618 | line_json = read_json_file('https://tt.9syun.com/seed.json')
619 | try:
620 | if 'cs_imgs' in prompt_data and prompt_data['cs_imgs']:
621 | for item in prompt_data['cs_imgs']:
622 | filename = os.path.basename(item['upImage'])
623 | download_info = await loca_download_image(item['upImage'], filename)
624 | download_status = download_info['code']
625 | file_new_name = download_info['filename']
626 | if not download_status:
627 | raise Exception('image download failed')
628 | if str(item['node']) in json_data and 'inputs' in json_data[str(item['node'])] and 'image' in \
629 | json_data[str(item['node'])]['inputs']:
630 | json_data[str(item['node'])]['inputs']['image'] = file_new_name
631 | if 'cs_videos' in prompt_data and prompt_data['cs_videos']:
632 | for item in prompt_data['cs_videos']:
633 | filename = os.path.basename(item['upImage'])
634 | download_info = await loca_download_image(item['upImage'], filename)
635 | download_status = download_info['code']
636 | file_new_name = download_info['filename']
637 | if not download_status:
638 | raise Exception('video download failed')
639 | if str(item['node']) in json_data and 'inputs' in json_data[str(item['node'])] and 'video' in \
640 | json_data[str(item['node'])]['inputs']:
641 | json_data[str(item['node'])]['inputs']['video'] = file_new_name
642 | if 'cs_texts' in prompt_data and prompt_data['cs_texts']:
643 | for item in prompt_data['cs_texts']:
644 | json_data[str(item['node'])]['inputs']['text'] = item['value']
645 | if 'check_output_item' in prompt_data and prompt_data['check_output_item']:
646 | check_output_item = prompt_data['check_output_item']
647 | for index, item in enumerate(check_output_item):
648 | class_type_name = f"{item['options']['class_type']}.{item['options']['name']}"
649 | if class_type_name in line_json['video_load'] or class_type_name in line_json['image_load']:
650 | if item['custom_value']:
651 | filename = os.path.basename(item['custom_value'])
652 | download_info = await loca_download_image(item['custom_value'], filename)
653 | download_status = download_info['code']
654 | file_new_name = download_info['filename']
655 | if not download_status:
656 | raise Exception('image download failed')
657 | json_data[item['options']['node']]['inputs'][item['options']['name']] = file_new_name
658 | if item.get('mask_value',''):
659 | mask_value_filename = os.path.basename(item['mask_value'])
660 | mask_value_download_info = await loca_download_image(item['mask_value'], mask_value_filename)
661 | mask_value_download_status = mask_value_download_info['code']
662 | mask_value_file_new_name = mask_value_download_info['filename']
663 | if not mask_value_download_status:
664 | raise Exception('image download failed')
665 | json_data[item['options']['node']]['inputs'][item['options']['name']] = merge_alpha_channels(folder_paths.get_input_directory()+ '/' +file_new_name,folder_paths.get_input_directory()+ '/' + mask_value_file_new_name)
666 | else:
667 | json_data[item['options']['node']]['inputs'][item['options']['name']] = item['custom_value']
668 | pass
669 | except KeyError as e:
670 | print_exception_in_chinese(e)
671 | websocket_queue.appendleft({
672 | "conn_identifier": 1,
673 | "data": {
674 | 'type': 'crystools.prompt_error',
675 | 'data': {
676 | 'jilu_id': jilu_id,
677 | 'msg': 'failed to send command (1)'
678 | }
679 | },
680 | })
681 | return {
682 | 'code': 0,
683 | 'jilu_id': jilu_id
684 | }
685 | except Exception as e:
686 | print_exception_in_chinese(e)
687 | websocket_queue.appendleft({
688 | "conn_identifier": 1,
689 | "data": {
690 | 'type': 'crystools.prompt_error',
691 | 'data': {
692 | 'jilu_id': jilu_id,
693 | 'msg': 'failed to send command (2)'
694 | }
695 | },
696 | })
697 | return {
698 | 'code': 0,
699 | 'jilu_id': jilu_id
700 | }
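# print_item patches a single node: switch selections are mirrored into the
# pnginfo workflow, seed-like inputs are stepped or randomized per the rules
# in seed.json, and a crf of 0 is bumped to 1.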
701 | async def print_item(now_index, key, value):
702 | try:
703 | workflow_node = workflow['extra_data']['extra_pnginfo']['workflow']['nodes']
704 | if value['class_type'] in line_json['switch_name']:
705 | workflow_node_info, workflow_node_info_index = find_element_by_key(workflow_node, key)
706 | workflow['extra_data']['extra_pnginfo']['workflow']['nodes'][workflow_node_info_index]['widgets_values'][0] = value['inputs']['select']
707 | if value['class_type'] in line_json['seed'] and line_json['seed'][value['class_type']]:
708 | check_value = line_json['seed'][value['class_type']]
709 | workflow_node_info, workflow_node_info_index = find_element_by_key(workflow_node, key)
710 | try:
711 | if workflow_node_info:
712 | default_seed_value = json_data[key]['inputs'][check_value['seed']]
713 | if isinstance(default_seed_value, (int, float, str)):
714 | # normalize to float so the step arithmetic below always works
715 | default_seed_value = float(default_seed_value)
716 | check_value_type = check_value['values'][
717 | workflow_node_info['widgets_values'][check_value['widgets_index']]]
718 | if check_value_type == '+':
719 | default_seed_value = default_seed_value + check_value['step']
720 | if check_value_type == '-':
721 | default_seed_value = default_seed_value - check_value['step']
722 | if check_value_type == '*':
723 | default_seed_value = generate_large_random_number(15)
724 | json_data[key]['inputs'][check_value['seed']] = default_seed_value
725 | except (KeyError, IndexError, TypeError) as e:
726 | print_exception_in_chinese(e)
727 | pass
728 | if value['class_type'] in line_json['crf'] and line_json['crf'][value['class_type']]:
729 | if line_json['crf'][value['class_type']] in json_data[key]['inputs'] and json_data[key]['inputs'][line_json['crf'][value['class_type']]] == 0:
730 | json_data[key]['inputs'][line_json['crf'][value['class_type']]] = 1
731 | except Exception as e:
732 | print_exception_in_chinese(e)
733 | websocket_queue.appendleft({
734 | "conn_identifier": 1,
735 | "data": {
736 | 'type': 'crystools.prompt_error',
737 | 'data': {
738 | 'jilu_id': jilu_id,
739 | 'msg': 'failed to send command'
740 | }
741 | },
742 | })
743 | tasks = [print_item(index, key, value) for index, (key, value) in enumerate(json_data.items())]
744 | await asyncio.gather(*tasks)
745 | try:
746 | result = queue_prompt(json_data, workflow, new_client_w_id)
747 | if 'prompt_id' in result:
748 | websocket_queue.appendleft({
749 | "conn_identifier": 1,
750 | "data": {
751 | 'type': 'crystools.prompt_ok',
752 | 'data': {
753 | 'prompt_id': result['prompt_id'],
754 | 'jilu_id': jilu_id,
755 | 'msg': 'command sent successfully'
756 | }
757 | },
758 | })
759 | task_queue_3[result['prompt_id']] = {
760 | 'jilu_id': jilu_id
761 | }
762 | return {
763 | 'code': 1,
764 | 'prompt_id': result['prompt_id']
765 | }
766 | else:
767 | raise Exception('failed to send command')
768 | except Exception as e:
769 | print_exception_in_chinese(e)
770 | websocket_queue.appendleft({
771 | "conn_identifier": 1,
772 | "data": {
773 | 'type': 'crystools.prompt_error',
774 | 'data': {
775 | 'jilu_id': jilu_id,
776 | 'msg': 'failed to send command'
777 | }
778 | },
779 | })
780 | return {
781 | 'code': 0,
782 | 'jilu_id': jilu_id  # error paths return the record id under 'jilu_id', matching the handlers above
783 | }
784 | def run_async_task(json_data, prompt_data, workflow, jilu_id):
785 | return asyncio.run(process_json_elements(json_data, prompt_data, workflow, jilu_id))
786 | def run_async_task2(prompt_id):
787 | asyncio.run(getHistoryPrompt(prompt_id))
788 | def task_3():
789 | # drain task_queue_1: load the stored output json and submit the prompt
790 | while True:
791 | try:
792 | task_info = task_queue_1.get()
793 | output = get_output(task_info['uniqueid'] + '.json')
794 | if output:
795 | executor.submit(run_async_task, output, task_info['prompt_data'], get_workflow(task_info['uniqueid'] + '.json'), task_info['jilu_id'])  # run_async_task takes (output, prompt_data, workflow, jilu_id)
796 | task_queue_1.task_done()
797 | except Exception as e:
798 | print_exception_in_chinese(e)
799 | finally:
800 | time.sleep(1)
801 | def task_4():
802 | global history_data
803 | # drain task_queue_2: refresh the queue snapshot, then report the finished prompt
804 | while True:
805 | try:
806 | task_info = task_queue_2.get()
807 | if 'prompt_id' in task_info:
808 | history_data = get_history()
809 | preprocess_history_data(history_data)
810 | task_queue_3.pop(task_info['prompt_id'], None)
811 | executor.submit(run_async_task2, task_info['prompt_id'])
812 | task_queue_2.task_done()
813 | except Exception as e:
814 | print_exception_in_chinese(e)
815 | finally:
816 | time.sleep(0.1)
817 | def print_thread_status():
818 | # debug helper: list all live threads every five seconds
819 | while True:
820 | print("\n当前活动线程:")
821 | for thread in threading.enumerate():
822 | print(f"线程名: {thread.name}, 线程ID: {thread.ident}, 活动状态: {thread.is_alive()}")
823 | time.sleep(5)
824 | def main_task():
825 | # placeholder loop; main_task is not scheduled anywhere in this file
826 | for i in range(10):
827 | time.sleep(1)
828 | def websocket_thread(uri, conn_identifier):
829 | loop = asyncio.new_event_loop()
830 | asyncio.set_event_loop(loop)
831 | loop.run_until_complete(websocket_connect(uri, conn_identifier))
832 | def websocket_thread_fu(uri, conn_identifier):
833 | loop = asyncio.new_event_loop()
834 | asyncio.set_event_loop(loop)
835 | loop.run_until_complete(websocket_connect_fu(uri, conn_identifier))
836 | def preprocess_history_data(history_data):
837 | global history_prompt_ids
838 | # rebuild the cached set of prompt_ids that are still running or pending
839 | prompt_ids = set()
840 | if history_data is None:
841 | history_prompt_ids = prompt_ids
842 | return
843 | for queue in ['queue_running', 'queue_pending']:
844 | for item in history_data.get(queue, []):
845 | prompt_ids.add(item[1])
846 | history_prompt_ids = prompt_ids
847 | last_get_history_time = 0
848 | async def task5():
849 | global history_data
850 | while True:
851 | try:
852 | history_data = get_history()
853 | preprocess_history_data(history_data)
854 | except Exception as e:
855 | print_exception_in_chinese(e)
856 | await asyncio.sleep(1)
857 | def task5_thread():
858 | loop = asyncio.new_event_loop()
859 | asyncio.set_event_loop(loop)
860 | loop.run_until_complete(task5())
861 | def start_async_task_in_thread(async_func):
862 | # run an async function to completion on a fresh event loop in this thread
863 | loop = asyncio.new_event_loop()
864 | asyncio.set_event_loop(loop)
865 | loop.run_until_complete(async_func())
866 | async def websocket_connect_fu(uri, conn_identifier):
867 | global websocket_conn3
868 | reconnect_delay = RECONNECT_DELAY
869 | while True:
870 | try:
871 | async with websockets.connect(uri) as websocket:
872 | print(f"{conn_identifier} 连接成功")
873 | websocket_conn3 = websocket
874 | await websocket_conn3.send(json.dumps({
875 | 'type': 'crystools.bind',
876 | 'data': {
877 | "client_id": new_client_w_id + '_fu',
878 | }
879 | }))
880 | reconnect_delay = RECONNECT_DELAY
881 | tasks = [
882 | asyncio.create_task(run_websocket_task_in_loop()),
883 | ]
884 | await asyncio.gather(*tasks)
885 | except websockets.ConnectionClosedError as e:
886 | print(f"\033[91m 3 服务正在连接中{get_time()} \033[0m")
887 | await asyncio.sleep(reconnect_delay)
888 | except websockets.ConnectionClosedOK as e:
889 | await asyncio.sleep(reconnect_delay)
890 | except Exception as e:
891 | await asyncio.sleep(reconnect_delay)
892 | reconnect_delay = min(reconnect_delay * 2, MAX_RECONNECT_DELAY)
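# thread_run wires everything together: two reader sockets (control and
# ComfyUI), one writer socket for the outgoing queue, the /queue poller, and
# the task_4 worker loop.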
893 | def thread_run():
894 | threading.Thread(target=websocket_thread, args=(SERVER_1_URI, 1), daemon=True).start()
895 | threading.Thread(target=websocket_thread, args=(SERVER_2_URI, 2), daemon=True).start()
896 | threading.Thread(target=websocket_thread_fu, args=(SERVER_1_URI, 3), daemon=True).start()
897 | threading.Thread(target=task5_thread).start()
898 | executor.submit(run_task_in_loop, task_4)
899 | async def update_worker_flow(uniqueid, data, flow_type='api/'):
900 | write_json_to_file(data, uniqueid + '.json', 'json/' + flow_type, 'json')
901 |
--------------------------------------------------------------------------------
/__init__.py:
--------------------------------------------------------------------------------
1 | import json
2 | import re
3 | import time
4 | from datetime import datetime
5 | from .install import *
6 | from .mime import add_mime_types
7 | add_mime_types()
8 | from .utils import get_ffmpeg_executable, do_zhutu, get_video_dimensions, get_image_dimensions, extract_frames
9 | import concurrent.futures
10 | import asyncio
11 | import aiohttp
12 | import aiohttp_cors
13 | import server
14 | import folder_paths
15 | from aiohttp import web
16 | from collections import deque
17 | import inspect
18 | import os
19 | import uuid
20 | import hashlib
21 | import platform
22 | import stat
23 | import nodes
24 | import urllib.request
25 | import numpy as np
26 | import shutil
27 | from .wss import thread_run, update_worker_flow, UploadManager
28 | from .public import get_port_from_cmdline, set_token, is_aspect_ratio_within_limit, get_version, \
29 | set_openid, get_openid, find_project_root, args, get_base_url, get_filenames, get_output, get_workflow, \
30 | find_project_bxb, loca_download_image, delete_workflow, read_json_file, determine_file_type, print_exception_in_chinese, remove_query_parameters, combine_images, get_upload_url, send_binary_data, async_download_image, find_project_custiom_nodes_path
31 | ffmpeg_exe_path = get_ffmpeg_executable()
32 | temp_path = find_project_custiom_nodes_path() + 'ComfyUI_Bxb/temp_bxb/'
33 | if os.path.exists(temp_path):
34 | shutil.rmtree(temp_path)
35 | os.makedirs(temp_path, exist_ok=True)
36 | import threading
37 | from PIL import Image
38 | input_directory = folder_paths.get_input_directory()
39 | os.makedirs(input_directory, exist_ok=True)
40 | save_input_directory = input_directory + '/temp'
41 | os.makedirs(save_input_directory, exist_ok=True)
42 | load_class = 'bxbSwitch'
43 | def get_time():
44 | return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
45 | def get_mac_address():
46 | mac = uuid.getnode()
47 | return ':'.join(('%012X' % mac)[i:i + 2] for i in range(0, 12, 2))
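# The tunnel subdomain is derived deterministically from MAC address + port,
# so the same machine and port always map to the same public subdomain.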
48 | def generate_unique_subdomain(mac_address, port):
49 | unique_key = f"{mac_address}:{port}"
50 | hash_object = hashlib.sha256(unique_key.encode())
51 | subdomain = hash_object.hexdigest()[:12]
52 | return subdomain
53 | def set_executable_permission(file_path):
54 | try:
55 | st = os.stat(file_path)
56 | os.chmod(file_path, st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
57 | print(f"Execution permissions set on {file_path}")
58 | except Exception as e:
59 | print(f"Failed to set execution permissions: {e}")
60 | def download_file(url, dest_path):
61 | try:
62 | with urllib.request.urlopen(url) as response, open(dest_path, 'wb') as out_file:
63 | data = response.read()
64 | out_file.write(data)
65 | except Exception as e:
66 | print(f"Failed to download the file: {e}")
67 | PLUGIN_DIR = os.path.dirname(os.path.abspath(__file__))
68 | SD_CLIENT_DIR = os.path.join(PLUGIN_DIR, "sdc")
69 | SDC_EXECUTABLE = os.path.join(SD_CLIENT_DIR, "sdc" if platform.system() != "Windows" else "sdc.exe")
70 | INI_FILE = os.path.join(SD_CLIENT_DIR, "sdc.toml")
71 | LOG_FILE = os.path.join(SD_CLIENT_DIR, "sdc.log")
72 | class SDClient:
73 | RED = "\033[91m"
74 | RESET = "\033[0m"
75 | def __init__(self, local_port, subdomain):
76 | self.local_port = local_port
77 | self.server_addr = "suidao.9syun.com"
78 | self.server_port = "7000"
79 | self.token = "my_secure_token"
80 | self.subdomain = subdomain
81 | self.sd_process = None
82 | self.connected = False
83 | self.monitoring_thread = None
84 | self.stop_monitoring = False
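# create_sdc_ini writes an frp-style client config: a [common] section for
# the relay server plus one http proxy section exposing the local ComfyUI
# port under the generated subdomain.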
85 | def create_sdc_ini(self, file_path, subdomain):
86 | config_content = f"""
87 | [common]
88 | server_addr = "{self.server_addr}"
89 | server_port = {self.server_port}
90 | token = "{self.token}"
91 | login_fail_exit = false
92 |
93 | [{subdomain}]
94 | type = "http"
95 | local_port = {self.local_port}
96 | subdomain = "{subdomain}"
97 | remote_port = 0
98 | log_file = "{LOG_FILE}"
99 | log_level = "info"
100 | """
101 | with open(file_path, "w") as config_file:
102 | config_file.write(config_content)
103 | def tail_log(self, filename, num_lines=20):
104 | try:
105 | with open(filename, "r") as file:
106 | return deque(file, num_lines)
107 | except FileNotFoundError:
108 | return deque()
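# Connection state is inferred from the log tail: the line right after the
# most recent "try to connect to server" message decides connected vs
# disconnected.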
109 | def check_sd_log_for_status(self):
110 | success_keywords = ["login to server success", "start proxy success"]
111 | failure_keywords = ["connect to server error", "read tcp", "session shutdown"]
112 | connection_attempt_pattern = re.compile(r"try to connect to server")
113 | latest_lines = self.tail_log(LOG_FILE, 20)
114 | connection_attempt_index = None
115 | for index, line in enumerate(latest_lines):
116 | if connection_attempt_pattern.search(line):
117 | connection_attempt_index = index
118 | if connection_attempt_index is not None and connection_attempt_index + 1 < len(latest_lines):
119 | next_line = latest_lines[connection_attempt_index + 1]
120 | for keyword in success_keywords:
121 | if keyword in next_line:
122 | return "connected"
123 | return "disconnected"
124 | return "disconnected"
125 | def check_and_download_executable(self):
126 | if platform.system() != "Windows":
127 | if not os.path.exists(SDC_EXECUTABLE):
128 | download_file("https://tt-1254127940.file.myqcloud.com/tech_huise/66/qita/sdc", SDC_EXECUTABLE)
129 | set_executable_permission(SDC_EXECUTABLE)
130 | def start(self):
131 | self.create_sdc_ini(INI_FILE, self.subdomain)
132 | open(LOG_FILE, "w").close()
133 | env1 = os.environ.copy()
134 | env1['http_proxy'] = ''
135 | env1['https_proxy'] = ''
136 | env1['no_proxy'] = '*'
137 | try:
138 | with open(LOG_FILE, "a") as log_file:
139 | self.sd_process = subprocess.Popen([SDC_EXECUTABLE, "-c", INI_FILE], stdout=log_file, stderr=log_file,
140 | env=env1)
141 | print(f"SD client started with PID: {self.sd_process.pid}")
142 | self.stop_monitoring = False
143 | self.monitoring_thread = threading.Thread(target=self.monitor_connection_status, daemon=True)
144 | self.monitoring_thread.start()
145 | except FileNotFoundError:
146 | print(f"Error: '{SDC_EXECUTABLE}' not found。")
147 | except Exception as e:
148 | print(f"Error starting SD client: {e}")
149 | def monitor_connection_status(self):
150 | while not self.stop_monitoring:
151 | status = self.check_sd_log_for_status()
152 | if status == "connected":
153 | if not self.connected:
154 | print(f"SD client successfully connected with PID: {self.sd_process.pid}")
155 | self.connected = True
156 | else:
157 | if self.connected:
158 | print(f"{self.RED}Waiting for SD client to connect...{self.RESET}")
159 | self.connected = False
160 | time.sleep(1)
161 | def stop(self):
162 | if self.sd_process and self.sd_process.poll() is None:
163 | self.sd_process.terminate()
164 | self.sd_process.wait()
165 | print("SD client stopped。")
166 | else:
167 | print("SD client is not running。")
168 | self.connected = False
169 | self.stop_monitoring = True
170 | def is_connected(self):
171 | return self.connected
172 | def clear_log(self):
173 | if os.path.exists(LOG_FILE):
174 | open(LOG_FILE, "w").close()
175 | print("SD client log cleared。")
176 | subdomain = ""
177 | websocket = None
178 | if platform.system() != "Darwin":
179 | local_port = get_port_from_cmdline()
180 | subdomain = generate_unique_subdomain(get_mac_address(), local_port)
181 | SDC_EXECUTABLE = os.path.join(SD_CLIENT_DIR, "sdc" if platform.system() != "Windows" else "sdc.exe")
182 | if os.path.exists(SDC_EXECUTABLE):
183 | sd_client = SDClient(local_port=local_port, subdomain=subdomain)
184 | sd_client.start()
185 | thread_run()
186 | def extract_and_verify_images(output):
187 | results = {}
188 | app_img_keys = []
189 | for key, node in output.items():
190 | if node["class_type"] == "sdBxb":
191 | inputs = node.get("inputs", {})
192 | for k, v in inputs.items():
193 | if k.startswith("app_img") and isinstance(v, list) and len(v) > 0:
194 | app_img_keys.append((k, v[0]))
195 | err = 0
196 | err_msg = ''
197 | for app_img_key, img_key in app_img_keys:
198 | if str(img_key) in output:
199 | image_node = output[str(img_key)]
200 | image_path = image_node.get("inputs", {}).get("image")
201 | if image_path:
202 | if verify_image_exists(folder_paths.get_input_directory() + '/' + image_path):
203 | results[app_img_key] = {"image_path": image_path, "status": "image exists"}
204 | else:
205 | err = err + 1
206 | err_msg = err_msg + f"image not found: {app_img_key}\n"
207 | else:
208 | err = err + 1
209 | err_msg = err_msg + f"image not found: {app_img_key}\n"
210 | else:
211 | err = err + 1
212 | err_msg = err_msg + f"image not found: {app_img_key}\n"
213 | return {
214 | "results": results,
215 | "err": err,
216 | "err_msg": err_msg
217 | }
218 | def verify_image_exists(path):
219 | if os.path.exists(path):
220 | valid_extensions = {".jpg", ".jpeg", ".png", ".gif"}
221 | ext = os.path.splitext(path)[1].lower()
222 | if ext in valid_extensions:
223 | return True
224 | return False
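# /manager/tech_zhulu proxies packaging requests to the remote backend,
# attaching the tunnel subdomain and, on success, persisting the returned
# workflow under config/json/ for later execution.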
225 | @server.PromptServer.instance.routes.post("/manager/tech_zhulu")
226 | async def tech_zhulu(request):
227 | json_data = await request.json()
228 | if 'postData' in json_data and isinstance(json_data['postData'], dict):
229 | json_data['postData']['subdomain'] = subdomain
230 | connector = aiohttp.TCPConnector()
231 | async with aiohttp.ClientSession(connector=connector) as session:
232 | json_data['version'] = get_version()
233 | techsid = json_data.get('comfyui_tid', '')
234 | upload_url = get_base_url() + json_data['r'] + '&techsid=we7sid-' + techsid
235 | if json_data['r'] == 'comfyui.apiv2.upload':
236 | err_info = {
237 | "errno": 0,
238 | "message": "ERROR",
239 | "data": {
240 | "data": {
241 | "message": '该节点已废弃,请刷新浏览器后,点击屏幕右上角封装应用',
242 | "code": 0,
243 | }
244 | }
245 | }
246 | return web.Response(status=200, text=json.dumps(err_info))
247 | output = json_data['postData']['output']
248 | workflow = json_data['postData']['workflow']
249 | try:
250 | output_verify = extract_and_verify_images(output)
251 | if output_verify['err'] > 0:
252 | err_info = {
253 | "errno": 0,
254 | "message": "ERROR",
255 | "data": {
256 | "data": {
257 | "message": output_verify['err_msg'],
258 | "code": 0,
259 | }
260 | }
261 | }
262 | return web.Response(status=200, text=json.dumps(err_info))
263 | json_data['postData'].pop('output')
264 | json_data['postData'].pop('workflow')
265 | form_data = aiohttp.FormData()
266 | form_data.add_field('json_data', json.dumps(json_data))
267 | if 'zhutus' in json_data['postData']:
268 | for item in json_data['postData']['zhutus']:
269 | with open(folder_paths.get_input_directory() + '/' + item, 'rb') as f:
270 | file_content = f.read()
271 | form_data.add_field('zhutus[]', file_content, filename=os.path.basename(item),
272 | content_type='application/octet-stream')
273 | except Exception as e:
274 | return web.Response(status=200, text=str(e))  # Response text must be a str, not an exception object
275 | async with session.post(upload_url, data=form_data) as response:
276 | try:
277 | response_result = await response.text()
278 | result = json.loads(response_result)
279 | if 'data' in result and isinstance(result['data'], dict):
280 | if 'data' in result['data'] and isinstance(result['data']['data'], dict):
281 | result_data = result['data']['data']
282 | if techsid != '' and techsid != 'init' and result_data['code'] == 1:
283 | await update_worker_flow(result_data['name'], output)
284 | await update_worker_flow(result_data['name'], workflow, 'workflow/')
285 | return web.Response(status=response.status, text=response_result)
286 | else:
287 | return web.Response(status=response.status, text=await response.text())
288 | except json.JSONDecodeError as e:
289 | return web.Response(status=response.status, text=await response.text())
290 | else:
291 | async with session.post(upload_url, json=json_data) as resp:
292 | if resp.status == 200 and resp.headers.get('Content-Type') == 'application/json':
293 | try:
294 | other_api_data = await resp.json()
295 | result = web.json_response(other_api_data)
296 | return result
297 | except aiohttp.ContentTypeError:
298 | error_text = await resp.text()
299 | return web.Response(text=error_text, status=400)
300 | if resp.status == 200 and resp.headers.get('Content-Type') == 'text/html; charset=utf-8':
301 | try:
302 | result = await resp.text()
303 | result = json.loads(result)
304 | return web.json_response(result)
305 | except json.JSONDecodeError as e:
306 | return web.Response(status=resp.status, text=await resp.text())
307 | else:
308 | return web.Response(status=resp.status, text=await resp.text())
309 | @server.PromptServer.instance.routes.post("/manager/auth")
310 | async def auth(request):
311 | return web.json_response({'message': 'success', 'token': ''})
312 | pass
313 | @server.PromptServer.instance.routes.post("/manager/get_seep")
314 | async def get_seep(request):
315 | line_json = read_json_file('https://tt.9syun.com/seed.json')
316 | return web.json_response({'message': 'success', 'data': line_json})
317 | @server.PromptServer.instance.routes.post("/manager/download_fileloadd")
318 | async def download_fileloadd(request):
319 | json_data = await request.json()
320 | if json_data.get('url'):
321 | filename = os.path.basename(json_data['url'])
322 | download_info = await loca_download_image(json_data['url'], filename, 1)
323 | if download_info['code']:
324 | file_new_name = download_info['filename']
325 | return web.Response(status=200, text=json.dumps({
326 | "code": 1,
327 | "msg": "文件下载成功",
328 | "data": {
329 | "filename": file_new_name,
330 | "subfolder": '',
331 | "type": 'input'
332 | }
333 | }))
334 | return web.Response(status=500, text=json.dumps({
335 | "code": 0,
336 | "msg": "文件下载失败",
337 | "data": {
338 | }
339 | }))
340 | else:
341 | return web.Response(status=500, text=json.dumps({
342 | "code": 0,
343 | "msg": "文件下载失败",
344 | "data": {
345 | }
346 | }))
347 | pass
348 | async def process_download_tasks(yu_load_images):
349 | # download left/right images concurrently, then map results back onto each item
350 | loop = asyncio.get_event_loop()
351 | with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
352 | download_tasks = []
353 | for image_info in yu_load_images:
354 | download_tasks.append(loop.run_in_executor(executor, async_download_image, image_info['left_image'], image_info['left_image'], 1))
355 | download_tasks.append(loop.run_in_executor(executor, async_download_image, image_info['right_image'], image_info['right_image'], 1))
356 | download_results = await asyncio.gather(*download_tasks)
357 | index = 0
358 | for image_info in yu_load_images:
359 | left_info = download_results[index]
360 | right_info = download_results[index + 1]
361 | index += 2
362 | if left_info['code']:
363 | image_info['left_image'] = {
364 | "filename": left_info['filename'],
365 | "subfolder": '',
366 | "type": 'input'
367 | }
368 | else:
369 | image_info['left_image'] = ''
370 | if right_info['code']:
371 | image_info['right_image'] = {
372 | "filename": right_info['filename'],
373 | "subfolder": '',
374 | "type": 'input'
375 | }
376 | else:
377 | image_info['right_image'] = ''
378 | return yu_load_images
379 | def process_zhutu(image_info, base_image1, base_image2, base_image3):
380 | # compose the cover image ("zhutu") from whichever left/right halves are present
381 | if image_info['left_image'] != '':  # "is not ''" would compare identity, not value
382 | left_image = image_info['left_image'].get('filename', '')
383 | else:
384 | left_image = image_info['left_image']
385 | if image_info['right_image'] != '':
386 | right_image = image_info['right_image'].get('filename', '')
387 | else:
388 | right_image = image_info['right_image']
389 | overlay_img = ''
390 | if left_image != '':
391 | left_image = folder_paths.get_input_directory() + '/' + left_image
392 | overlay_img = base_image1
393 | if right_image != '':
394 | right_image = folder_paths.get_input_directory() + '/' + right_image
395 | overlay_img = base_image2
396 | if left_image != '' and right_image != '':
397 | overlay_img = base_image3
398 | zhutu_info = do_zhutu(left_image, right_image, overlay_img)
399 | if zhutu_info['code'] == 0:
400 | image_info['result'] = {
401 | "code": 0,
402 | "msg": "成功",
403 | "data": zhutu_info['data'],
404 | "filename": zhutu_info['filename'],
405 | 'type': zhutu_info['type'],
406 | 'mime_type': 'image/png' if zhutu_info['type'] == 'image' else 'video/mp4',
407 | 'size': zhutu_info['size'],
408 | 'base_size': zhutu_info['base_size']
409 | }
410 | return image_info
411 | return None
412 | async def process_images_multithread(updated_images, base_image1, base_image2, base_image3):
413 | # fan process_zhutu out over a thread pool and drop items that failed
414 | with concurrent.futures.ThreadPoolExecutor(max_workers=8) as executor:
415 | loop = asyncio.get_event_loop()
416 | tasks = [
417 | loop.run_in_executor(
418 | executor,
419 | process_zhutu,
420 | image_info,
421 | base_image1,
422 | base_image2,
423 | base_image3
424 | )
425 | for image_info in updated_images
426 | ]
427 | results = await asyncio.gather(*tasks)
428 | updated_images = [result for result in results if result is not None]
429 | return updated_images
430 | @server.PromptServer.instance.routes.post("/manager/download_fileloads")
431 | async def download_fileloads(request):
432 | yu_load_images = await request.json()
433 | updated_images = await process_download_tasks(yu_load_images)
434 | base_image = find_project_bxb() + 'assets/image/bg-image.png'
435 | base_image1 = find_project_bxb() + 'assets/image/bg-image1.png'
436 | base_image2 = find_project_bxb() + 'assets/image/bg-image2.png'
437 | base_image3 = find_project_bxb() + 'assets/image/bg-image3.png'
438 | processed_images = await process_images_multithread(updated_images, base_image1, base_image2, base_image3)
439 | return web.Response(status=200, text=json.dumps({
440 | "code": 1,
441 | "msg": "文件下载成功",
442 | "data": processed_images
443 | }))
444 | @server.PromptServer.instance.routes.post("/manager/image_serialize")
445 | async def image_serialize(request):
446 | json_data = await request.json()
447 | out_put_directory = folder_paths.get_output_directory()
448 | base_serialized = 0
449 | for index, item in enumerate(json_data):
450 | if item['info']['subfolder'] != '':
451 | item['info']['filename'] = item['info']['subfolder'] + '/' + item['info']['filename']
452 | mine_type, file_type = determine_file_type(out_put_directory + '/' + item['info']['filename'])
453 | if file_type == 'video':
454 | width1, height1, size_mb = get_video_dimensions(out_put_directory + '/' + item['info']['filename'])
455 | else:
456 | width1, height1, size_mb = get_image_dimensions(out_put_directory + '/' + item['info']['filename'])
457 | item['width'] = width1
458 | item['height'] = height1
459 | item['size'] = size_mb
460 | item['file_type'] = file_type
461 | item['mine_type'] = mine_type
462 | base_serialized = base_serialized + size_mb
463 | return web.Response(status=200, text=json.dumps({
464 | "code": 0,
465 | "msg": "文件类型未知",
466 | "data": {
467 | "data": {
468 | "code": 0,
469 | "data": {
470 | "base_serialized": round(float(base_serialized), 6),
471 | "worker_list": json_data,
472 | "total": len(json_data)
473 | },
474 | "message": "ok",
475 | }
476 | }
477 | }))
478 | @server.PromptServer.instance.routes.post("/manager/save_work")
479 | async def save_work(request):
480 | json_data = await request.json()
481 | if 'postData' in json_data and isinstance(json_data['postData'], dict):
482 | json_data['postData']['subdomain'] = subdomain
483 | timeout = aiohttp.ClientTimeout(total=60)
484 | connector = aiohttp.TCPConnector(use_dns_cache=False)
485 | async with aiohttp.ClientSession(timeout=timeout, connector=connector) as session:
486 | json_data['version'] = get_version()
487 | techsid = json_data.get('comfyui_tid', '')
488 | upload_url = get_base_url() + 'comfyui.apiv2.upload&techsid=we7sid-' + techsid
489 | output = json_data['postData']['output']
490 | workflow = json_data['postData']['workflow']
491 | json_data['postData'].pop('output')
492 | json_data['postData'].pop('workflow')
493 | json_data['postData']['auth'] = []
494 | post_uir_arr = []
495 | post_file_arr = []
496 | form_data = aiohttp.FormData()
497 | try:
498 | input_dir = folder_paths.get_input_directory()
499 | def get_full_filename(subfolder, filename):
500 | return os.path.join(subfolder, filename) if subfolder else filename
501 | if 'zhutus' in json_data['postData']:
502 | for item in json_data['postData']['zhutus']:
503 | item['filename'] = get_full_filename(item.get('subfolder', ''), item['filename'])
504 | with open(os.path.join(input_dir, item['filename']), 'rb') as f:
505 | file_content = f.read()
506 | form_data.add_field('zhutus[]', file_content, filename=os.path.basename(item['filename']),
507 | content_type='application/octet-stream')
508 | for index, item in enumerate(json_data['postData']['zhutu_data']):
509 | if item['url'] == '':
510 | item_url_info = {}
511 | item_url_file = {}
512 | if int(item['upload_type']) == 1:
513 | item['file']['filename'] = get_full_filename(item['file'].get('subfolder', ''), item['file']['filename'])
514 | item_url_info = {
515 | 'url': item['mime_type'],
516 | 'file_type': item['file_type'],
517 | 'width': item['size_info']['width'],
518 | 'height': item['size_info']['height'],
519 | 'ratio': item['size_info']['height'] / item['size_info']['width'],
520 | 'upload_type': 1,
521 | 'urls': [],
522 | 'type': 'zhutu',
523 | 'index': index
524 | }
525 | item_url_file = {
526 | 'url': item['file']['filename'],
527 | 'upload_type': 1,
528 | 'urls': [],
529 | 'type': 'zhutu',
530 | 'index': index
531 | }
532 | if item['file_url']['right_image'] == '':
533 | item['file_value']['right_image']['filename'] = get_full_filename(item['file_value']['right_image'].get('subfolder', ''), item['file_value']['right_image']['filename'])
534 | right_mime_type = item['base_size']['right']['mime_type']
535 | right_width = item['base_size']['right']['width']
536 | right_height = item['base_size']['right']['height']
537 | right_ratio = right_height / right_width
538 | item_url_info['urls'].append({
539 | 'url': right_mime_type,
540 | 'width': right_width,
541 | 'height': right_height,
542 | 'ratio': right_ratio,
543 | 'type': 'right'
544 | })
545 | item_url_file['urls'].append({
546 | 'url': item['file_value']['right_image']['filename'],
547 | 'type': 'right'
548 | })
549 | if item['file_url']['left_image'] == '':
550 | item['file_value']['left_image']['filename'] = get_full_filename(item['file_value']['left_image'].get('subfolder', ''), item['file_value']['left_image']['filename'])
551 | left_mime_type = item['base_size']['left']['mime_type']
552 | file_type = item['base_size']['left']['file_type']
553 | left_width = item['base_size']['left']['width']
554 | left_height = item['base_size']['left']['height']
555 | left_ratio = left_height / left_width
556 | if 'upload_type' in item_url_info:
557 | item_url_info['urls'].append({
558 | 'url': left_mime_type,
559 | 'width': left_width,
560 | 'height': left_height,
561 | 'ratio': left_ratio,
562 | 'type': 'left'
563 | })
564 | item_url_file['urls'].append({
565 | 'url': item['file_value']['left_image']['filename'],
566 | 'type': 'left'
567 | })
568 | else:
569 | item_url_info = {
570 | 'url': left_mime_type,
571 | 'file_type': file_type,
572 | 'width': left_width,
573 | 'height': left_height,
574 | 'ratio': left_ratio,
575 | 'upload_type': item['upload_type'],
576 | 'urls': [],
577 | 'type': 'zhutu',
578 | 'index': index
579 | }
580 | item_url_file = {
581 | 'url': item['file_value']['left_image']['filename'],
582 | 'upload_type': item['upload_type'],
583 | 'urls': [],
584 | 'type': 'zhutu',
585 | 'index': index
586 | }
587 | if 'upload_type' in item_url_info and item_url_info['file_type'] == 'video':
588 | frame_contents = extract_frames(os.path.join(input_dir, item_url_file['url']))
589 | for frame_content in frame_contents:
590 | item_url_info['urls'].append({
591 | 'url': 'image/png',
592 | 'width': item_url_info['width'],
593 | 'height': item_url_info['height'],
594 | 'ratio': item_url_info['height'] / item_url_info['width'],
595 | 'type': 'frame'
596 | })
597 | item_url_file['urls'].append({
598 | 'url': frame_content,
599 | 'type': 'frame'
600 | })
601 | if 'upload_type' in item_url_info:
602 | if not is_aspect_ratio_within_limit(item_url_info['width'], item_url_info['height']):
603 | return web.Response(status=200, text=json.dumps({
604 | "code": 0,
605 | "msg": "文件类型未知",
606 | "data": {
607 | "data": {
608 | "code": 0,
609 | "data": {
610 | 'index': index,
611 | 'type': 'zhutu_data',
612 | },
613 | "message": "文件长边不可超过短边4倍",
614 | }
615 | }
616 | }))
617 | post_uir_arr.append(item_url_info)
618 | post_file_arr.append(item_url_file)
619 | if 'check_output_item' in json_data['postData']:
620 | for index, item in enumerate(json_data['postData']['check_output_item']):
621 | if (item['input_type'] in ['image', 'video']) and item['file_defult_value'] == 1 and item['default_value'] == '':
622 | item['file_value']['filename'] = get_full_filename(item['file_value'].get('subfolder', ''), item['file_value']['filename'])
623 | mine_type, file_type = determine_file_type(os.path.join(input_dir, item['file_value']['filename']))
624 | if file_type == 'unknown':
625 | return web.Response(status=200, text=json.dumps({
626 | "code": 0,
627 | "msg": "文件类型未知",
628 | "data": {
629 | "data": {
630 | "code": 0,
631 | "message": "文件类型未知",
632 | }
633 | }
634 | }))
635 | if file_type == 'video':
636 | width1, height1, size_mb = get_video_dimensions(os.path.join(input_dir, item['file_value']['filename']))
637 | else:
638 | width1, height1, size_mb = get_image_dimensions(os.path.join(input_dir, item['file_value']['filename']))
639 | if not is_aspect_ratio_within_limit(width1, height1):
640 | return web.Response(status=200, text=json.dumps({
641 | "code": 0,
642 | "msg": "文件类型未知",
643 | "data": {
644 | "data": {
645 | "code": 0,
646 | "data": {
647 | 'index': index,
648 | 'type': 'default_value',
649 | },
650 | "message": "文件长边不可超过短边4倍",
651 | }
652 | }
653 | }))
654 | item_url_info = {
655 | 'url': mine_type,
656 | 'file_type': file_type,
657 | 'width': width1,
658 | 'height': height1,
659 | 'urls': [],
660 | 'ratio': height1 / width1,
661 | 'type': 'output',
662 | 'index': index
663 | }
664 | item_url_file = {
665 | 'url': item['file_value']['filename'],
666 | 'file_type': file_type,
667 | 'type': 'output',
668 | 'urls': [],
669 | 'index': index
670 | }
671 | if file_type == 'video':
672 | frame_contents = extract_frames(os.path.join(input_dir, item['file_value']['filename']))
673 | for frame_content in frame_contents:
674 | item_url_info['urls'].append({
675 | 'url': 'image/png',
676 | 'width': width1,
677 | 'height': height1,
678 | 'ratio': height1 / width1,
679 | 'type': 'frame'
680 | })
681 | item_url_file['urls'].append({
682 | 'url': frame_content,
683 | 'type': 'frame'
684 | })
685 | post_uir_arr.append(item_url_info)
686 | post_file_arr.append(item_url_file)
687 | except Exception as e:
688 | print_exception_in_chinese(e)
689 | return web.Response(status=200, text=str(e))
690 | image_info_list = []
691 | if len(post_uir_arr) > 0:
692 | for index, item in enumerate(post_uir_arr):
693 | if 'file_type' in item and item['file_type'] == 'video':
694 | for key, val in enumerate(item['urls']):
695 | if val['type'] == 'frame':
696 | image_info_list.append({
697 | 'type': 'binary',
698 | 'content': post_file_arr[index]['urls'][key]['url']
699 | })
700 | else:
701 | image_info_list.append({
702 | 'type': 'path',
703 | 'content': folder_paths.get_input_directory() + '/' + post_file_arr[index]['url']
704 | })
705 | binary_data_list = combine_images(image_info_list)
706 | for binary_data in binary_data_list:
707 | post_uir_arr.append({
708 | 'url': 'image/png',
709 | 'file_type': 'image',
710 | 'width': '',
711 | 'height': '',
712 | 'ratio': 1,
713 | 'upload_type': 0,
714 | 'urls': [],
715 | 'type': 'auth',
716 | 'index': 0
717 | })
718 | post_file_arr.append({
719 | 'url': binary_data,
720 | 'upload_type': 0,
721 | 'urls': [],
722 | 'type': 'auth',
723 | 'index': 0
724 | })
725 | url_result = await get_upload_url(post_uir_arr, techsid, session)
726 | if url_result['errno'] == 41009:
727 | return web.Response(status=200, text=json.dumps(url_result))
728 | try:
729 | manager = UploadManager(session, url_result, post_file_arr, post_uir_arr, folder_paths.get_input_directory())
730 | manager.start_sync()
731 | json_arr, auth_arr, url_result_data = manager.get()
732 | except Exception as e:
733 | print_exception_in_chinese(e)
734 | return web.Response(status=200, text=json.dumps({
735 | "code": 0,
736 | "msg": "上传失败",
737 | "data": {
738 | "data": {
739 | "code": 0,
740 | "data": {
741 | },
742 | "message": "资源上传失败了,请重新上传,确保网络状态,如果使用了代理建议关闭代理上传",
743 | }
744 | }
745 | }))
746 | zhutu_data = json_data['postData']['zhutu_data']
747 | acs_list = url_result_data
748 | for index, item in enumerate(acs_list):
749 | if item['type'] == 'zhutu':
750 | zhutu_data[item['index']]['url_frame'] = []
751 | zhutu_data[item['index']]['url_ratio'] = item['ratio']
752 | zhutu_data[item['index']]['url_type'] = item['file_type']
753 | zhutu_data[item['index']]['url_fm'] = ''
754 | if item['upload_type'] == 2:
755 | zhutu_data[item['index']]['file_url']['left_image'] = item['url']
756 | zhutu_data[item['index']]['url'] = item['url']
757 | else:
758 | zhutu_data[item['index']]['url'] = item['url']
759 | for key, value in enumerate(item['urls']):
760 | if value['type'] == 'frame':
761 | zhutu_data[item['index']]['url_frame'].append(value['url'])
762 | if zhutu_data[item['index']]['url_fm'] == '':
763 | zhutu_data[item['index']]['url_fm'] = value['url']
764 | if value['type'] == 'left':
765 | zhutu_data[item['index']]['file_url']['left_image'] = value['url']
766 | if value['type'] == 'right':
767 | zhutu_data[item['index']]['file_url']['right_image'] = value['url']
768 | if item['type'] == 'output':
769 | json_data['postData']['check_output_item'][item['index']]['file_type'] = item['file_type']
770 | json_data['postData']['check_output_item'][item['index']]['default_value'] = item['url']
771 | json_data['postData']['check_output_item'][item['index']]['default_value_fm'] = ''
772 | json_data['postData']['check_output_item'][item['index']]['default_value_ratio'] = item['ratio']
773 | json_data['postData']['check_output_item'][item['index']]['default_value_frame'] = []
774 | for key, value in enumerate(item['urls']):
775 | if value['type'] == 'frame':
776 | json_data['postData']['check_output_item'][item['index']]['default_value_frame'].append(value['url'])
777 | if json_data['postData']['check_output_item'][item['index']]['default_value_fm'] == '':
778 | json_data['postData']['check_output_item'][item['index']]['default_value_fm'] = value['url']
779 | if json_data['postData']['check_output_item'][item['index']]['default_value_fm'] == '':
780 | json_data['postData']['check_output_item'][item['index']]['default_value_fm'] = item['url']
781 | if item['type'] == 'auth':
782 | json_data['postData']['auth'].append(item['url'])
783 | json_data['postData']['zhutu_data'] = zhutu_data
784 | form_data.add_field('json_data', json.dumps(json_data))
785 | async with session.post(upload_url, data=form_data) as response:
786 | try:
787 | response_result = await response.text()
788 | result = json.loads(response_result)
789 | if 'data' in result and isinstance(result['data'], dict):
790 | if 'data' in result['data'] and isinstance(result['data']['data'], dict):
791 | result_data = result['data']['data']
792 | if techsid != '' and techsid != 'init' and result_data['code'] == 1:
793 | await update_worker_flow(result_data['name'], output)
794 | await update_worker_flow(result_data['name'], workflow, 'workflow/')
795 | return web.Response(status=response.status, text=response_result)
796 | else:
797 | return web.Response(status=response.status, text=await response.text())
798 | except json.JSONDecodeError as e:
799 | return web.Response(status=response.status, text=await response.text())
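# /manager/not_widgets fetches a remote script with any system proxy disabled,
# since a misconfigured proxy is the usual cause of load failures here.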
800 | @server.PromptServer.instance.routes.get("/manager/not_widgets")
801 | async def not_widgets(request):
802 | remote_url = 'https://tt.9syun.com/not_widgets.js?time=' + str(int(time.time()))
803 | proxy_handler = urllib.request.ProxyHandler({})
804 | opener = urllib.request.build_opener(proxy_handler)
805 | urllib.request.install_opener(opener)
806 | try:
807 | with opener.open(remote_url) as response:
808 | script_content = response.read().decode('utf-8')
809 | return web.Response(status=200, text=script_content, content_type='application/javascript')
810 | except Exception as e:
811 | print('Failed to load the resource; check the network or disable the proxy')
812 | return web.Response(status=500, text=str(e))
813 | @server.PromptServer.instance.routes.post("/manager/do_upload")
814 | async def do_upload(request):
815 | json_data = await request.json()
816 | header_image = json_data['header_image']
817 | techsid = json_data.get('comfyui_tid', '')
818 | upload_url = 'https://tt.9syun.com/app/index.php?i=66&t=0&v=1.0&from=wxapp&tech_client=sj&c=entry&a=wxapp&do=ttapp&r=upload&techsid=we7sid-' + techsid + '&m=tech_huise&sign=ceccdd172de0cc2b8d20fc0c08e53707'
819 | connector = aiohttp.TCPConnector()
820 | async with aiohttp.ClientSession(connector=connector) as session:
821 | try:
822 | form_data = aiohttp.FormData()
823 | if header_image['subfolder'] != '':
824 | header_image['filename'] = header_image['subfolder'] + '/' + header_image['filename']
825 | with open(folder_paths.get_input_directory() + '/' + header_image['filename'], 'rb') as f:
826 | file_content = f.read()
827 | form_data.add_field('file', file_content, filename=os.path.basename(header_image['filename']),
828 | content_type='application/octet-stream')
829 | except Exception as e:
830 | return web.Response(status=200, text=str(e))
831 | async with session.post(upload_url, data=form_data) as response:
832 | try:
833 | response_result = await response.text()
834 | result = json.loads(response_result)
835 | if 'data' in result and isinstance(result['data'], dict):
836 | return web.Response(status=response.status, text=response_result)
837 | else:
838 | return web.Response(status=response.status, text=await response.text())
839 | except json.JSONDecodeError as e:
840 | return web.Response(status=response.status, text=await response.text())
841 | pass
842 | @server.PromptServer.instance.routes.post("/manager/do_service")
843 | async def do_service(request):
844 | return await handle_request(await request.json())
845 | @server.PromptServer.instance.routes.post("/manager/upload_file_to_zhutu")
846 | async def do_service_upload(request):
847 | json_data = await request.json()
848 | left_image = json_data.get('left_image', '')
849 | right_image = json_data.get('right_image', '')
850 | if left_image == '' and right_image == '':
851 | result = {
852 | "code": 0,
853 | "msg": "请上传图片",
854 | "data": {}
855 | }
856 | return web.Response(status=200, text=json.dumps(result))
857 | base_image = find_project_bxb() + 'assets/image/bg-image.png'
858 | base_image1 = find_project_bxb() + 'assets/image/bg-image1.png'
859 | base_image2 = find_project_bxb() + 'assets/image/bg-image2.png'
860 | base_image3 = find_project_bxb() + 'assets/image/bg-image3.png'
861 | overlay_img = ''
862 | if left_image != '':
863 | left_image = folder_paths.get_input_directory() + '/' + left_image
864 | overlay_img = base_image1
865 | if right_image != '':
866 | right_image = folder_paths.get_input_directory() + '/' + right_image
867 | overlay_img = base_image2
868 | if left_image != '' and right_image != '':
869 | overlay_img = base_image3
870 | zhutu_info = do_zhutu(left_image, right_image, overlay_img)
871 | if zhutu_info['code'] == 0:
872 | result = {
873 | "code": 0,
874 | "msg": "成功",
875 | "data": zhutu_info['data'],
876 | "filename": zhutu_info['filename'],
877 | 'type': zhutu_info['type'],
878 | 'mime_type': 'image/png' if zhutu_info['type'] == 'image' else 'video/mp4',
879 | 'size': zhutu_info['size'],
880 | 'base_size': zhutu_info['base_size']
881 | }
882 | else:
883 | result = {
884 | "code": 1,
885 | "msg": zhutu_info['error'],
886 | "data": {}
887 | }
888 | return web.Response(status=200, text=json.dumps(result))
889 | async def handle_request(json_data):
890 | path_param = json_data.get('r', '')
891 | json_data.pop('r')
892 | connector = aiohttp.TCPConnector()
893 | async with aiohttp.ClientSession(connector=connector) as session:
894 | techsid = json_data.get('comfyui_tid', '')
895 | upload_url = f"{get_base_url()}{path_param}&techsid=we7sid-{techsid}"
896 | try:
897 | form_data = aiohttp.FormData()
898 | for key, value in json_data.items():
899 | form_data.add_field(key, value)
900 | except Exception as e:
901 | return web.Response(status=200, text=str(e))
902 | try:
903 | async with session.post(upload_url, data=form_data) as response:
904 | response_result = await response.text()
905 | try:
906 | result = json.loads(response_result)
907 | except json.JSONDecodeError:
908 | return web.Response(status=response.status, text=response_result)
909 | if 'data' in result and isinstance(result['data'], dict):
910 | if path_param == 'shangjia.sjindex.delete':
911 | delete_workflow(json_data.get('uniqueid', '') + '.json')
912 |
913 | return web.Response(status=response.status, text=json.dumps(result['data']))
914 | else:
915 | return web.Response(status=response.status, text=json.dumps(result))
916 | except Exception as e:
917 | return web.Response(status=200, text=str(e))  # 'response' may be unbound here if session.post itself raised
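# Direct upload into ComfyUI's input directory, streamed chunk by chunk so
# large files are never buffered in memory all at once.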
918 | @server.PromptServer.instance.routes.post("/manager/upload_file")
919 | async def upload_file(request):
920 | reader = await request.multipart()
921 | field = await reader.next()
922 | if field is None or field.name != 'file':
923 | return web.json_response({'error': 'No file part'}, status=400)
924 | filename = field.filename
925 | if not filename:
926 | return web.json_response({'error': 'No selected file'}, status=400)
927 | file_path = os.path.join(folder_paths.get_input_directory(), os.path.basename(filename))  # basename guards against path traversal
928 | with open(file_path, 'wb') as f:
929 | while True:
930 | chunk = await field.read_chunk()
931 | if not chunk:
932 | break
933 | f.write(chunk)
934 | file_url = file_path.replace(folder_paths.get_input_directory(), '')
935 | return web.json_response({'message': 'File uploaded successfully', 'file_path': file_url})
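# Standalone aiohttp app wiring the same upload route with permissive CORS.
# Note that this app object is configured here but never started in this
# module; the live route is the one registered on PromptServer above.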
936 | app = web.Application()
937 | app.router.add_post("/manager/upload_file", upload_file)
938 | cors = aiohttp_cors.setup(app, defaults={
939 | "*": aiohttp_cors.ResourceOptions(
940 | allow_credentials=True,
941 | expose_headers="*",
942 | allow_headers="*",
943 | )
944 | })
945 | for route in list(app.router.routes()):
946 | cors.add(route)
947 | @server.PromptServer.instance.routes.post("/manager/do_wss")
948 | async def do_wss(request):
949 | pass
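# Workflow listing routes: published workflows are stored as JSON files under
# config/json/workflow/, keyed by their unique id.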
950 | @server.PromptServer.instance.routes.post("/manager/get_workers")
951 | async def get_workers(request):
952 | file_names = get_filenames(find_project_custiom_nodes_path() + 'ComfyUI_Bxb/config/json/workflow/')
953 | return web.json_response({'message': 'Fetched all works', 'worker_names': file_names})
954 |
955 | @server.PromptServer.instance.routes.post("/manager/get_workers_detail")
956 | async def get_workers_detail(request):
957 | json_data = await request.json()
958 | workflow = get_workflow(json_data.get('uniqueid', '') + '.json')
959 | return web.json_response({'message': 'Fetched the specified work', 'workflow': workflow})
960 |
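# Publishing node: sdBxb only declares the widgets (title, description, fee,
# free uses, plus optional preview/custom slots) that describe a work. It has
# no outputs; presumably the front end reads these widget values when the
# workflow is published.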
961 | class sdBxb:
962 | def __init__(self):
963 | pass
964 | @classmethod
965 | def INPUT_TYPES(s):
966 | return {
967 | "required": {
968 | "app_title": ("STRING", {
969 | "multiline": False,
970 | "default": "This is the default work title; edit it in ComfyUI",
971 | "placeholder": ""
972 | }),
973 | "app_desc": ("STRING", {
974 | "multiline": False,
975 | "default": "This is the default feature description; edit it in ComfyUI",
976 | "placeholder": ""
977 | }),
978 | "app_fee": ("INT", {
979 | "default": 18,
980 | "min": 0,
981 | "max": 999999,
982 | "step": 1,
983 | "display": "number"
984 | }),
985 | "free_times": ("INT", {
986 | "default": 0,
987 | "min": 0,
988 | "max": 999999,
989 | "step": 1,
990 | "display": "number"
991 | }),
992 | },
993 | "optional": {
994 | "app_img1(optional)": ("IMAGE",),
995 | "app_img2(optional)": ("IMAGE",),
996 | "app_img3(optional)": ("IMAGE",),
997 | "custom_img1(optional)": ("IMAGE",),
998 | "custom_img2(optional)": ("IMAGE",),
999 | "custom_img3(optional)": ("IMAGE",),
1000 | "custom_video1(optional)": ("IMAGE",),
1001 | "custom_video2(optional)": ("IMAGE",),
1002 | "custom_video3(optional)": ("IMAGE",),
1003 | "custom_text1(optional)": ("STRING", {
1004 | "multiline": False,
1005 | "forceInput": True,
1006 | "dynamicPrompts": False
1007 | }),
1008 | "custom_text2(optional)": ("STRING", {
1009 | "multiline": False,
1010 | "forceInput": True,
1011 | "dynamicPrompts": False
1012 | }),
1013 | "custom_text3(optional)": ("STRING", {
1014 | "multiline": False,
1015 | "forceInput": True,
1016 | "dynamicPrompts": False
1017 | }),
1018 | "custom_img1_desc": ("STRING", {
1019 | "multiline": False,
1020 | "default": "Please upload an image"
1021 | }),
1022 | "custom_img2_desc": ("STRING", {
1023 | "multiline": False,
1024 | "default": "Please upload an image"
1025 | }),
1026 | "custom_img3_desc": ("STRING", {
1027 | "multiline": False,
1028 | "default": "Please upload an image"
1029 | }),
1030 | "custom_video1_desc": ("STRING", {
1031 | "multiline": False,
1032 | "default": "Please upload a video"
1033 | }),
1034 | "custom_video2_desc": ("STRING", {
1035 | "multiline": False,
1036 | "default": "Please upload a video"
1037 | }),
1038 | "custom_video3_desc": ("STRING", {
1039 | "multiline": False,
1040 | "default": "Please upload a video"
1041 | }),
1042 | "custom_text1_desc": ("STRING", {
1043 | "multiline": False,
1044 | "default": "Please enter text"
1045 | }),
1046 | "custom_text2_desc": ("STRING", {
1047 | "multiline": False,
1048 | "default": "Please enter text"
1049 | }),
1050 | "custom_text3_desc": ("STRING", {
1051 | "multiline": False,
1052 | "default": "Please enter text"
1053 | }),
1054 | },
1055 | "hidden": {
1056 | "custom_text333333": ("STRING", {
1057 | "multiline": False,
1058 | "default": "Enter text"
1059 | }),
1060 | }
1061 | }
1062 | RETURN_TYPES = ()
1063 | CATEGORY = "sdBxb"
1064 | class sdBxb_textInput:
1065 | def __init__(self):
1066 | pass
1067 | @classmethod
1068 | def INPUT_TYPES(s):
1069 | return {"required": {
1070 | "text": ("STRING", {"default": "", "multiline": True, "placeholder": "Text input"}), }
1071 | }
1072 | RETURN_TYPES = ("STRING",)
1073 | FUNCTION = "main"
1074 | CATEGORY = "sdBxb"
1075 | @staticmethod
1076 | def main(text):
1077 | return (text,)
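# Expands %date:...% tokens in a filename prefix using the placeholder map
# below (yyyy, yy, MM, dd, HH, mm, ss). For example,
#   replace_time_format_in_filename("img_%date:yyyy-MM-dd%")
# yields something like "img_2024-01-31", depending on the current date.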
1078 | def replace_time_format_in_filename(filename_prefix):
1079 | def compute_vars(text):
1080 | now = datetime.now()
1081 | custom_formats = {
1082 | "yyyy": "%Y",
1083 | "yy": "%y",
1084 | "MM": "%m",
1085 | "dd": "%d",
1086 | "HH": "%H",
1087 | "mm": "%M",
1088 | "ss": "%S",
1089 | }
1090 | date_formats = re.findall(r"%date:(.*?)%", text)
1091 | for date_format in date_formats:
1092 | original_format = date_format
1093 | for custom_format, strftime_format in custom_formats.items():
1094 | date_format = date_format.replace(custom_format, strftime_format)
1095 | formatted_date = now.strftime(date_format)
1096 | text = text.replace(f"%date:{original_format}%", formatted_date)
1097 | return text
1098 | return compute_vars(filename_prefix)
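# The newer ComfyUI execution model ships a comfy_execution package, so its
# importability doubles as the version check.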
1099 | def is_execution_model_version_supported():
1100 | try:
1101 | import comfy_execution
1102 | return True
1103 | except ImportError:
1104 | return False
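# Wildcard-type trick common in ComfyUI extensions: a str subclass that never
# reports inequality, so any_typ ("*") passes whatever socket-type comparison
# ComfyUI performs against it.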
1105 | class AnyType(str):
1106 | def __ne__(self, __value: object) -> bool:
1107 | return False
1108 | any_typ = AnyType("*")
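# The counterpart trick for widget options: a str subclass that compares equal
# to everything, so the fixed option list below accepts arbitrary stored
# select values. Note that overriding __eq__ without __hash__ makes instances
# unhashable, which only matters if they are ever used as dict keys.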
1109 | class AlwaysEqual(str):
1110 | def __eq__(self, other):
1111 | return True
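# Prompt hook: on older ComfyUI versions (no lazy evaluation) the unselected
# inputs of every load_class node (presumably the switch node) are stripped
# from the prompt up front, so only the chosen branch executes; newer versions
# achieve the same effect through check_lazy_status.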
1112 | def onprompt(json_data):
1113 | if is_execution_model_version_supported():
1114 | pass
1115 | else:
1116 | nodes_a = json_data['extra_data']['extra_pnginfo']['workflow']['nodes']
1117 | delete_arr = []
1118 | for item in nodes_a:
1119 | if item['type'] == load_class:
1120 | first_value = item['widgets_values'][0]
1121 | index = next(
1122 | (i for i, value in enumerate(item['widgets_values'][1:], start=1)
1123 | if value == first_value),
1124 | None
1125 | )
1126 | if index is not None:
1127 | delete_arr.append({
1128 | 'id': item['id'],
1129 | 'index': index,
1130 | 'first_value': first_value
1131 | })
1132 | for kk, vv in enumerate(delete_arr):
1133 | if str(vv['id']) in json_data['prompt']:
1134 | keys_to_delete = []
1135 | for key, value in json_data['prompt'][str(vv['id'])]['inputs'].items():
1136 | if not key.startswith(f"input{vv['index']}") and key != 'select':
1137 | keys_to_delete.append(key)
1138 | for key in keys_to_delete:
1139 | del json_data['prompt'][str(vv['id'])]['inputs'][key]
1140 | return json_data
1141 | server.PromptServer.instance.add_on_prompt_handler(onprompt)
1142 | always_equal = AlwaysEqual("any_value")
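# Dynamic switch node. The "select" widget is a list of 199 identical
# AlwaysEqual entries so that any stored value validates, and the 200 lazy
# input slots are only declared when the call stack shows ComfyUI is building
# a node (get_input_info/add_node), which is a fragile but deliberate
# introspection hack.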
1143 | class bxbSwitch:
1144 | @classmethod
1145 | def INPUT_TYPES(s):
1146 | dyn_inputs = {
1147 | }
1148 | select_value = []
1149 | new_required = {
1150 | "select": ([always_equal for i in range(1, 200)],),
1151 | }
1152 | if is_execution_model_version_supported():
1153 | stack = inspect.stack()
1154 | if stack[2].function == 'get_input_info' and stack[3].function == 'add_node':
1155 | for x in range(0, 200):
1156 | dyn_inputs[f"input{x}"] = (any_typ, {"lazy": True})
1157 | inputs = {
1158 | "required": new_required,
1159 | "optional": dyn_inputs,
1160 | "hidden": {"unique_id": "UNIQUE_ID", "extra_pnginfo": "EXTRA_PNGINFO", 'nodes': [],
1161 | "select_index": ("INT", {"default": 1, "min": 1, "max": 999999, "step": 1,
1162 | "tooltip": "The input number you want to output among the inputs"}),
1163 | }
1164 | }
1165 | return inputs
1166 | RETURN_TYPES = (any_typ, "STRING", "INT")
1167 | RETURN_NAMES = ("selected_value", "selected_label", "selected_index")
1168 | FUNCTION = "do"
1169 | CATEGORY = "sdBxb"
1170 | def check_lazy_status(self, *args, **kwargs):
1171 | unique_id = kwargs['unique_id']
1172 | nodes_a = kwargs['extra_pnginfo']['workflow']['nodes']
1173 | if isinstance(unique_id, str):
1174 | try:
1175 | unique_id = int(unique_id)
1176 | except ValueError:
1177 | print(f"Could not convert unique_id '{unique_id}' to an integer")
1178 | matching_node = next((node for node in nodes_a if int(node['id']) == unique_id), None)
1179 | if matching_node is None:
1180 | print(f"Invalid node id: {unique_id}")
1181 | return []
1182 | first_value = matching_node['widgets_values'][0]
1183 | index = next(
1184 | (i for i, value in enumerate(matching_node['widgets_values'][1:], start=1)
1185 | if value == first_value),
1186 | None
1187 | )
1188 | if index is None:
1189 | return []
1190 | input_name = 'input' + str(index)
1191 | return [input_name]
1192 | @staticmethod
1193 | def do(*args, **kwargs):
1194 | unique_id = kwargs['unique_id']
1195 | nodes_a = kwargs['extra_pnginfo']['workflow']['nodes']
1196 | if isinstance(unique_id, str):
1197 | try:
1198 | unique_id = int(unique_id)
1199 | except ValueError:
1200 | return None, "", -1
1201 | matching_node = next((node for node in nodes_a if int(node['id']) == unique_id), None)
1202 | if matching_node is None:
1203 | print(f"Invalid node id: {unique_id}")
1204 | return None, "", -1
1205 | first_value = matching_node['widgets_values'][0]
1206 | index = next(
1207 | (i for i, value in enumerate(matching_node['widgets_values'][1:], start=1)
1208 | if value == first_value),
1209 | None
1210 | )
1211 | if index is None:
1212 | print(f"No selected input found for node id: {unique_id}")
1213 | return None, "", -1
1214 | return kwargs['input' + str(index)], first_value, index
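# Save node: a trimmed-down variant of the stock SaveImage that forces an
# "sdBxb_" filename prefix, expands %date:...% tokens, and writes PNGs
# without embedded metadata.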
1215 | class sdBxb_saveImage:
1216 | def __init__(self):
1217 | self.output_dir = folder_paths.get_output_directory()
1218 | self.type = "output"
1219 | self.prefix_append = "sdBxb_"
1220 | self.compress_level = 4
1221 | @classmethod
1222 | def INPUT_TYPES(s):
1223 | return {"required":
1224 | {"images": ("IMAGE",),
1225 | "filename_prefix": ("STRING", {"default": "ComfyUI"})},
1226 | }
1227 | RETURN_TYPES = ()
1228 | FUNCTION = "save_images"
1229 | OUTPUT_NODE = True
1230 | CATEGORY = "sdBxb"
1231 | def save_images(self, images, filename_prefix="ComfyUI"):
1232 | filename_prefix = self.prefix_append + filename_prefix
1233 | filename_prefix = replace_time_format_in_filename(filename_prefix)
1234 | full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(
1235 | filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0])
1236 | results = list()
1237 | for (batch_number, image) in enumerate(images):
1238 | i = 255. * image.cpu().numpy()
1239 | img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
1240 | metadata = None
1241 | filename_with_batch_num = filename.replace("%batch_num%", str(batch_number))
1242 | file = f"{filename_with_batch_num}_{counter:05}_.png"
1243 | img.save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=self.compress_level)
1244 | results.append({
1245 | "filename": file,
1246 | "subfolder": subfolder,
1247 | "type": self.type
1248 | })
1249 | counter += 1
1250 | return {"ui": {"images": results}}
1251 | workspace_path = os.path.dirname(__file__)
1252 | dist_path = os.path.join(workspace_path, 'huise_admin')
1253 | if os.path.exists(dist_path):
1254 | server.PromptServer.instance.app.add_routes([
1255 | web.static('/huise_admin/', dist_path),
1256 | ])
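# Standard ComfyUI extension exports: the front-end assets directory and the
# node class/display-name registrations.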
1257 | WEB_DIRECTORY = "./web"
1258 | NODE_CLASS_MAPPINGS = {
1259 | "sdBxb": sdBxb,
1260 | "sdBxb_textInput": sdBxb_textInput,
1261 | "sdBxb_saveImage": sdBxb_saveImage,
1262 | "bxbSwitch": bxbSwitch,
1263 | }
1264 | NODE_DISPLAY_NAME_MAPPINGS = {
1265 | "sdBxb": "sdBxb",
1266 | "sdBxb_textInput": "textInput",
1267 | "sdBxb_saveImage": "saveImage",
1268 | "bxbSwitch": "bxbSwitch",
1269 | }
1270 |
--------------------------------------------------------------------------------