├── .gitignore
├── LICENSE
├── README.md
├── README_EN.md
├── __init__.py
├── assets
│   ├── 1.png
│   ├── 10.png
│   ├── 11.png
│   ├── 12.png
│   ├── 13.png
│   ├── 14.png
│   ├── 2.png
│   ├── 3.png
│   ├── 4.png
│   ├── 5.png
│   ├── 6.png
│   ├── 7.png
│   ├── 8.png
│   └── 9.png
├── fonts
│   └── put_fontfile_here
├── func.py
├── nodes
│   ├── __init__.py
│   ├── addAudio.py
│   ├── addImgWatermark.py
│   ├── addTextWatermark.py
│   ├── extractAudio.py
│   ├── frames2video.py
│   ├── imageCopy.py
│   ├── imagePath2Tensor.py
│   ├── imagesSave.py
│   ├── loadImageFromDir.py
│   ├── mergingVideoByPlenty.py
│   ├── mergingVideoByTwo.py
│   ├── multiCuttingVideo.py
│   ├── pipVideo.py
│   ├── singleCuttingVideo.py
│   ├── stitchingVideo.py
│   ├── video2frames.py
│   ├── videoFlip.py
│   └── videoTransition.py
├── nodes_map.py
└── requirements.txt
/.gitignore:
--------------------------------------------------------------------------------
1 | /__pycache__
2 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # ComfyUI-FFmpeg
2 |
3 | 中文 | English
4 |
5 |
6 |
7 |
8 | ## 介绍
9 |
10 | 把FFmpeg常用功能封装成ComfyUI节点,方便用户可以在ComfyUI上也可以进行各种视频处理。
11 |
12 | ## 说明
13 |
14 | 使用该节点之前需要先安装FFmpeg,FFmpeg安装方法可以参考 [这里](https://www.bilibili.com/read/cv28108185/?spm_id_from=333.999.0.0&jump_opus=1)
15 |
16 | ## 安装
17 |
18 | #### 方法1:
19 |
20 | 1. 进入节点目录, `ComfyUI/custom_nodes/`
21 | 2. `git clone https://github.com/MoonHugo/ComfyUI-FFmpeg.git`
22 | 3. `cd ComfyUI-FFmpeg`
23 | 4. `pip install -r requirements.txt`
24 | 5. 重启ComfyUI
25 |
26 | #### 方法2:
27 | 直接下载节点源码包,然后解压到custom_nodes目录下,最后重启ComfyUI
28 |
29 | #### 方法3:
30 | 通过ComfyUI-Manager安装,搜索“ComfyUI-FFmpeg”进行安装
31 |
32 | ## 节点介绍
33 |
34 | ##### Video2Frames节点: 作用是将视频转为一张一张的图片,并保存到指定目录中
35 |
36 | 
37 |
38 | ###### 参数说明
39 | **video_path**: 本地视频路径,比如:`C:\Users\Desktop\222.mp4`
40 | **output_path**: 输出图片保存路径,比如:`C:\Users\Desktop\output`
41 | **frames_max_width**: 这个参数可以用来缩放视频,默认为0,表示不缩放视频,如果frames_max_width大于视频实际宽度,则视频不会被放大,保持原宽度,如果frames_max_width小于视频实际宽度,则视频会被缩小。
42 |
43 | ___
44 |
45 | ##### Frames2Video节点: 作用是将图片转为视频,并保存到指定目录中
46 | 
47 |
48 | ###### 参数说明
49 | **frame_path**: 本地图片路径,比如:`C:\Users\Desktop\output`
50 | **fps**: 视频帧率,默认为`30`
51 | **video_name**: 保存视频名称,比如:`222.mp4`
52 | **output_path**: 视频保存路径,比如:`C:\Users\Desktop\output`
53 | **audio_path**: 视频音频路径,比如:`C:\Users\Desktop\222.mp3`
54 | ___
55 |
56 | ##### AddTextWatermark节点: 作用是在视频上添加文字水印
57 |
58 | 
59 |
60 | ###### 参数说明
61 | **video_path**: 本地视频路径,比如:`C:\Users\Desktop\222.mp4`
62 | **output_path**: 视频保存路径,比如:`C:\Users\Desktop\output`
63 | **font_file**: 字体文件,需要把字体文件放到`custom_nodes\ComfyUI-FFmpeg\fonts`目录下,不仅英文字体,中文字体也可以,比如:`ComfyUI\custom_nodes\ComfyUI-FFmpeg\fonts\Alibaba-PuHuiTi-Heavy.ttf`
64 | **font_size**: 水印文字大小,比如:`40`
65 | **font_color**: 水印文字颜色,比如:`#FFFFFF`或者`white`
66 | **position_x**: 水印文字x坐标,比如:`100`
67 | **position_y**: 水印文字y坐标,比如:`100`
68 |
69 | ___
70 |
71 | ##### AddImgWatermark节点: 作用是在视频上添加图片水印
72 |
73 | 
74 |
75 | ###### 参数说明
76 | **video_path**: 本地视频路径,比如:`C:\Users\Desktop\222.mp4`
77 | **output_path**: 视频保存路径,比如:`C:\Users\Desktop\output`
78 | **watermark_image**: 水印图片路径,比如:`C:\Users\Desktop\watermark.png`
79 | **watermark_img_width**: 水印图片宽度,比如:`100`
80 | **position_x**: 水印图片在视频中的x坐标,比如:`100`
81 | **position_y**: 水印图片在视频中的y坐标,比如:`100`
82 | ___
83 |
84 | ##### VideoFlip节点: 作用是翻转视频
85 |
86 | 
87 |
88 | ###### 参数说明
89 | **video_path**: 本地视频路径,比如:`C:\Users\Desktop\222.mp4`
90 | **output_path**: 视频保存路径,比如:`C:\Users\Desktop\output`
91 | **flip_type**: 翻转类型,比如:`horizontal`水平翻转,`vertical`垂直翻转,`both`水平加垂直翻转
92 |
93 | ___
94 |
95 | ##### ExtractAudio节点:作用是提取视频中的音频
96 |
97 | 
98 |
99 | ###### 参数说明
100 | **video_path**: 本地视频路径,比如:`C:\Users\Desktop\222.mp4`
101 | **output_path**: 音频保存路径,比如:`C:\Users\Desktop\output`
102 | **audio_format**: 保存音频格式,包括 **.m4a**,**.mp3**,**.wav**,**.aac**,**.flac**,**.wma**,**.ogg**,**.ac3**,**.amr**,**.aiff**,**.opus**,**.m4b**,**.caf**,**.dts** 等等。
103 | ___
104 |
105 | ##### MergingVideoByTwo节点: 作用是合并两个视频,比如把两个一小时的视频合并成一个时长为2小时的视频
106 |
107 | 
108 |
109 | ###### 参数说明
110 | **video1_path**: 视频路径,比如:`C:\Users\Desktop\111.mp4`
111 | **video2_path**: 视频路径,比如:`C:\Users\Desktop\222.mp4`
112 | **device**: 分为CPU和GPU,如果你用CPU合并两个视频出错的话,可以尝试用GPU。
113 | **resolution_reference**: 合并后的视频尺寸是多少,可以参考第一个视频或者第二个视频,即video1或者video2。
114 | **output_path**: 视频保存路径,比如:`C:\Users\Desktop\output`
115 |
116 | ___
117 |
118 | ##### MergingVideoByPlenty节点: 作用是把多个编码格式、分辨率、帧率都一样的短视频合并成长视频
119 |
120 | 
121 |
122 | ###### 参数说明
123 | **video_path**: 视频路径,比如:`C:\Users\Desktop\111`,要求该路径下所有视频的编码格式、帧率以及分辨率一样。
124 | **output_path**: 视频保存路径,比如:`C:\Users\Desktop\output`
125 | ___
126 |
127 | ##### StitchingVideo节点: 作用是拼接两个视频,分成水平拼接和垂直拼接两种拼接方式
128 |
129 | 
130 |
131 | ###### 参数说明
132 | **video1_path**: 视频路径,比如:`C:\Users\Desktop\111.mp4`
133 | **video2_path**: 视频路径,比如:`C:\Users\Desktop\222.mp4`
134 | **device**: 分为CPU和GPU,如果你用CPU拼接两个视频出错的话,可以尝试用GPU。
135 | **use_audio**: 拼接后的视频使用哪个视频的音频,可以选择第一个视频的音频或者第二个视频的音频,即video1或者video2。
136 | **stitching_type**: 拼接视频方式,分为水平拼接(horizontal)和垂直拼接(vertical)两种方式。
137 | **output_path**: 视频保存路径,比如:`C:\Users\Desktop\output`
138 | **scale_and_crop**: 是否缩放裁剪成video1的尺寸。
139 |
140 | ___
141 |
142 | ##### MultiCuttingVideo节点: 作用是把一个视频切割成若干个视频
143 |
144 | 
145 |
146 | ###### 参数说明
147 | **video_path**: 视频路径,比如:`C:\Users\Desktop\111.mp4`
148 | **output_path**: 视频保存路径,比如:`C:\Users\Desktop\output`
149 | **segment_time**: 切割的每个视频长度,单位为秒,需要注意的是,它是根据关键帧切割视频的,所以时间不能太短。因为不能保证每一段视频都有关键帧,所以每一段视频时长不一定都一样,只是最接近的。
150 |
151 | ___
152 |
153 | ##### SingleCuttingVideo节点: 作用是切割指定视频中某个时间段的视频
154 |
155 | 
156 |
157 | ###### 参数说明
158 | **video_path**: 视频路径,比如:`C:\Users\Desktop\111.mp4`
159 | **output_path**: 视频保存路径,比如:`C:\Users\Desktop\output`
160 | **start_time**: 设置切割的开始时间点,设置为00:00:10的话就表示从视频中的第10秒开始切割。
161 | **end_time**: 设置切割的结束时间点,设置为00:05:00,表示切割到视频中的第5分钟为止。
162 |
163 | ___
164 |
165 | ##### AddAudio节点: 作用是给视频添加音频
166 |
167 | 
168 |
169 | ###### 参数说明
170 | **video_path**: 视频路径,比如:`C:\Users\Desktop\111.mp4`
171 | **audio_from**: 音频来源,可以来源于音频文件,也可以来源于视频文件,即audio_file、video_file。
172 | **file_path**: 如果audio_from为audio_file,这里就填入一个音频文件路径,如果audio_from为video_file,那么这里就填入一个视频文件路径,比如:`C:\Users\Desktop\111.mp3` 或者 `C:\Users\Desktop\111.mp4`
173 | **delay_play**: 音频延迟播放时间,单位为秒,默认值为0。
174 | **output_path**: 视频保存路径,比如:`C:\Users\Desktop\output`
175 |
176 | ___
177 |
178 | ##### PipVideo节点: 给视频增加画中画功能
179 |
180 | 
181 |
182 | ###### 参数说明
183 | **video1_path**: 画中画背景视频,比如:`C:\Users\Desktop\111.mp4`
184 | **video2_path**: 画中画前景视频,比如:`C:\Users\Desktop\222.mp4`
185 | **device**: 分为CPU和GPU,如果你用CPU出错的话,可以尝试用GPU。
186 | **use_audio**: 最终视频使用哪个视频的音频,可以选择第一个视频的音频或者第二个视频的音频,即video1或者video2。
187 | **use_duration**: 使用哪个视频的时长作为最终视频的时长,可以选择第一个视频或者第二个视频,即video1或者video2。
188 | **align_type**: 前景视频在背景视频中的位置,有top-left、top-right、bottom-left、bottom-right、center这些选项。
189 | **pip_fg_zoom**: 画中画背景缩放系数,越大前景画面越小,值为背景宽高的缩小倍数。
190 | **output_path**: 视频保存路径,比如:`C:\Users\Desktop\output`
191 | **scale_and_crop**: 缩放和裁剪比例。
192 | **fps**: 最终视频的帧率,默认为30.0,设置为0将使用video1的帧率,设置为1将使用video2的帧率。
193 | **is_chromakey**: 是否进行绿幕去背景。
194 |
195 | ___
196 |
197 | ##### VideoTransition节点: 给两个视频添加转场过渡动画效果
198 |
199 | 
200 |
201 | ###### 参数说明
202 | **video1_path**: 视频路径,比如:`C:\Users\Desktop\111.mp4`
203 | **video2_path**: 视频路径,比如:`C:\Users\Desktop\222.mp4`
204 | **reference_video**: 参考视频是哪个视频,决定了输出视频的尺寸和帧率。
205 | **device**: 分为CPU和GPU,如果你用CPU出错的话,可以尝试用GPU。
206 | **transition**: 转场特效名称,默认有'fade', 'wipeleft', 'wiperight', 'wipeup', 'wipedown','slideleft', 'slideright', 'slideup', 'slidedown','circlecrop', 'rectcrop', 'distance', 'fadeblack', 'fadewhite','radial', 'smoothleft', 'smoothright', 'smoothup', 'smoothdown','circleopen', 'circleclose', 'vertopen', 'vertclose','horzopen', 'horzclose', 'dissolve', 'pixelize','diagtl', 'diagtr', 'diagbl', 'diagbr','hlslice', 'hrslice', 'vuslice', 'vdslice','hblur', 'fadegrays', 'wipetl', 'wipetr', 'wipebl', 'wipebr','squeezeh', 'squeezev', 'zoomin', 'fadefast', 'fadeslow','hlwind', 'hrwind', 'vuwind', 'vdwind','coverleft', 'coverright', 'coverup', 'coverdown','revealleft', 'revealright', 'revealup', 'revealdown',可以执行命令:ffmpeg -hide_banner -h filter=xfade 查看你本地ffmpeg版本可用的转场效果。
207 | **transition_duration**: 转场持续时间,单位秒,最大值为3秒,不能小于0.1秒。
208 | **offset**: 转场开始时间,单位秒,不能大于等于video1的时长减去转场持续时间。
209 | **output_path**: 视频保存路径,比如:`C:\Users\Desktop\output`
210 |
211 | ___
212 |
213 |
214 | ## 社交账号
215 | - Bilibili:[我的B站主页](https://space.bilibili.com/1303099255)
216 |
217 | ## 感谢
218 |
219 | 感谢FFmpeg仓库的所有作者 [FFmpeg/FFmpeg](https://github.com/FFmpeg/FFmpeg)
220 |
221 | ## 关注历史
222 |
223 | [](https://star-history.com/#MoonHugo/ComfyUI-FFmpeg&Date)
--------------------------------------------------------------------------------
/README_EN.md:
--------------------------------------------------------------------------------
1 | # ComfyUI-FFmpeg
2 |
3 | English | 中文
4 |
5 |
6 |
7 |
8 | ## Introduction
9 |
10 | Wraps commonly used FFmpeg functionality as ComfyUI nodes, making it convenient for users to perform various video processing tasks directly within ComfyUI.
11 |
12 | ## Tips
13 |
14 | You need to install FFmpeg before using these nodes; for FFmpeg installation instructions, see [here](https://www.bilibili.com/read/cv28108185/?spm_id_from=333.999.0.0&jump_opus=1)
15 |
16 | ## Installation
17 |
18 | #### Method 1:
19 |
20 | 1. Go to the ComfyUI custom_nodes folder, `ComfyUI/custom_nodes/`
21 | 2. `git clone https://github.com/MoonHugo/ComfyUI-FFmpeg.git`
22 | 3. `cd ComfyUI-FFmpeg`
23 | 4. `pip install -r requirements.txt`
24 | 5. restart ComfyUI
25 |
26 | #### Method 2:
27 | Directly download the node source package, then extract it into the custom_nodes directory, and finally restart ComfyUI.
28 |
29 | #### Method 3:
30 | Install through ComfyUI-Manager by searching for 'ComfyUI-FFmpeg' and installing it.
31 |
32 | ## Nodes introduction
33 |
34 | ##### Video2Frames Node: The function is to convert a video into images and save them to a specified directory.
35 |
36 | 
37 |
38 | ###### Parameter Description
39 | **video_path**: the local video path, e.g.:`C:\Users\Desktop\222.mp4`
40 | **output_path**: the path to save the output images, e.g.:`C:\Users\Desktop\output`
41 | **frames_max_width**: this parameter can be used to resize the video. The default value is 0, which means the video will not be resized. If frames_max_width is larger than the actual width of the video, the video will not be enlarged and will retain its original width. If frames_max_width is smaller than the actual width of the video, the video will be scaled down.
42 |
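For reference, a minimal sketch of the kind of ffmpeg call this node wraps (paths and the output numbering pattern are example values, not the node's exact implementation):

```python
import subprocess

# Dump every frame of the video to numbered PNG files.
# When frames_max_width > 0, the node additionally applies a scale filter to downscale the frames.
subprocess.run([
    "ffmpeg", "-i", r"C:\Users\Desktop\222.mp4",
    r"C:\Users\Desktop\output\%06d.png",
], check=True)
```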
43 | ___
44 |
45 | ##### Frames2Video Node: The function is to convert images into a video and save it to a specified directory.
46 | 
47 |
48 | ###### Parameter Description
49 | **frame_path**: local image path, e.g.:`C:\Users\Desktop\output`
50 | **fps**: video frame rate, default is`30`
51 | **video_name**: saved video name, e.g.:`222.mp4`
52 | **output_path**: video save path,e.g.:`C:\Users\Desktop\output`
53 | **audio_path**: video audio path,e.g.:`C:\Users\Desktop\222.mp3`
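For reference, roughly equivalent plain ffmpeg usage (the `%06d.png` numbering pattern is an example; the node derives the actual pattern from the files in `frame_path`):

```python
import subprocess

# Encode the numbered frames at 30 fps and mux in an audio track;
# -shortest stops at whichever stream ends first.
subprocess.run([
    "ffmpeg", "-framerate", "30",
    "-i", r"C:\Users\Desktop\output\%06d.png",
    "-i", r"C:\Users\Desktop\222.mp3",
    "-c:v", "libx264", "-pix_fmt", "yuv420p", "-shortest",
    r"C:\Users\Desktop\output\222.mp4",
], check=True)
```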
54 | ___
55 |
56 | ##### AddTextWatermark Node: The function is to add a text watermark to the video.
57 |
58 | 
59 |
60 | ###### Parameter Description
61 | **video_path**: local video path,e.g.:`C:\Users\Desktop\222.mp4`
62 | **output_path**: video save path,e.g.:`C:\Users\Desktop\output`
63 | **font_file**: font file. The font file needs to be placed in the `custom_nodes\ComfyUI-FFmpeg\fonts` directory; both English and Chinese fonts are supported, e.g.: `ComfyUI\custom_nodes\ComfyUI-FFmpeg\fonts\Alibaba-PuHuiTi-Heavy.ttf`
64 | **font_size**: watermark text size,e.g.:`40`
65 | **font_color**: watermark text color,e.g.:`#FFFFFF` or `white`
66 | **position_x**: watermark text x-coordinate,e.g.:`100`
67 | **position_y**: watermark text y-coordinate,e.g.:`100`
68 |
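The node builds a `drawtext` filter from these parameters; a minimal sketch of an equivalent call (text, font and paths are example values):

```python
import subprocess

# Draw the watermark text at (100, 100) with a font from the fonts directory.
subprocess.run([
    "ffmpeg", "-i", r"C:\Users\Desktop\222.mp4",
    "-vf", "drawtext=text='Watermark':x=100:y=100:fontsize=40:fontcolor=white:"
           "fontfile='fonts/Alibaba-PuHuiTi-Heavy.ttf'",
    r"C:\Users\Desktop\output\watermarked.mp4",
], check=True)
```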
69 | ___
70 |
71 | ##### AddImgWatermark Node: The function is to add an image watermark to the video.
72 |
73 | 
74 |
75 | ###### Parameter Description
76 | **video_path**: local video path,e.g.:`C:\Users\Desktop\222.mp4`
77 | **output_path**: video save path,e.g.:`C:\Users\Desktop\output`
78 | **watermark_image**: watermark image path,e.g.:`C:\Users\Desktop\watermark.png`
79 | **watermark_img_width**: watermark image width,e.g.:`100`
80 | **position_x**: watermark image x-coordinate in the video,e.g.:`100`
81 | **position_y**: watermark image y-coordinate in the video,e.g.:`100`
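This corresponds to a scale + overlay filter chain; a minimal sketch using the example values above (the output name is hypothetical):

```python
import subprocess

# Scale the watermark to 100 px wide (keeping its aspect ratio),
# then overlay it on the video at (100, 100).
subprocess.run([
    "ffmpeg", "-i", r"C:\Users\Desktop\222.mp4",
    "-i", r"C:\Users\Desktop\watermark.png",
    "-filter_complex", "[1:v]scale=100:-1[wm];[0:v][wm]overlay=x=100:y=100",
    r"C:\Users\Desktop\output\watermarked.mp4",
], check=True)
```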
82 | ___
83 |
84 | ##### VideoFlip Node: The function is to flip the video
85 |
86 | 
87 |
88 | ###### Parameter Description
89 | **video_path**: local video path,e.g.:`C:\Users\Desktop\222.mp4`
90 | **output_path**: video save path,e.g.:`C:\Users\Desktop\output`
91 | **flip_type**: flip type, e.g. `horizontal` horizontal flip, `vertical` vertical flip, `both` horizontal plus vertical flip
92 |
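The three flip types map to ffmpeg's `hflip`/`vflip` filters; a minimal sketch (the output name is an example):

```python
import subprocess

# Pick the filter chain for the chosen flip_type.
flip_filters = {"horizontal": "hflip", "vertical": "vflip", "both": "hflip,vflip"}
subprocess.run([
    "ffmpeg", "-i", r"C:\Users\Desktop\222.mp4",
    "-vf", flip_filters["horizontal"],
    r"C:\Users\Desktop\output\flipped.mp4",
], check=True)
```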
93 | ___
94 |
95 | ##### ExtractAudio Node: The purpose is to extract the audio from the video.
96 |
97 | 
98 |
99 | ###### Parameter Description
100 | **video_path**: local video path,e.g.:`C:\Users\Desktop\222.mp4`
101 | **output_path**: audio save path,e.g.:`C:\Users\Desktop\output`
102 | **audio_format**: save audio formats, including **.m4a** , **.mp3** , **.wav** , **.aac** , **.flac** , **.wma** , **.ogg** , **.ac3** , **.amr** , **.aiff** , **.opus** , **.m4b** , **.caf** , **.dts** etc.
103 |
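For the default `.m4a` case this boils down to dropping the video stream and copying the audio as-is; other formats are re-encoded with the matching codec (e.g. `libmp3lame` for `.mp3`). A minimal sketch:

```python
import subprocess

# -vn drops the video; -acodec copy keeps the original audio stream untouched.
subprocess.run([
    "ffmpeg", "-i", r"C:\Users\Desktop\222.mp4",
    "-vn", "-acodec", "copy",
    r"C:\Users\Desktop\output\222.m4a",
], check=True)
```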
104 | ___
105 |
106 | ##### MergingVideoByTwo Node: The purpose is to merge two videos, for example, to combine two one-hour videos into a single two-hour video.
107 |
108 | 
109 |
110 | ###### Parameter Description
111 | **video1_path**: local video path,e.g.:`C:\Users\Desktop\111.mp4`
112 | **video2_path**: local video path,e.g.:`C:\Users\Desktop\222.mp4`
113 | **device**: there are two options: CPU and GPU; if you encounter an error while merging two videos using the CPU option, you can try using the GPU instead.
114 | **resolution_reference**: which video's resolution the merged video uses, either the first or the second video, i.e. video1 or video2.
115 | **output_path**: video save path,e.g.:`C:\Users\Desktop\output`
116 |
117 | ___
118 |
119 | ##### MergingVideoByPlenty Node: The purpose is to merge multiple short videos that have the same encoding format, resolution, and frame rate into a longer video.
120 |
121 | 
122 |
123 | ###### Parameter Description
124 | **video_path**: local video directory, e.g.: `C:\Users\Desktop\111`. All videos in the specified path must have the same encoding format, frame rate, and resolution.
125 | **output_path**: video save path,e.g.:`C:\Users\Desktop\output`
126 |
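Because all inputs share the same codec, resolution and frame rate, they can be joined losslessly with ffmpeg's concat demuxer; a minimal sketch (`list.txt` is a hypothetical file containing one `file 'clip.mp4'` line per video, in playback order):

```python
import subprocess

# Concatenate the clips listed in list.txt without re-encoding.
subprocess.run([
    "ffmpeg", "-f", "concat", "-safe", "0",
    "-i", r"C:\Users\Desktop\111\list.txt",
    "-c", "copy",
    r"C:\Users\Desktop\output\merged.mp4",
], check=True)
```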
127 | ___
128 |
129 | ##### StitchingVideo Node: The purpose is to stitch two videos together, which can be done in two ways: horizontal stitching and vertical stitching.
130 |
131 | 
132 |
133 | ###### Parameter Description
134 | **video1_path**: local video path,e.g.:`C:\Users\Desktop\111.mp4`
135 | **video2_path**: local video path,e.g.:`C:\Users\Desktop\222.mp4`
136 | **device**: there are two options: CPU and GPU, if you encounter an error while stitching two videos using the CPU, you can try using the GPU instead.
137 | **use_audio**: which audio will be used in the stitched video? You can choose the audio from either the first video or the second video, that is, from video1 or video2.
138 | **stitching_type**: the methods for stitching videos are divided into two types: horizontal stitching and vertical stitching.
139 | **output_path**: video save path,e.g.:`C:\Users\Desktop\output`
140 | **scale_and_crop**: whether to scale and crop to match video1's dimensions.
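Horizontal stitching corresponds to ffmpeg's `hstack` filter and vertical stitching to `vstack`; a minimal sketch that keeps video1's audio (the node additionally scales/crops the inputs so their sizes match):

```python
import subprocess

# Place the two videos side by side; hstack requires the inputs to have equal heights.
subprocess.run([
    "ffmpeg", "-i", r"C:\Users\Desktop\111.mp4", "-i", r"C:\Users\Desktop\222.mp4",
    "-filter_complex", "[0:v][1:v]hstack=inputs=2[v]",
    "-map", "[v]", "-map", "0:a?",
    r"C:\Users\Desktop\output\stitched.mp4",
], check=True)
```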
141 | ___
142 |
143 | ##### MultiCuttingVideo Node: The purpose is to split one video into several smaller videos.
144 |
145 | 
146 |
147 | ###### Parameter Description
148 | **video_path**: local video path,e.g.:`C:\Users\Desktop\111.mp4`
149 | **output_path**: video save path,e.g.:`C:\Users\Desktop\output`
150 | **segment_time**: the length of each cut video is measured in seconds. It’s important to note that the video is cut based on keyframes, so the duration cannot be too short. Since there is no guarantee that each segment of the video will have a keyframe, the duration of each segment may not be the same, but it will be the closest possible.
151 |
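As noted above, the split is keyframe-based; that matches ffmpeg's segment muxer with stream copy, sketched here (segment length and output pattern are example values):

```python
import subprocess

# Split into ~60-second pieces; with -c copy cuts can only land on keyframes,
# so the actual segment lengths are approximate.
subprocess.run([
    "ffmpeg", "-i", r"C:\Users\Desktop\111.mp4",
    "-f", "segment", "-segment_time", "60", "-reset_timestamps", "1",
    "-c", "copy",
    r"C:\Users\Desktop\output\part_%03d.mp4",
], check=True)
```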
152 | ___
153 |
154 | ##### SingleCuttingVideo Node: The purpose is to cut a specific time segment from a designated video.
155 |
156 | 
157 |
158 | ###### Parameter Description
159 | **video_path**: local video path,e.g.:`C:\Users\Desktop\111.mp4`
160 | **output_path**: video save path,e.g.:`C:\Users\Desktop\output`
161 | **start_time**: set the start time for the cut; for example, setting it to 00:00:10 means cutting from the 10th second of the video.
162 | **end_time**: set the end time for the cut; for example, setting it to 00:05:00 means cutting until the 5th minute of the video.
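A minimal sketch of the equivalent ffmpeg call for the example times above (with `-c copy` the cut snaps to the nearest keyframes rather than the exact timestamps):

```python
import subprocess

# Keep only the part of the video between 00:00:10 and 00:05:00.
subprocess.run([
    "ffmpeg", "-i", r"C:\Users\Desktop\111.mp4",
    "-ss", "00:00:10", "-to", "00:05:00",
    "-c", "copy",
    r"C:\Users\Desktop\output\cut.mp4",
], check=True)
```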
163 | ___
164 |
165 | ##### AddAudio Node: The purpose is to add audio to the video.
166 |
167 | 
168 |
169 | ###### Parameter Description
170 | **video_path**: local video path,e.g.:`C:\Users\Desktop\111.mp4`
171 | **audio_from**: the audio source can be from an audio file or from a video file, that is, audio_file or video_file.
172 | **file_path**: if audio_from is set to audio_file, enter the path of an audio file here; if audio_from is set to video_file, enter the path of a video file here, for example: `C:\Users\Desktop\111.mp3` or `C:\Users\Desktop\111.mp4`
173 | **delay_play**: the audio delay playback time is measured in seconds, with a default value of 0.
174 | **output_path**: video save path,e.g.:`C:\Users\Desktop\output`
175 |
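For the audio_file case the node maps the video stream from the first input and the audio stream from the second, copying both; a sketch with the example paths (delay_play becomes an `-itsoffset` placed before the audio input):

```python
import subprocess

# Mux the external audio onto the video without re-encoding either stream.
subprocess.run([
    "ffmpeg", "-i", r"C:\Users\Desktop\111.mp4",
    "-i", r"C:\Users\Desktop\111.mp3",
    "-map", "0:v", "-map", "1:a",
    "-c:v", "copy", "-c:a", "copy", "-shortest",
    r"C:\Users\Desktop\output\with_audio.mp4",
], check=True)
```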
176 | ___
177 |
178 | ##### PipVideo Node: Add picture-in-picture functionality to videos
179 |
180 | 
181 |
182 | ###### Parameter Description
183 | **video1_path**: The background video for picture-in-picture (PIP), e.g.:`C:\Users\Desktop\111.mp4`
184 | **video2_path**: The foreground video for PIP, e.g.:`C:\Users\Desktop\222.mp4`
185 | **device**: Choose between CPU and GPU. If errors occur with CPU, try switching to GPU.
186 | **use_audio**: Select which video's audio to use in the final output—either video1 or video2.
187 | **use_duration**: Determine the final video's duration based on video1 or video2.
188 | **align_type**: Position of the foreground video on the background—options: top-left, top-right, bottom-left, bottom-right, or center.
189 | **pip_fg_zoom**: PIP foreground scaling factor—larger values make the foreground smaller (scaled relative to background dimensions).
190 | **output_path**: Output video save path, e.g.:`C:\Users\Desktop\output`
191 | **scale_and_crop**: Scaling and cropping ratio.
192 | **fps**: frame rate of the final output video; the default is `30.0`.
193 | **is_chromakey**: Whether to apply green screen (chroma key) background removal.
194 |
195 | ___
196 |
197 | ##### VideoTransition Node: Add Transition Animation Effects to Two Videos
198 |
199 | 
200 |
201 | ###### Parameter Description
202 | **video1_path**: local video path,e.g.:`C:\Users\Desktop\111.mp4`
203 | **video2_path**: local video path,e.g.:`C:\Users\Desktop\222.mp4`
204 | **reference_video**: Specifies which video serves as the reference for determining the output video's dimensions and frame rate.
205 | **device**: Choose between CPU and GPU. If errors occur with CPU, try switching to GPU.
206 | **transition**: Transition effect name. Default options include:'fade', 'wipeleft', 'wiperight', 'wipeup', 'wipedown','slideleft', 'slideright', 'slideup', 'slidedown','circlecrop', 'rectcrop', 'distance', 'fadeblack', 'fadewhite','radial', 'smoothleft', 'smoothright', 'smoothup', 'smoothdown','circleopen', 'circleclose', 'vertopen', 'vertclose','horzopen', 'horzclose', 'dissolve', 'pixelize','diagtl', 'diagtr', 'diagbl', 'diagbr','hlslice', 'hrslice', 'vuslice', 'vdslice','hblur', 'fadegrays', 'wipetl', 'wipetr', 'wipebl', 'wipebr','squeezeh', 'squeezev', 'zoomin', 'fadefast', 'fadeslow','hlwind', 'hrwind', 'vuwind', 'vdwind','coverleft', 'coverright', 'coverup', 'coverdown','revealleft', 'revealright', 'revealup', 'revealdown',To check available transitions for your local FFmpeg version, run: `ffmpeg -hide_banner -h filter=xfade`.
207 | **transition_duration**: Transition duration in seconds. Maximum value: 3 seconds; cannot be < 0.1.
208 | **offset**: Transition start time in seconds. Cannot be greater than or equal to video1's duration minus transition_duration.
209 | **output_path**: Output video save path, e.g.:`C:\Users\Desktop\output`
210 |
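The transitions come from ffmpeg's `xfade` filter; a minimal sketch of a one-second `fade` starting four seconds into video1 (example values; xfade expects both inputs to share resolution and frame rate):

```python
import subprocess

# Cross-fade from video1 to video2: transition=fade, duration=1s, offset=4s.
subprocess.run([
    "ffmpeg", "-i", r"C:\Users\Desktop\111.mp4", "-i", r"C:\Users\Desktop\222.mp4",
    "-filter_complex", "[0:v][1:v]xfade=transition=fade:duration=1:offset=4[v]",
    "-map", "[v]",
    r"C:\Users\Desktop\output\transition.mp4",
], check=True)
```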
211 | ___
212 |
213 | ## Social Account Homepage
214 | - Bilibili:[My BILIBILI Homepage](https://space.bilibili.com/1303099255)
215 |
216 | ## Acknowledgments
217 |
218 | Thanks to all the contributors of the FFmpeg repository. [FFmpeg/FFmpeg](https://github.com/FFmpeg/FFmpeg)
219 |
220 | ## Star history
221 |
222 | [](https://star-history.com/#MoonHugo/ComfyUI-FFmpeg&Date)
--------------------------------------------------------------------------------
/__init__.py:
--------------------------------------------------------------------------------
1 | from .nodes_map import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS
2 |
3 | __all__ = ["NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS"]
--------------------------------------------------------------------------------
/assets/1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MoonHugo/ComfyUI-FFmpeg/1b50fc19886bcf56af8f38adbfd08d87fdbfdfb5/assets/1.png
--------------------------------------------------------------------------------
/assets/10.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MoonHugo/ComfyUI-FFmpeg/1b50fc19886bcf56af8f38adbfd08d87fdbfdfb5/assets/10.png
--------------------------------------------------------------------------------
/assets/11.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MoonHugo/ComfyUI-FFmpeg/1b50fc19886bcf56af8f38adbfd08d87fdbfdfb5/assets/11.png
--------------------------------------------------------------------------------
/assets/12.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MoonHugo/ComfyUI-FFmpeg/1b50fc19886bcf56af8f38adbfd08d87fdbfdfb5/assets/12.png
--------------------------------------------------------------------------------
/assets/13.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MoonHugo/ComfyUI-FFmpeg/1b50fc19886bcf56af8f38adbfd08d87fdbfdfb5/assets/13.png
--------------------------------------------------------------------------------
/assets/14.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MoonHugo/ComfyUI-FFmpeg/1b50fc19886bcf56af8f38adbfd08d87fdbfdfb5/assets/14.png
--------------------------------------------------------------------------------
/assets/2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MoonHugo/ComfyUI-FFmpeg/1b50fc19886bcf56af8f38adbfd08d87fdbfdfb5/assets/2.png
--------------------------------------------------------------------------------
/assets/3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MoonHugo/ComfyUI-FFmpeg/1b50fc19886bcf56af8f38adbfd08d87fdbfdfb5/assets/3.png
--------------------------------------------------------------------------------
/assets/4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MoonHugo/ComfyUI-FFmpeg/1b50fc19886bcf56af8f38adbfd08d87fdbfdfb5/assets/4.png
--------------------------------------------------------------------------------
/assets/5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MoonHugo/ComfyUI-FFmpeg/1b50fc19886bcf56af8f38adbfd08d87fdbfdfb5/assets/5.png
--------------------------------------------------------------------------------
/assets/6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MoonHugo/ComfyUI-FFmpeg/1b50fc19886bcf56af8f38adbfd08d87fdbfdfb5/assets/6.png
--------------------------------------------------------------------------------
/assets/7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MoonHugo/ComfyUI-FFmpeg/1b50fc19886bcf56af8f38adbfd08d87fdbfdfb5/assets/7.png
--------------------------------------------------------------------------------
/assets/8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MoonHugo/ComfyUI-FFmpeg/1b50fc19886bcf56af8f38adbfd08d87fdbfdfb5/assets/8.png
--------------------------------------------------------------------------------
/assets/9.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MoonHugo/ComfyUI-FFmpeg/1b50fc19886bcf56af8f38adbfd08d87fdbfdfb5/assets/9.png
--------------------------------------------------------------------------------
/fonts/put_fontfile_here:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MoonHugo/ComfyUI-FFmpeg/1b50fc19886bcf56af8f38adbfd08d87fdbfdfb5/fonts/put_fontfile_here
--------------------------------------------------------------------------------
/func.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from PIL import Image
3 | import torch
4 | import subprocess
5 | import json
6 | import re
7 | import os
8 | import gc
9 | import shutil
10 | import time
11 | import glob
12 | from itertools import islice
13 | from concurrent.futures import ThreadPoolExecutor,as_completed
14 | from comfy.model_management import unload_all_models, soft_empty_cache
15 |
16 | def get_xfade_transitions():
17 | try:
18 | # Run `ffmpeg -hide_banner -h filter=xfade` to get the xfade filter help text and list the available transition effects
19 | result = subprocess.run(
20 | ['ffmpeg', '-hide_banner', '-h', 'filter=xfade'],
21 | capture_output=True,
22 | text=True,
23 | check=True
24 | )
25 |
26 | # ffmpeg writes this help output to stderr
27 | output = result.stdout if result.stdout else result.stderr
28 | print(output)
29 | # Match every transition line with a regular expression
30 | pattern = r'^\s*(\w+)\s+-?\d+\b'
31 | data = output.split('\n')
32 | transitions = []
33 | for line in data:
34 | match = re.search(pattern, line)
35 | if match and match.group(1) != 'none' and match.group(1) != 'custom':
36 | transitions.append(match.group(1))
37 |
38 | # Fall back to the default transition list if nothing could be parsed from the help output
39 | if len(transitions) == 0:
40 | transitions = [
41 | 'fade', 'wipeleft', 'wiperight', 'wipeup', 'wipedown',
42 | 'slideleft', 'slideright', 'slideup', 'slidedown',
43 | 'circlecrop', 'rectcrop', 'distance', 'fadeblack', 'fadewhite',
44 | 'radial', 'smoothleft', 'smoothright', 'smoothup', 'smoothdown',
45 | 'circleopen', 'circleclose', 'vertopen', 'vertclose',
46 | 'horzopen', 'horzclose', 'dissolve', 'pixelize',
47 | 'diagtl', 'diagtr', 'diagbl', 'diagbr',
48 | 'hlslice', 'hrslice', 'vuslice', 'vdslice',
49 | 'hblur', 'fadegrays', 'wipetl', 'wipetr', 'wipebl', 'wipebr',
50 | 'squeezeh', 'squeezev', 'zoomin', 'fadefast', 'fadeslow',
51 | 'hlwind', 'hrwind', 'vuwind', 'vdwind',
52 | 'coverleft', 'coverright', 'coverup', 'coverdown',
53 | 'revealleft', 'revealright', 'revealup', 'revealdown'
54 | ]
55 | return sorted(transitions)
56 |
57 | except subprocess.CalledProcessError as e:
58 | print(f"执行ffmpeg命令出错: {e}")
59 | print(f"错误输出: {e.stderr}")
60 | return []
61 | except FileNotFoundError:
62 | print("错误: 找不到ffmpeg程序,请确保ffmpeg已安装并添加到系统PATH")
63 | return []
64 |
65 | def copy_image(image_path, destination_directory):
66 | try:
67 | # Get the image file name
68 | image_name = os.path.basename(image_path)
69 | # Build the destination path
70 | destination_path = os.path.join(destination_directory, image_name)
71 | # Skip the copy if an identical file already exists at the destination
72 | if not os.path.exists(destination_path):
73 | shutil.copy(image_path, destination_path)
74 | return destination_path
75 | except Exception as e:
76 | print(f"Error copying image {image_path}: {e}")
77 | return None
78 |
79 | def copy_images_to_directory(image_paths, destination_directory):
80 | # Create the destination directory if it does not exist
81 | if not os.path.exists(destination_directory):
82 | os.makedirs(destination_directory)
83 |
84 | # Use a dict to keep the mapping between original indices and paths
85 | index_to_path = {i: image_path for i, image_path in enumerate(image_paths)}
86 | copied_paths = [None] * len(image_paths)
87 |
88 | # Copy the images in parallel with a thread pool
89 | with ThreadPoolExecutor() as executor:
90 | # Submit all tasks
91 | futures = {executor.submit(copy_image, image_path, destination_directory): i for i, image_path in index_to_path.items()}
92 |
93 | # Wait for all tasks to finish and store the results by original index
94 | for future in as_completed(futures):
95 | index = futures[future]
96 | result = future.result()
97 | if result is not None:
98 | copied_paths[index] = result
99 |
100 | # Return the new paths in their original order
101 | return [path for path in copied_paths if path is not None]
102 |
103 | def get_image_paths_from_directory(directory, start_index, length):
104 | image_extensions = {'.jpg', '.jpeg', '.png', '.gif', '.bmp', '.tiff'}
105 |
106 | # Generator over the sorted directory listing that filters to image files as it goes
107 | def image_generator():
108 | for filename in sorted(os.listdir(directory)):
109 | if os.path.splitext(filename)[1].lower() in image_extensions:
110 | yield os.path.join(directory, filename)
111 |
112 | # Use islice to take the requested slice of image paths
113 | selected_images = islice(image_generator(), start_index, start_index + length)
114 |
115 | return list(selected_images)
116 |
117 |
118 | # def get_image_paths_from_directory(directory, start_index, length):
119 | # # 获取目录下所有文件,并按照文件名排序
120 | # files = sorted(os.listdir(directory))
121 |
122 | # # 过滤掉非图片文件(这里只检查常见图片格式)
123 | # image_extensions = {'.jpg', '.jpeg', '.png', '.gif', '.bmp', '.tiff'}
124 | # image_files = [f for f in files if os.path.splitext(f)[1].lower() in image_extensions]
125 |
126 | # # 获取从start_index开始的length个图片路径
127 | # selected_images = image_files[start_index:start_index + length]
128 |
129 | # # 返回完整路径列表
130 | # image_paths = [os.path.join(directory, image_file) for image_file in selected_images]
131 |
132 | # return image_paths
133 |
134 | def generate_template_string(filename):
135 | match = re.search(r'\d+', filename)
136 | return re.sub(r'\d+', lambda x: f'%0{len(x.group())}d', filename) if match else filename
137 |
138 | def tensor2pil(image):
139 | return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))
140 |
141 | def pil2tensor(image):
142 | return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0)
143 |
144 | def getVideoInfo(video_path):
145 | command = [
146 | 'ffprobe', '-v', 'error', '-select_streams', 'v:0', '-show_entries',
147 | 'stream=avg_frame_rate,duration,width,height', '-of', 'json', video_path
148 | ]
149 | # Run the ffprobe command
150 | result = subprocess.run(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
151 | # Decode the output into a string
152 | output = result.stdout.decode('utf-8').strip()
153 | print(output)
154 | data = json.loads(output)
155 | # Look up the video stream information
156 | if 'streams' in data and len(data['streams']) > 0:
157 | stream = data['streams'][0] # Take the first video stream
158 | fps = stream.get('avg_frame_rate')
159 | if fps is not None:
160 | # The frame rate may be a fraction string such as "30/1" or a plain number such as "20.233000"
161 | if '/' in fps:
162 | num, denom = map(int, fps.split('/'))
163 | fps = num / denom
164 | else:
165 | fps = float(fps) # Convert directly to a float
166 | width = int(stream.get('width'))
167 | height = int(stream.get('height'))
168 | duration = float(stream.get('duration'))
169 | return_data = {'fps': fps, 'width': width, 'height': height, 'duration': duration}
170 | else:
171 | return_data = {}
172 | return return_data
173 |
174 | def get_image_size(image_path):
175 | # Open the image file
176 | with Image.open(image_path) as img:
177 | # Get the image width and height
178 | width, height = img.size
179 | return width, height
180 |
181 | def has_audio(video_path):
182 | cmd = [
183 | 'ffprobe',
184 | '-v', 'error',
185 | '-select_streams', 'a:0',
186 | '-show_entries', 'stream=codec_type',
187 | '-of', 'default=noprint_wrappers=1:nokey=1',
188 | video_path
189 | ]
190 |
191 | result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
192 | return result.stdout.decode().strip() == 'audio'
193 |
194 | def set_file_name(video_path):
195 | file_name = os.path.basename(video_path)
196 | file_extension = os.path.splitext(file_name)[1]
197 | # Name the file with a timestamp (year-month-day-hour-minute-second)
198 | file_name = time.strftime("%Y%m%d%H%M%S", time.localtime()) + file_extension
199 | return file_name
200 |
201 | def video_type():
202 | return ('.mp4', '.avi', '.mov', '.mkv','.rmvb','.wmv','.flv')
203 | def audio_type():
204 | return ('.mp3', '.wav', '.aac', '.flac','.m4a','.wma','.ogg','.amr','.ape','.ac3','.aiff','.opus','.m4b','.caf','.dts')
205 |
206 | def validate_time_format(time_str):
207 | pattern = r'^([0-1][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9]|\d{1,2})$'
208 | return bool(re.match(pattern, time_str))
209 |
210 | def get_video_files(directory):
211 | video_extensions = ['*.mp4', '*.avi', '*.mov', '*.mkv','*.rmvb', '*.wmv', '*.flv']
212 | video_files = []
213 | for ext in video_extensions:
214 | video_files.extend(glob.glob(os.path.join(directory, ext)))
215 | # Sort the file names
216 | video_files.sort()
217 | return video_files
218 |
219 | def save_image(image, path):
220 | tensor2pil(image).save(path)
221 |
222 | def clear_memory():
223 | gc.collect()
224 | unload_all_models()
225 | soft_empty_cache()
--------------------------------------------------------------------------------
/nodes/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MoonHugo/ComfyUI-FFmpeg/1b50fc19886bcf56af8f38adbfd08d87fdbfdfb5/nodes/__init__.py
--------------------------------------------------------------------------------
/nodes/addAudio.py:
--------------------------------------------------------------------------------
1 | import os
2 | import subprocess
3 | from ..func import set_file_name,video_type,audio_type,has_audio
4 |
5 | class AddAudio:
6 | def __init__(self):
7 | pass
8 |
9 | @classmethod
10 | def INPUT_TYPES(cls):
11 | return {
12 | "required": {
13 | "video_path": ("STRING", {"default":"C:/Users/Desktop/video.mp4",}),
14 | "audio_from":(["audio_file","video_file"], {"default":"audio_file",}),
15 | "file_path": ("STRING", {"default":"C:/Users/Desktop/output",}),
16 | 'delay_play':("INT",{"default":0,"min":0,}),
17 | "output_path": ("STRING", {"default":"C:/Users/Desktop/output/",}),
18 | },
19 | }
20 |
21 | RETURN_TYPES = ("STRING",)
22 | RETURN_NAMES = ("video_complete_path",)
23 | FUNCTION = "add_audio"
24 | OUTPUT_NODE = True
25 | CATEGORY = "🔥FFmpeg"
26 |
27 | def add_audio(self, video_path, audio_from, file_path,delay_play,output_path):
28 | try:
29 | video_path = os.path.abspath(video_path).strip()
30 | file_path = os.path.abspath(file_path).strip()
31 | output_path = os.path.abspath(output_path).strip()
32 | # Validate the input video file
33 | if not video_path.lower().endswith(video_type()):
34 | raise ValueError("video_path:"+video_path+"不是视频文件(video_path:"+video_path+" is not a video file)")
35 | if not os.path.isfile(video_path):
36 | raise ValueError("video_path:"+video_path+"不存在(video_path:"+video_path+" does not exist)")
37 |
38 | if not os.path.isfile(file_path):
39 | raise ValueError("file_path:"+file_path+"不存在(file_path:"+file_path+" does not exist)")
40 |
41 | # Check that output_path is a directory
42 | if not os.path.isdir(output_path):
43 | raise ValueError("output_path:"+output_path+"不是目录(output_path:"+output_path+" is not a directory)")
44 |
45 | if audio_from == "video_file":
46 | if not file_path.lower().endswith(video_type()):
47 | raise ValueError("file_path:"+file_path+"不是视频文件(file_path:"+file_path+" is not a video file)")
48 |
49 | if not has_audio(file_path):
50 | raise ValueError("file_path:"+file_path+"没有音频,请选择一个有音频的视频文件。(file_path:"+file_path+" has no audio, please select a video file that has audio.)")
51 |
52 | else:
53 | if not file_path.lower().endswith(audio_type()):
54 | raise ValueError("file_path:"+file_path+"不是音频文件(file_path:"+file_path+" is not a audio file)")
55 |
56 | file_name = set_file_name(video_path)
57 |
58 | output_path = os.path.join(output_path, file_name)
59 |
60 | # ffmpeg -i video.mp4 -i audio.mp3 -c:v copy -c:a aac -strict experimental -shortest output.mp4
61 |
62 | if audio_from == "audio_file":
63 | #ffmpeg -i video.mp4 -i audio.mp3 -map 0:v -map 1:a -c:v copy -c:a copy -shortest output.mp4
64 | command = [
65 | 'ffmpeg', '-i', video_path, '-itsoffset', str(delay_play), # input video path
66 | '-i', file_path, # input audio file path
67 | '-map', '0:v', # take the video stream from the first input (video.mp4)
68 | '-map', '1:a', # take the audio stream from the second input (audio.mp3)
69 | '-c:v', 'copy', # copy the video stream
70 | '-c:a', 'copy', # copy the audio stream
71 | '-shortest', # -shortest trims the output to the shorter of the audio and video streams
72 | output_path,
73 | ]
74 | else:
75 | # ffmpeg -i source_video.mp4 -i target_video.mp4 -map 0:a -map 1:v -c:v copy -c:a aac -strict experimental -shortest output.mp4
76 | command = [
77 | 'ffmpeg', '-itsoffset',str(delay_play),
78 | '-i', file_path,'-i',video_path, # input video paths
79 | '-map', '0:a', '-map', '1:v', '-c:v', 'copy', '-c:a', 'copy',
80 | '-strict', 'experimental',
81 | '-shortest',
82 | output_path,
83 | ]
84 |
85 | # Run the command and check for errors
86 | result = subprocess.run(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
87 | # Check the return code
88 | if result.returncode != 0:
89 | # Print the error message if the command failed
90 | print(f"Error: {result.stderr.decode('utf-8')}")
91 | raise ValueError(f"Error: {result.stderr.decode('utf-8')}")
92 | else:
93 | # Print the standard output
94 | print(result.stdout)
95 |
96 | return (output_path,)
97 | except Exception as e:
98 | raise ValueError(e)
--------------------------------------------------------------------------------
/nodes/addImgWatermark.py:
--------------------------------------------------------------------------------
1 | import os
2 | import subprocess
3 | from ..func import get_image_size,set_file_name,video_type
4 |
5 |
6 | class AddImgWatermark:
7 |
8 | # Initialization method
9 | def __init__(self):
10 | pass
11 |
12 | @classmethod
13 | def INPUT_TYPES(s):
14 | return {
15 | "required": {
16 | "video_path": ("STRING", {"default":"C:/Users/Desktop/video.mp4",}),
17 | "output_path": ("STRING", {"default":"C:/Users/Desktop/output/",}),
18 | "watermark_image": ("STRING", {"default":"C:/Users/Desktop/logo.png",}),
19 | "watermark_img_width": ("INT", {"default": 100,"min": 1, "step": 1}),
20 | "position_x": ("INT", {"default": 10, "step": 1}),
21 | "position_y": ("INT", {"default": 10, "step": 1}),
22 | },
23 | }
24 |
25 | RETURN_TYPES = ("STRING",)
26 | RETURN_NAMES = ("video_complete_path",)
27 | FUNCTION = "add_img_watermark"
28 | OUTPUT_NODE = True
29 | CATEGORY = "🔥FFmpeg"
30 |
31 | def add_img_watermark(self,video_path,output_path,watermark_image,watermark_img_width,position_x,position_y):
32 | try:
33 |
34 | video_path = os.path.abspath(video_path).strip()
35 | output_path = os.path.abspath(output_path).strip()
36 | # Validate the input video file
37 | if not video_path.lower().endswith(video_type()):
38 | raise ValueError("video_path:"+video_path+"不是视频文件(video_path:"+video_path+" is not a video file)")
39 |
40 | if not os.path.exists(video_path):
41 | raise ValueError("video_path:"+video_path+"不存在(video_path:"+video_path+" does not exist)")
42 |
43 | # Check that output_path is a directory
44 | if not os.path.isdir(output_path):
45 | raise ValueError("output_path:"+output_path+"不是目录(output_path:"+output_path+" is not a directory)")
46 |
47 | # The watermark file must be an image
48 | if not watermark_image.lower().endswith(('.png', '.jpg', '.jpeg', '.bmp', '.gif')):
49 | raise ValueError("watermark_image不是图片文件(watermark file is not a image file)")
50 |
51 | if not os.path.exists(watermark_image):
52 | raise ValueError("watermark_image:"+watermark_image+"不存在(watermark_image :"+watermark_image+" does not exist)")
53 |
54 | file_name = set_file_name(video_path)
55 | output_path = os.path.join(output_path, file_name)
56 | width,height = get_image_size(watermark_image)
57 | watermark_img_height = int(height * watermark_img_width / width) # Compute the new height, keeping the aspect ratio
58 | cmd = [
59 | 'ffmpeg',
60 | '-i', video_path,
61 | '-i', watermark_image,
62 | '-filter_complex',f"[1:v]scale={watermark_img_width}:{watermark_img_height}[wm];[0:v][wm]overlay=x={position_x}:y={position_y}:format=auto",
63 | output_path,
64 | ]
65 | result = subprocess.run(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
66 | #os.remove(image_save_path) # Delete the temporary watermark image
67 | # Check the return code
68 | if result.returncode != 0:
69 | # Print the error message if the command failed
70 | print(f"Error: {result.stderr.decode('utf-8')}")
71 | raise ValueError(f"Error: {result.stderr.decode('utf-8')}")
72 | else:
73 | # Print the standard output
74 | print(result.stdout)
75 | except Exception as e:
76 | raise ValueError(e)
77 | return (output_path,)
--------------------------------------------------------------------------------
/nodes/addTextWatermark.py:
--------------------------------------------------------------------------------
1 | import os
2 | import subprocess
3 | import folder_paths
4 | from ..func import set_file_name,video_type
5 |
6 | current_path = os.path.abspath(__file__)
7 | font_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.normpath(__file__))), 'fonts')
8 | folder_paths.folder_names_and_paths["fonts"] = ([font_dir], {'.ttf'})
9 |
10 | class AddTextWatermark:
11 |
12 | # Initialization method
13 | def __init__(self):
14 | pass
15 |
16 | @classmethod
17 | def INPUT_TYPES(s):
18 | return {
19 | "required": {
20 | "video_path": ("STRING", {"default":"C:/Users/Desktop/video.mp4",}),
21 | "output_path": ("STRING", {"default":"C:/Users/Desktop/output/",}),
22 | 'font_file': (["default"] + folder_paths.get_filename_list("fonts"), ),
23 | 'font_size': ("INT", {"default": 15, "min": 1, "max": 1000, "step": 1}),
24 | 'font_color': ("STRING", {"default": "#FFFFFF"}),
25 | "text": ("STRING", {"default": "Watermark"}),
26 | "position_x": ("INT", {"default": 10, "step": 1}),
27 | "position_y": ("INT", {"default": 10, "step": 1}),
28 | },
29 | }
30 |
31 | RETURN_TYPES = ("STRING","STRING",)
32 | RETURN_NAMES = ("video_path","output_path",)
33 | FUNCTION = "add_text_watermark"
34 | OUTPUT_NODE = True
35 | CATEGORY = "🔥FFmpeg"
36 |
37 | def add_text_watermark(self,video_path,output_path,font_file,font_size,font_color,text,position_x,position_y):
38 | try:
39 | video_path = os.path.abspath(video_path).strip()
40 | output_path = os.path.abspath(output_path).strip()
41 | # Validate the input video file
42 | if not video_path.lower().endswith(video_type()):
43 | raise ValueError("video_path:"+video_path+"不是视频文件(video_path:"+video_path+" is not a video file)")
44 |
45 | if not os.path.exists(video_path):
46 | raise ValueError("video_path:"+video_path+"不存在(video_path:"+video_path+" does not exist)")
47 |
48 | # Check that output_path is a directory
49 | if not os.path.isdir(output_path):
50 | raise ValueError("output_path:"+output_path+"不是目录(output_path:"+output_path+" is not a directory)")
51 |
52 | file_name = set_file_name(video_path)
53 | output_path = os.path.join(output_path, file_name)
54 |
55 | # Convert backslashes to forward slashes and escape the drive colon for ffmpeg
56 | font_path = os.path.join(font_dir, font_file).replace("\\", "/").replace(":", "\\:")
57 | # Build the command; e.g. fontfile='C\\:/Windows/Fonts/simhei.ttf' or fontfile='J\\:/Comfyui-for-OOTDiffusion/ComfyUI/custom_nodes/ComfyUI-FFmpeg/fonts/Alibaba-PuHuiTi-Heavy.ttf'
58 | if font_file == "default":
59 | cmd = [
60 | 'ffmpeg',
61 | '-i', video_path,
62 | '-vf', f"drawtext=text='{text}':x={position_x}:y={position_y}:fontsize={font_size}:fontcolor={font_color}",
63 | output_path,
64 | ]
65 | else:
66 | cmd = [
67 | 'ffmpeg',
68 | '-i', video_path,
69 | '-vf', f"drawtext=text='{text}':x={position_x}:y={position_y}:fontfile='{font_path}':fontsize={font_size}:fontcolor={font_color}",
70 | output_path,
71 | ]
72 | result = subprocess.run(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
73 | # Check the return code
74 | if result.returncode != 0:
75 | # Print the error message if the command failed
76 | print(f"Error: {result.stderr.decode('utf-8')}")
77 | raise ValueError(f"Error: {result.stderr.decode('utf-8')}")
78 | else:
79 | # Print the standard output
80 | print(result.stdout)
81 | except Exception as e:
82 | raise ValueError(e)
83 |
84 | return (video_path,output_path)
--------------------------------------------------------------------------------
/nodes/extractAudio.py:
--------------------------------------------------------------------------------
1 | import os
2 | import subprocess
3 | from ..func import video_type
4 |
5 | class ExtractAudio:
6 | def __init__(self):
7 | pass
8 |
9 | @classmethod
10 | def INPUT_TYPES(cls):
11 | return {
12 | "required": {
13 | "video_path": ("STRING", {"default":"C:/Users/Desktop/video.mp4",}),
14 | "output_path": ("STRING", {"default":"C:/Users/Desktop/output",}),
15 | "audio_format": ([".m4a",".mp3",".wav",".aac",".flac",".wma",".ogg",".ac3",".amr",".aiff",".opus",".m4b",".caf",".dts"], {"default":".m4a",}),
16 | },
17 | }
18 |
19 | RETURN_TYPES = ("STRING",)
20 | RETURN_NAMES = ("audio_complete_path",)
21 | FUNCTION = "extract_audio"
22 | OUTPUT_NODE = True
23 | CATEGORY = "🔥FFmpeg"
24 |
25 | def extract_audio(self, video_path, output_path, audio_format):
26 | try:
27 | video_path = os.path.abspath(video_path).strip()
28 | output_path = os.path.abspath(output_path).strip()
29 | # Validate the input video file
30 | if not video_path.lower().endswith(video_type()):
31 | raise ValueError("video_path:"+video_path+"不是视频文件(video_path:"+video_path+" is not a video file)")
32 | if not os.path.isfile(video_path):
33 | raise ValueError("video_path:"+video_path+"不存在(video_path:"+video_path+" does not exist)")
34 |
35 |             # Check that output_path is a directory
36 | if not os.path.isdir(output_path):
37 | raise ValueError("output_path:"+output_path+"不是目录(output_path:"+output_path+" is not a directory)")
38 | file_name = os.path.splitext(os.path.basename(video_path))[0]
39 | output_path = os.path.join(output_path, file_name + audio_format)
40 |
41 | if audio_format == ".m4a":
42 | command = [
43 |                 'ffmpeg', '-i', video_path, # input video path
44 |                 '-vn', '-acodec', 'copy', # drop the video stream, copy the audio stream
45 | output_path,
46 | ]
47 | elif audio_format == ".mp3":
48 | command = [
49 |                 'ffmpeg', '-i', video_path, # input video path
50 |                 '-vn', '-c:a', 'libmp3lame', '-q:a','2', # -q:a 2: audio quality from 0 (best) to 9; 2 is usually a good balance of quality and file size
51 | output_path,
52 | ]
53 | elif audio_format == ".wav":
54 | command = [
55 |                 'ffmpeg', '-i', video_path, # input video path
56 | '-vn','-c:a','pcm_s16le',
57 | output_path,
58 | ]
59 | elif audio_format == ".aac":
60 | command = [
61 |                 'ffmpeg', '-i', video_path, # input video path
62 | '-vn','-c:a','aac',
63 | output_path,
64 | ]
65 | elif audio_format == ".flac":
66 | command = [
67 |                 'ffmpeg', '-i', video_path, # input video path
68 | '-vn','-c:a','flac',
69 | output_path,
70 | ]
71 | elif audio_format == ".wma":
72 | command = [
73 |                 'ffmpeg', '-i', video_path, # input video path
74 | '-vn','-c:a','wmav2',
75 | output_path,
76 | ]
77 | elif audio_format == ".ogg":
78 | command = [
79 |                 'ffmpeg', '-i', video_path, # input video path
80 | '-vn','-c:a','libvorbis','-q:a', '5',
81 | output_path,
82 | ]
83 | elif audio_format == ".ac3":
84 | command = [
85 |                 'ffmpeg', '-i', video_path, # input video path
86 | '-vn','-c:a','ac3',
87 | output_path,
88 | ]
89 | elif audio_format == ".amr":
90 | command = [
91 |                 'ffmpeg', '-i', video_path, # input video path
92 | '-vn','-c:a','libopencore_amrnb','-ar', '8000','-b:a','12.2k',
93 | '-ac','1',
94 | output_path,
95 | ]
96 | elif audio_format == ".aiff":
97 | command = [
98 |                 'ffmpeg', '-i', video_path, # input video path
99 | '-vn','-c:a','pcm_s16be',
100 | output_path,
101 | ]
102 | elif audio_format == ".opus":
103 | command = [
104 |                 'ffmpeg', '-i', video_path, # input video path
105 | '-vn','-c:a','libopus',
106 | output_path,
107 | ]
108 | elif audio_format == ".m4b":
109 | command = [
110 |                 'ffmpeg', '-i', video_path, # input video path
111 | '-vn','-c:a','aac',
112 |                 '-b:a', '128k', # set the bitrate to 128 kbps
113 | output_path,
114 | ]
115 | elif audio_format == ".caf":
116 | command = [
117 |                 'ffmpeg', '-i', video_path, # input video path
118 | '-vn','-c:a','pcm_s16le',
119 | output_path,
120 | ]
121 | elif audio_format == ".dts":
122 | command = [
123 |                 'ffmpeg', '-i', video_path, # input video path
124 | '-vn','-c:a','dca',
125 | '-strict','-2',
126 | output_path,
127 | ]
128 | else:
129 | raise ValueError("不支持的音频格式:"+audio_format+"(Unsupported audio formats:"+audio_format+")")
130 |
131 |             # Run the command and check for errors
132 |             result = subprocess.run(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
133 |             # Check the return code
134 |             if result.returncode != 0:
135 |                 # Print the error message if the command failed
136 |                 print(f"Error: {result.stderr.decode('utf-8')}")
137 |                 raise ValueError(f"Error: {result.stderr.decode('utf-8')}")
138 |             else:
139 |                 # Print the standard output
140 | print(result.stdout)
141 |
142 | return (output_path,)
143 | except Exception as e:
144 | raise ValueError(e)
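
For reference, every branch above follows the same pattern and only swaps the codec arguments. A minimal standalone sketch of the ".mp3" branch (file names are hypothetical; ffmpeg is assumed to be on PATH):

    import subprocess

    # -vn drops the video stream; -q:a 2 is a good quality/size balance for libmp3lame.
    cmd = ["ffmpeg", "-i", "input.mp4", "-vn", "-c:a", "libmp3lame", "-q:a", "2", "output.mp3"]
    result = subprocess.run(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
    if result.returncode != 0:
        print(result.stderr.decode("utf-8", errors="replace"))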
--------------------------------------------------------------------------------
/nodes/frames2video.py:
--------------------------------------------------------------------------------
1 | import os
2 | import subprocess
3 | from ..func import get_image_size,generate_template_string
4 |
5 | class Frames2Video:
6 |
7 |     # Initialization method
8 | def __init__(self):
9 | pass
10 |
11 | @classmethod
12 | def INPUT_TYPES(s):
13 | return {
14 | "required": {
15 | "frame_path": ("STRING", {"default": "C:/Users/Desktop",}),
16 | "fps": ("FLOAT", {
17 | "default": 30,
18 | "min": 1,
19 | "max": 120,
20 | "step": 1,
21 | "display": "number",
22 | }),
23 | "video_name": ("STRING", {"default": "new_video"}),
24 | "output_path": ("STRING", {"default": "C:/Users/Desktop/output"}),
25 | "device":(["CPU","GPU"],{"default": "CPU",}),
26 | },
27 | "optional":{
28 | "audio_path":("STRING",{"default": "C:/Users/audio.mp3",}),
29 | }
30 | }
31 |
32 | RETURN_TYPES = ("STRING","STRING",)
33 | RETURN_NAMES = ("frame_path","output_path",)
34 | FUNCTION = "frames2video"
35 | OUTPUT_NODE = True
36 | CATEGORY = "🔥FFmpeg"
37 |
38 |     def frames2video(self, frame_path, fps, video_name, output_path, device, audio_path=""):  # audio_path is an optional input, so give it a default
39 | try:
40 | frame_path = os.path.abspath(frame_path).strip()
41 | output_path = os.path.abspath(output_path).strip()
42 | if audio_path != "":
43 | audio_path = os.path.abspath(audio_path).strip()
44 | if not os.path.exists(audio_path):
45 | raise ValueError("audio_path:"+audio_path+"不存在(audio_path:"+audio_path+" does not exist)")
46 | if not os.path.exists(frame_path):
47 | raise ValueError("frame_path:"+frame_path+"不存在(frame_path:"+frame_path+" does not exist)")
48 |
49 |             # Check that output_path is a directory
50 | if not os.path.isdir(output_path):
51 | raise ValueError("output_path:"+output_path+"不是目录(output_path:"+output_path+" is not a directory)")
52 |
53 |             # Join the output directory and the video name into a single output path
54 |             output_path = os.path.join(output_path, f"{video_name}.mp4")
55 |             # Collect all image files from the input directory
56 |             valid_extensions = ('.jpg', '.jpeg', '.png', '.gif', '.bmp', '.tiff')
57 |             # Gather the frames and sort them by file name
58 |             images = [os.path.join(frame_path, f) for f in os.listdir(frame_path) if f.endswith(valid_extensions)]
59 |             # Sorting by name keeps the frame order stable
60 |             images.sort()
61 |
62 | if len(images) == 0:
63 | raise FileNotFoundError("目录:"+frame_path+"中没有图片文件(No image files found in directory:"+frame_path+")")
64 |
65 |             # Build the ffmpeg command
66 |             width,height = get_image_size(images[0])
67 | img_template_string = generate_template_string(os.path.basename(images[0]))
68 | if audio_path != '':
69 | if device == "CPU":
70 | cmd = [
71 | 'ffmpeg',
72 | '-framerate', str(fps),
73 | '-i', f'{frame_path}/{img_template_string}',
74 |                         '-i', audio_path, # audio file path
75 | '-vf', f'scale={width}:{height}',
76 | '-c:v', 'libx264',
77 | '-crf', '28',
78 | '-pix_fmt', 'yuv420p',
79 | '-shortest',
80 | '-y',
81 | str(output_path)
82 | ]
83 | else:
84 | cmd = [
85 | 'ffmpeg',
86 | '-framerate', str(fps),
87 | '-i', f'{frame_path}/{img_template_string}',
88 |                         '-i', audio_path, # audio file path
89 |                         '-vf', f'scale={width}:{height}',
90 |                         '-c:v', 'h264_nvenc', # GPU-accelerated NVENC encoder
91 |                         '-preset', 'fast', # choose a suitable preset
92 |                         '-cq', '22', # NVENC quality setting (similar to CRF)
93 | '-pix_fmt', 'yuv420p',
94 | '-shortest',
95 | '-y',
96 | str(output_path)
97 | ]
98 |
99 | else:
100 | if device == "CPU":
101 | cmd = [
102 | 'ffmpeg',
103 | '-framerate', str(fps),
104 | '-i', f'{frame_path}/{img_template_string}',
105 | '-vf', f'scale={width}:{height}',
106 | '-c:v', 'libx264',
107 | '-crf', '28',
108 | '-pix_fmt', 'yuv420p',
109 | '-shortest',
110 | '-y',
111 | str(output_path)
112 | ]
113 | else:
114 | cmd = [
115 | 'ffmpeg',
116 | '-framerate', str(fps),
117 | '-i', f'{frame_path}/{img_template_string}',
118 | '-vf', f'scale={width}:{height}',
119 |                         '-c:v', 'h264_nvenc', # GPU-accelerated NVENC encoder
120 |                         '-preset', 'fast', # choose a suitable preset
121 |                         '-cq', '22', # NVENC quality setting (similar to CRF)
122 | '-pix_fmt', 'yuv420p',
123 | '-shortest',
124 | '-y',
125 | str(output_path)
126 | ]
127 |             # Run the ffmpeg command
128 |             result = subprocess.run(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
129 |             if result.returncode != 0:
130 |                 # Print the error message if the command failed
131 |                 print(f"Error: {result.stderr.decode('utf-8')}")
132 |                 raise ValueError(f"Error: {result.stderr.decode('utf-8')}")
133 |             else:
134 |                 # Print the standard output
135 |                 print(result.stdout)
136 |             frame_path = str(frame_path) # return the frame directory as a string
137 | return (frame_path,output_path)
138 | except Exception as e:
139 | raise ValueError(e)
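
For reference, the CPU branch above is a standard image-sequence encode. A minimal standalone sketch (the pattern "frame_%08d.png" is a hypothetical example of what generate_template_string might return for frames named frame_00000001.png, frame_00000002.png, ...):

    import subprocess

    # Encode a numbered PNG sequence into an H.264 mp4, mirroring the CPU branch above.
    cmd = [
        "ffmpeg", "-framerate", "30",
        "-i", "frames/frame_%08d.png",
        "-c:v", "libx264", "-crf", "28", "-pix_fmt", "yuv420p",
        "-y", "new_video.mp4",
    ]
    subprocess.run(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE)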
--------------------------------------------------------------------------------
/nodes/imageCopy.py:
--------------------------------------------------------------------------------
1 | from ..func import copy_images_to_directory
2 |
3 | class AnyType(str):
4 | def __ne__(self, __value: object) -> bool:
5 | return False
6 |
7 |
8 | any_type = AnyType("*")
9 |
10 | class ImageCopy:
11 | def __init__(self):
12 | pass
13 |
14 | @classmethod
15 | def INPUT_TYPES(cls):
16 | return {
17 | "required": {
18 | "image_paths": (any_type,),
19 | "output_path": ("STRING", {"default": "C:/Users/Desktop/output"}),
20 | },
21 | }
22 |
23 | RETURN_TYPES = ("LIST","INT","STRING")
24 | RETURN_NAMES = ("image_paths","image_length","output_path")
25 | FUNCTION = "image_copy"
26 | OUTPUT_NODE = True
27 | CATEGORY = "🔥FFmpeg/auxiliary tool"
28 |
29 | def image_copy(self, image_paths, output_path):
30 | try:
31 | image_output_path = copy_images_to_directory(image_paths,output_path)
32 | return (image_output_path,len(image_output_path),output_path)
33 | except Exception as e:
34 | raise ValueError(e)
--------------------------------------------------------------------------------
/nodes/imagePath2Tensor.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from PIL import ImageOps
3 | import comfy
4 | from PIL import Image
5 | import numpy as np
6 | from ..func import clear_memory
7 | import gc
8 |
9 | class AnyType(str):
10 | def __ne__(self, __value: object) -> bool:
11 | return False
12 |
13 | any_type = AnyType("*")
14 |
15 | class ImagePath2Tensor:
16 | def __init__(self):
17 | pass
18 |
19 | @classmethod
20 | def INPUT_TYPES(cls):
21 | return {
22 | "required": {
23 | "image_paths": (any_type,),
24 | },
25 | }
26 |
27 | RETURN_TYPES = ("IMAGE","INT")
28 | RETURN_NAMES = ("image","image_count")
29 | FUNCTION = "image_path_to_tensor"
30 | OUTPUT_NODE = True
31 | CATEGORY = "🔥FFmpeg/auxiliary tool"
32 |
33 | def image_path_to_tensor(self, image_paths):
34 |
35 | #['D:\\Cache\\222\\frame_00000121.png', 'D:\\Cache\\222\\frame_00000122.png']
36 | images = []
37 | for image_path in image_paths:
38 | try:
39 | # Open and process the image
40 | with Image.open(image_path) as img:
41 | img = ImageOps.exif_transpose(img).convert("RGB")
42 |                     # Convert directly to a tensor
43 | image_tensor = torch.from_numpy(np.array(img).astype(np.float32) / 255.0).unsqueeze(0)
44 | images.append(image_tensor)
45 | except Exception as e:
46 | print(f"Error processing image {image_path}: {e}")
47 | continue # Skip to the next image on error
48 |
49 | if not images:
50 | raise ValueError("No images loaded successfully.")
51 |
52 | if len(images) == 1:
53 | return (images[0], 1)
54 |
55 |         # Merge multiple images into a single batch
56 |
57 | image1 = images[0]
58 | for image2 in images[1:]:
59 | if image1.shape[1:] != image2.shape[1:]:
60 |                 # Resize to match the first image, then concatenate
61 | image2 = comfy.utils.common_upscale(image2.movedim(-1, 1), image1.shape[2], image1.shape[1], "bilinear", "center").movedim(1, -1)
62 | image1 = torch.cat((image1, image2), dim=0)
63 |
64 | length = len(images)
65 | result = (image1, length)
66 | del images
67 | del image1
68 | clear_memory()
69 |
70 | return result
--------------------------------------------------------------------------------
/nodes/imagesSave.py:
--------------------------------------------------------------------------------
1 | import os
2 | import torch
3 | import gc
4 | from concurrent.futures import ThreadPoolExecutor
5 | from ..func import save_image,clear_memory
6 | file_name_num_start = 0
7 |
8 | class ImagesSave:
9 | def __init__(self):
10 | pass
11 |
12 | @classmethod
13 | def INPUT_TYPES(cls):
14 | return {
15 | "required": {
16 | "images": ("IMAGE", ),
17 | "output_path": ("STRING", {"default":"C:/Users/Desktop/output",}),
18 | },
19 | }
20 |
21 | RETURN_TYPES = ("INT",)
22 | RETURN_NAMES = ("images_length",)
23 | FUNCTION = "images_save"
24 | OUTPUT_NODE = True
25 | CATEGORY = "🔥FFmpeg/auxiliary tool"
26 |
27 | def images_save(self, images,output_path):
28 | try:
29 | output_path = os.path.abspath(output_path).strip()
30 |             # Check that output_path is a directory
31 | if not os.path.isdir(output_path):
32 | raise ValueError("output_path:"+output_path+"不是目录(output_path:"+output_path+" is not a directory)")
33 |
34 | count = 0
35 | global file_name_num_start
36 | if len(os.listdir(output_path)) == 0:
37 |                 file_name_num_start = 0 # restart numbering at 0 when the directory is empty; otherwise merging the frames into a video can fail
38 | with ThreadPoolExecutor() as executor:
39 | futures = []
40 | for image in images:
41 | file_name_num_start += 1
42 | futures.append(executor.submit(save_image, image, os.path.join(output_path, f"output_image_{file_name_num_start:09d}.png")))
43 | count += 1
44 |
45 | for future in futures:
46 |                 future.result() # wait for all save tasks to finish
47 | del images
48 | clear_memory()
49 |
50 | return (count,)
51 | except Exception as e:
52 | raise ValueError(e)
--------------------------------------------------------------------------------
/nodes/loadImageFromDir.py:
--------------------------------------------------------------------------------
1 | from ..func import get_image_paths_from_directory
2 |
3 | class LoadImageFromDir:
4 | def __init__(self):
5 | pass
6 |
7 | @classmethod
8 | def INPUT_TYPES(cls):
9 | return {
10 | "required": {
11 | "images_path": ("STRING", {"default":"C:/Users/Desktop/",}),
12 | "start_index": ("INT",{"default":0,"min":0,}),
13 | "length": ("INT",{"default":0,"min":0,})
14 | },
15 | }
16 |
17 | RETURN_TYPES = ("LIST",)
18 | RETURN_NAMES = ("image_paths",)
19 | FUNCTION = "load_image_from_dir"
20 | OUTPUT_NODE = True
21 | CATEGORY = "🔥FFmpeg/auxiliary tool"
22 |
23 | def load_image_from_dir(self, images_path, start_index, length):
24 | try:
25 | image_paths = get_image_paths_from_directory(images_path, start_index, length)
26 | return (image_paths,)
27 | except Exception as e:
28 | raise ValueError(e)
--------------------------------------------------------------------------------
/nodes/mergingVideoByPlenty.py:
--------------------------------------------------------------------------------
1 | import os
2 | import subprocess
3 | import torch
4 | import time
5 | from ..func import get_video_files,set_file_name
6 |
7 | class MergingVideoByPlenty:
8 | def __init__(self):
9 | pass
10 |
11 | @classmethod
12 | def INPUT_TYPES(cls):
13 | return {
14 | "required": {
15 | "video_path": ("STRING", {"default":"C:/Users/Desktop/",}),
16 | "output_path": ("STRING", {"default": "C:/Users/Desktop/output"}),
17 | },
18 | }
19 |
20 | RETURN_TYPES = ("STRING",)
21 | RETURN_NAMES = ("video_complete_path",)
22 | FUNCTION = "merging_video_by_plenty"
23 | OUTPUT_NODE = True
24 | CATEGORY = "🔥FFmpeg"
25 |
26 | def merging_video_by_plenty(self, video_path, output_path):
27 | try:
28 | video_path = os.path.abspath(video_path).strip()
29 | output_path = os.path.abspath(output_path).strip()
30 |
31 |             # Check that video_path and output_path are directories
32 | if not os.path.isdir(video_path):
33 | raise ValueError("video_path:"+video_path+"不是目录(video_path:"+video_path+" is not a directory)")
34 | if not os.path.isdir(output_path):
35 | raise ValueError("output_path:"+output_path+"不是目录(output_path:"+output_path+" is not a directory)")
36 |
37 | video_files = get_video_files(video_path)
38 |
39 | if len(video_files) == 0:
40 | raise ValueError("video_path:"+video_path+"目录下没有视频文件(No video files found in the video_path directory)")
41 |
42 | filelist_file_name = os.path.join(output_path,'filelist.txt')
43 |
44 | with open(filelist_file_name, 'w') as f:
45 | for video in video_files:
46 | f.write(f"file '{video}'\n")
47 |
48 | file_name = set_file_name(video_files[0])
49 | output_path = os.path.join(output_path, file_name)
50 |
51 | command = [
52 | 'ffmpeg', '-f', 'concat','-safe','0','-i',filelist_file_name,
53 |                 '-c','copy',output_path, # output video path
54 | ]
55 |
56 |             # Run the command and check for errors
57 |             result = subprocess.run(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
58 |             # Check the return code
59 |             if result.returncode != 0:
60 |                 # Print the error message if the command failed
61 |                 print(f"Error: {result.stderr.decode('utf-8')}")
62 |                 raise ValueError(f"Error: {result.stderr.decode('utf-8')}")
63 |             else:
64 |                 # Print the standard output
65 | print(result.stdout)
66 |
67 | return (output_path,)
68 | except Exception as e:
69 | raise ValueError(e)
--------------------------------------------------------------------------------
/nodes/mergingVideoByTwo.py:
--------------------------------------------------------------------------------
1 | import os
2 | import subprocess
3 | from ..func import has_audio,getVideoInfo,set_file_name,video_type
4 | import torch
5 |
6 | device = "cuda" if torch.cuda.is_available() else "cpu"
7 |
8 | class MergingVideoByTwo:
9 | def __init__(self):
10 | pass
11 |
12 | @classmethod
13 | def INPUT_TYPES(cls):
14 | return {
15 | "required": {
16 | "video1_path": ("STRING", {"default":"C:/Users/Desktop/video1.mp4",}),
17 | "video2_path": ("STRING", {"default":"C:/Users/Desktop/video2.mp4",}),
18 | "device": (["cpu","cuda"], {"default":device,}),
19 | "resolution_reference": (["video1","video2"], {"default":"video1",}),
20 | "output_path": ("STRING", {"default": "C:/Users/Desktop/output"}),
21 | },
22 | }
23 |
24 | RETURN_TYPES = ("STRING",)
25 | RETURN_NAMES = ("video_complete_path",)
26 | FUNCTION = "merging_video_by_two"
27 | OUTPUT_NODE = True
28 | CATEGORY = "🔥FFmpeg"
29 |
30 | def merging_video_by_two(self, video1_path, video2_path,device,resolution_reference,output_path):
31 | try:
32 | video1_path = os.path.abspath(video1_path).strip()
33 | video2_path = os.path.abspath(video2_path).strip()
34 | output_path = os.path.abspath(output_path).strip()
35 |             # Validate the input videos
36 | if not video1_path.lower().endswith(video_type()):
37 | raise ValueError("video1_path:"+video1_path+"不是视频文件(video1_path:"+video1_path+" is not a video file)")
38 | if not os.path.isfile(video1_path):
39 | raise ValueError("video1_path:"+video1_path+"不存在(video1_path:"+video1_path+" does not exist)")
40 |
41 | if not video2_path.lower().endswith(video_type()):
42 | raise ValueError("video2_path:"+video2_path+"不是视频文件(video2_path:"+video2_path+" is not a video file)")
43 | if not os.path.isfile(video2_path):
44 | raise ValueError("video2_path:"+video2_path+"不存在(video2_path:"+video2_path+" does not exist)")
45 |
46 |             # Check that output_path is a directory
47 | if not os.path.isdir(output_path):
48 | raise ValueError("output_path:"+output_path+"不是目录(output_path:"+output_path+" is not a directory)")
49 |
50 | video1_audio = has_audio(video1_path)
51 | video2_audio = has_audio(video2_path)
52 |
53 | final_output = set_file_name(video1_path)
54 |
55 |             # The output file name is generated from the current date and time
56 | output_path = os.path.join(output_path, final_output)
57 |
58 | video = {
59 | 'video1': video1_path,
60 | 'video2': video2_path,
61 | }.get(resolution_reference, video1_path)
62 |
63 | video_info = getVideoInfo(video)
64 |
65 | width = video_info['width']
66 | height = video_info['height']
67 |
68 | use_cuvid = ""
69 |             use_encoder = "-c:v libx264" # default to CPU encoding
70 |
71 | if device == "cuda":
72 | use_cuvid = "-hwaccel cuda"
73 | use_encoder = "-c:v h264_nvenc"
74 |
75 |             if video1_audio and video2_audio: # both videos have audio
76 | command = f'ffmpeg {use_cuvid} -i {video1_path} -i {video2_path} -filter_complex \
77 | "[0:v]scale={width}:{height},setsar=1,setpts=PTS-STARTPTS[v0]; \
78 | [1:v]scale={width}:{height},setsar=1,setpts=PTS-STARTPTS[v1]; \
79 | [v0][v1]concat=n=2:v=1:a=0[outv]; \
80 | [0:a][1:a]concat=n=2:v=0:a=1[outa]" \
81 | -map "[outv]" -map "[outa]" -r 30 {use_encoder} -c:a aac -ar 44100 -b:a 128k {output_path}'
82 |             elif video1_audio and not video2_audio: # only the first video has audio
83 | command = f'ffmpeg {use_cuvid} -i {video1_path} -i {video2_path} -filter_complex \
84 | "[0:v]scale={width}:{height},setsar=1,setpts=PTS-STARTPTS[v0]; \
85 | [1:v]scale={width}:{height},setsar=1,setpts=PTS-STARTPTS[v1]; \
86 | [v0][v1]concat=n=2:v=1:a=0[outv]" \
87 | -map "[outv]" -map "0:a" -r 30 {use_encoder} -c:a aac -ar 44100 -b:a 128k {output_path}'
88 |             elif not video1_audio and video2_audio: # only the second video has audio
89 |                 video_info = getVideoInfo(video1_path)
90 |                 duration = video_info['duration']
91 |                 delay_time = int(duration * 1000) # convert to milliseconds
92 |
93 | command = f'ffmpeg {use_cuvid} -i {video1_path} -i {video2_path} -filter_complex \
94 | "[0:v]scale={width}:{height},setsar=1,setpts=PTS-STARTPTS[v0]; \
95 | [1:v]scale={width}:{height},setsar=1,setpts=PTS-STARTPTS[v1]; \
96 | [v0][v1]concat=n=2:v=1:a=0[outv]; \
97 | [1:a]adelay={delay_time}|{delay_time}[a1]; \
98 | [a1]concat=n=1:v=0:a=1[outa]" \
99 | -map "[outv]" -map "[outa]" -r 30 {use_encoder} -c:a aac -ar 44100 -b:a 128k {output_path}'
100 |             else: # neither video has audio
101 | command = f'ffmpeg {use_cuvid} -i {video1_path} -i {video2_path} -filter_complex \
102 | "[0:v]scale={width}:{height},setsar=1,setpts=PTS-STARTPTS[v0]; \
103 | [1:v]scale={width}:{height},setsar=1,setpts=PTS-STARTPTS[v1]; \
104 | [v0][v1]concat=n=2:v=1:a=0[outv]" \
105 | -map "[outv]" -r 30 {use_encoder} -an {output_path}'
106 |
107 |
108 |             # Run the command and check for errors (the command is a single string, so it needs shell=True)
109 |             result = subprocess.run(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
110 |             # Check the return code
111 |             if result.returncode != 0:
112 |                 # Print the error message if the command failed
113 |                 print(f"Error: {result.stderr.decode('utf-8')}")
114 |                 raise ValueError(f"Error: {result.stderr.decode('utf-8')}")
115 |             else:
116 |                 # Print the standard output
117 | print(result.stdout)
118 |
119 | return (output_path,)
120 | except Exception as e:
121 | raise ValueError(e)
--------------------------------------------------------------------------------
/nodes/multiCuttingVideo.py:
--------------------------------------------------------------------------------
1 | import os
2 | import subprocess
3 | from ..func import video_type
4 |
5 | class MultiCuttingVideo:
6 | def __init__(self):
7 | pass
8 |
9 | @classmethod
10 | def INPUT_TYPES(cls):
11 | return {
12 | "required": {
13 | "video_path": ("STRING", {"default":"C:/Users/Desktop/video.mp4",}),
14 | "output_path": ("STRING", {"default":"C:/Users/Desktop/output",}),
15 | "segment_time": ("INT",{"default":10,"min":1,}),
16 | },
17 | }
18 |
19 | RETURN_TYPES = ("STRING",)
20 | RETURN_NAMES = ("video_complete_path",)
21 | FUNCTION = "multi_cutting_video"
22 | OUTPUT_NODE = True
23 | CATEGORY = "🔥FFmpeg"
24 |
25 |     # Cut the video into multiple segments. Splitting happens on keyframes, so segment_time should not be too short; each segment's duration is only approximately segment_time (the nearest keyframe boundary).
26 | def multi_cutting_video(self, video_path, output_path,segment_time):
27 | try:
28 | video_path = os.path.abspath(video_path).strip()
29 | output_path = os.path.abspath(output_path).strip()
30 |             # Validate that video_path is an existing video file
31 | if not video_path.lower().endswith(video_type()):
32 | raise ValueError("video_path:"+video_path+"不是视频文件(video_path:"+video_path+" is not a video file)")
33 | if not os.path.isfile(video_path):
34 | raise ValueError("video_path:"+video_path+"不存在(video_path:"+video_path+" does not exist)")
35 |
36 |             # Check that output_path is a directory
37 | if not os.path.isdir(output_path):
38 | raise ValueError("output_path:"+output_path+"不是目录(output_path:"+output_path+" is not a directory)")
39 |
40 | file_full_name = os.path.basename(video_path)
41 | file_name = os.path.splitext(file_full_name)[0]
42 | file_extension = os.path.splitext(file_full_name)[1]
43 |
44 | #ffmpeg -i input.mp4 -f segment -segment_time 30 -c copy output%03d.mp4
45 |
46 | command = [
47 | 'ffmpeg', '-i', video_path, # 输入视频路径
48 |                 '-f', 'segment','-reset_timestamps','1',"-segment_time",str(segment_time), # split into segments of roughly segment_time seconds
49 |                 '-c','copy',output_path+os.sep+file_name+"_%08d"+file_extension, # output segment name pattern
50 | ]
51 |
52 |             # Run the command and check for errors
53 |             result = subprocess.run(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
54 |             # Check the return code
55 |             if result.returncode != 0:
56 |                 # Print the error message if the command failed
57 |                 print(f"Error: {result.stderr.decode('utf-8')}")
58 |                 raise ValueError(f"Error: {result.stderr.decode('utf-8')}")
59 |             else:
60 |                 # Print the standard output
61 | print(result.stdout)
62 |
63 | return (output_path,)
64 | except Exception as e:
65 | raise ValueError(e)
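
For reference, the node wraps ffmpeg's segment muxer with stream copy, so nothing is re-encoded and splits land on keyframes. A minimal standalone sketch (file names are hypothetical):

    import subprocess

    # Split input.mp4 into ~10-second pieces named clip_00000000.mp4, clip_00000001.mp4, ...
    cmd = [
        "ffmpeg", "-i", "input.mp4",
        "-f", "segment", "-reset_timestamps", "1", "-segment_time", "10",
        "-c", "copy", "clip_%08d.mp4",
    ]
    subprocess.run(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE)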
--------------------------------------------------------------------------------
/nodes/pipVideo.py:
--------------------------------------------------------------------------------
1 | import os
2 | import subprocess
3 | from ..func import has_audio,getVideoInfo,set_file_name,video_type
4 | import torch
5 | import math
6 |
7 | device = "cuda" if torch.cuda.is_available() else "cpu"
8 |
9 | class PipVideo:
10 | def __init__(self):
11 | pass
12 |
13 | @classmethod
14 | def INPUT_TYPES(cls):
15 | return {
16 | "required": {
17 |                 "video1_path": ("STRING", {"default":"C:/Users/Desktop/video1.mp4", "tooltip": "Background video of the picture-in-picture."}),
18 |                 "video2_path": ("STRING", {"default":"C:/Users/Desktop/video2.mp4", "tooltip": "Foreground video of the picture-in-picture."}),
19 |                 "device": (["cpu","cuda"], {"default":device,}),
20 |                 "use_audio": (["video1","video2"], {"default":"video1", "tooltip": "Which video's audio track the final video uses."}),
21 |                 "use_duration": (["video1","video2"], {"default":"video2", "tooltip": "Which video's duration the final video uses."}),
22 |                 "align_type":(["top-left","top-right", "bottom-left", "bottom-right", "center"], {"default":"center",}),
23 |                 "pip_fg_zoom": ("FLOAT", { "default": 2.5, "min": 1, "max": 100, "step": 0.5, "tooltip": "Foreground zoom factor: the foreground width equals the background width divided by this value, so larger values make the foreground smaller."}),
24 |                 "output_path": ("STRING", {"default": "C:/Users/Desktop/output"}),
25 |                 "scale_and_crop": (["none","540*960", "960*540"], {"default": "none", "tooltip": "Target resolution for scaling and cropping."}), # controls the optional scale/crop of the output
26 |                 "fps": ("FLOAT", {"min": 0, "max": 60, "step": 0.1, "default": 30.0, "tooltip": "Forced frame rate of the merged video; 0 uses video1's frame rate, 1 uses video2's frame rate."}),
27 |                 "is_chromakey": ("BOOLEAN", { "default": False ,"label_on": "remove green screen", "label_off": "keep green screen", "tooltip": "Whether to key out a green-screen background from the foreground video."}), # green-screen transparency toggle
28 | },
29 | }
30 |
31 | RETURN_TYPES = ("STRING","INT","INT","FLOAT","FLOAT",)
32 | RETURN_NAMES = ("video_complete_paths","width","height","duration","fps",)
33 | FUNCTION = "pip_video"
34 | OUTPUT_NODE = True
35 | CATEGORY = "🔥FFmpeg"
36 |     DESCRIPTION = """Overlay two videos as a picture-in-picture: control where the foreground video2 appears on the background video1,
37 |     and set the foreground zoom factor and whether to remove a green-screen background."""
38 |
39 | def pip_video(self, video1_path, video2_path,device,use_audio,use_duration, align_type,pip_fg_zoom, output_path,scale_and_crop,fps,is_chromakey):
40 | try:
41 | video1_path = os.path.abspath(video1_path).strip()
42 | video2_path = os.path.abspath(video2_path).strip()
43 | output_path = os.path.abspath(output_path).strip()
44 |             # Validate the input videos
45 | if not video1_path.lower().endswith(video_type()):
46 | raise ValueError("video1_path:"+video1_path+"不是视频文件(video1_path:"+video1_path+" is not a video file)")
47 | if not os.path.isfile(video1_path):
48 | raise ValueError("video1_path:"+video1_path+"不存在(video1_path:"+video1_path+" does not exist)")
49 |
50 | if not video2_path.lower().endswith(video_type()):
51 | raise ValueError("video2_path:"+video2_path+"不是视频文件(video2_path:"+video2_path+" is not a video file)")
52 | if not os.path.isfile(video2_path):
53 | raise ValueError("video2_path:"+video2_path+"不存在(video2_path:"+video2_path+" does not exist)")
54 |
55 |             # Check that output_path is a directory
56 | if not os.path.isdir(output_path):
57 | raise ValueError("output_path:"+output_path+"不是目录(output_path:"+output_path+" is not a directory)")
58 |
59 | video1_audio = has_audio(video1_path)
60 | video2_audio = has_audio(video2_path)
61 |
62 | final_output = set_file_name(video1_path)
63 |             # The output file name is generated from the current date and time
64 | output_path = os.path.join(output_path, final_output)
65 |
66 | use_cuvid = ""
67 |             use_encoder = "-c:v libx264" # default to CPU encoding
68 |
69 | if device == "cuda":
70 | use_cuvid = "-hwaccel cuda"
71 | use_encoder = "-c:v h264_nvenc"
72 |
73 | video_info = getVideoInfo(video1_path)
74 | video_info1 = getVideoInfo(video2_path)
75 | if use_duration == "video1":
76 | duration_1 = video_info['duration']
77 | else:
78 | duration_1 = video_info1['duration']
79 | if fps==0:
80 | fps=video_info['fps']
81 | elif fps==1:
82 | fps=video_info1['fps']
83 |
84 |             # Encoders such as libx264/libx265 require the width and height to be even; if this errors, try switching device to GPU
85 | width = math.ceil(video_info['width']/2)*2
86 | height = math.ceil(video_info['height']/2)*2
87 |
88 | use_audio_index = {
89 | 'video1': '0',
90 | 'video2': '1',
91 | }.get(use_audio, '0')
92 |
93 | align_position = {
94 | "top-left": f"0:0",
95 | "top-right": f"(W-w):0",
96 | "bottom-left": f"0:(H-h)",
97 | "bottom-right": f"(W-w):(H-h)",
98 | "center": f"(W-w)/2:(H-h)/2",
99 | }.get(align_type, f"(W-w)/2:(H-h)/2")
100 |
101 |             if height*540/width>960: # if scaling the width to 540 pushes the height past 960, crop
102 |                 pad_or_crop1='crop=540:960:(ow-iw)/2:(oh-ih)/2'
103 |             else: # otherwise pad to 540x960 with black bars
104 |                 pad_or_crop1='pad=540:960:(ow-iw)/2:(oh-ih)/2:color=black'
105 |             if height*960/width>540: # if scaling the width to 960 pushes the height past 540, crop
106 | pad_or_crop2='crop=960:540:(ow-iw)/2:(oh-ih)/2'
107 | else:
108 | pad_or_crop2='pad=960:540:(ow-iw)/2:(oh-ih)/2:color=black'
109 | scale_and_crop_data = {
110 | 'none': 'null',
111 | '540*960': f'scale=540:-1,setsar=1,{pad_or_crop1}',
112 | '960*540': f'scale=960:-1,setsar=1,{pad_or_crop2}',
113 | }.get(scale_and_crop, 'null')
114 |
115 | video2_width = {
116 | 'none': f'{width}',
117 | '540*960': '540',
118 | '960*540': '960',
119 | }.get(scale_and_crop, f'{width}')
120 |
121 | final_out = {
122 | 'none': f'scale={width}:{height}:force_original_aspect_ratio=disable,setsar=1',
123 | '540*960': 'scale=540:960:force_original_aspect_ratio=disable,setsar=1',
124 | '960*540': 'scale=960:540:force_original_aspect_ratio=disable,setsar=1',
125 | }.get(scale_and_crop, f'scale={width}:{height}:force_original_aspect_ratio=disable,setsar=1')
126 |
127 |             # Green-screen (chroma key) removal for the foreground
128 | if is_chromakey:
129 | chromakey="chromakey=0x00FF00:0.3:0.1,format=yuva420p"
130 | else:
131 | chromakey="null"
132 |
133 |
134 | if video1_audio or video2_audio:
135 |                 # -map {use_audio_index}:a? selects which input's audio stream to use
136 | command = fr'ffmpeg "-y" {use_cuvid} -stream_loop -1 -i "{video1_path}" -stream_loop -1 -i "{video2_path}" -filter_complex "[0:v]fps={fps},setpts=PTS-STARTPTS[bg];[1:v]fps={fps},setpts=PTS-STARTPTS[fg];[bg]{scale_and_crop_data}[bg_out];[fg]{chromakey}[fgd];[fgd]scale={video2_width}/{pip_fg_zoom}:-1,setsar=1[fg_out];[bg_out][fg_out]overlay={align_position}[out];[out]{final_out}[final_out]" -map "[final_out]" -map {use_audio_index}:a? {use_encoder} -c:a aac -t {duration_1} "{output_path}"'
137 | else:
138 | command = fr'ffmpeg "-y" {use_cuvid} -stream_loop -1 -i "{video1_path}" -stream_loop -1 -i "{video2_path}" -filter_complex "[0:v]fps={fps},setpts=PTS-STARTPTS[bg];[1:v]fps={fps},setpts=PTS-STARTPTS[fg];[bg]{scale_and_crop_data}[bg_out];[fg]{chromakey}[fgd];[fgd]scale={video2_width}/{pip_fg_zoom}:-1,setsar=1[fg_out];[bg_out][fg_out]overlay={align_position}[out];[out]{final_out}[final_out]" -map "[final_out]" -t {duration_1} "{output_path}"'
139 |
140 | print(f">>>{command}")
141 |
142 |             # Run the command and check for errors (the command is a single string, so it needs shell=True)
143 |             result = subprocess.run(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
144 | #print("command result",result.returncode)
145 |
146 |             # Check the return code
147 |             if result.returncode != 0:
148 |                 # Print the error message if the command failed
149 |                 print(f"Error: {result.stderr.decode('utf-8')}")
150 |                 if device == "cuda":
151 |                     print(f"*** Current device [{device}]: retrying on the CPU to see whether the encoder error goes away ***")
152 |                     return self.pip_video(video1_path, video2_path,"cpu",use_audio,use_duration, align_type,pip_fg_zoom, os.path.dirname(output_path),scale_and_crop,fps,is_chromakey)  # return the retry's result
153 |
154 | else:
155 |                 # Print the standard output
156 |                 print(f">>FFmpeg finished! Completed!\t stdout: {result.stdout}")
157 |
158 | return (output_path,width,height,duration_1,fps,)
159 | except Exception as e:
160 | raise ValueError(e)
161 |
--------------------------------------------------------------------------------
/nodes/singleCuttingVideo.py:
--------------------------------------------------------------------------------
1 | import os
2 | import subprocess
3 | from datetime import datetime
4 | from ..func import video_type,set_file_name,validate_time_format
5 |
6 | class SingleCuttingVideo:
7 | def __init__(self):
8 | pass
9 |
10 | @classmethod
11 | def INPUT_TYPES(cls):
12 | return {
13 | "required": {
14 | "video_path": ("STRING", {"default":"C:/Users/Desktop/video.mp4",}),
15 | "output_path": ("STRING", {"default":"C:/Users/Desktop/output",}),
16 | "start_time": ("STRING", {"default":"00:00:00",}),
17 | "end_time": ("STRING", {"default":"00:00:10",}),
18 | },
19 | }
20 |
21 | RETURN_TYPES = ("STRING",)
22 | RETURN_NAMES = ("video_complete_path",)
23 | FUNCTION = "single_cutting_video"
24 | OUTPUT_NODE = True
25 | CATEGORY = "🔥FFmpeg"
26 |
27 |     # Cut a single clip out of the video. With stream copy the cut happens on keyframes, so the actual clip boundaries are only approximately start_time and end_time.
28 | def single_cutting_video(self, video_path, output_path,start_time,end_time):
29 | try:
30 | video_path = os.path.abspath(video_path).strip()
31 | output_path = os.path.abspath(output_path).strip()
32 |             # Validate that video_path is an existing video file
33 | if not video_path.lower().endswith(video_type()):
34 | raise ValueError("video_path:"+video_path+"不是视频文件(video_path:"+video_path+" is not a video file)")
35 | if not os.path.isfile(video_path):
36 | raise ValueError("video_path:"+video_path+"不存在(video_path:"+video_path+" does not exist)")
37 |
38 |             # Check that output_path is a directory
39 | if not os.path.isdir(output_path):
40 | raise ValueError("output_path:"+output_path+"不是目录(output_path:"+output_path+" is not a directory)")
41 |
42 | if not validate_time_format(start_time) or not validate_time_format(end_time):
43 | raise ValueError("start_time或者end_time时间格式不对(start_time or end_time is not in time format)")
44 |
45 | time_format = "%H:%M:%S"
46 | start_dt = datetime.strptime(start_time, time_format)
47 | end_dt = datetime.strptime(end_time, time_format)
48 |
49 | if start_dt >= end_dt:
50 | raise ValueError("start_time必须小于end_time(start_time must be less than end_time)")
51 |
52 | file_name = set_file_name(video_path)
53 | output_path = os.path.join(output_path, file_name)
54 | #ffmpeg -i input.mp4 -ss START_TIME -to END_TIME -c copy output.mp4
55 | command = [
56 |                 'ffmpeg', '-i', video_path, # input video path
57 | '-ss', start_time,'-to', end_time,
58 | '-c','copy',output_path
59 | ]
60 |
61 |             # Run the command and check for errors
62 |             result = subprocess.run(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
63 |             # Check the return code
64 |             if result.returncode != 0:
65 |                 # Print the error message if the command failed
66 |                 print(f"Error: {result.stderr.decode('utf-8')}")
67 |                 raise ValueError(f"Error: {result.stderr.decode('utf-8')}")
68 |             else:
69 |                 # Print the standard output
70 | print(result.stdout)
71 |
72 | return (output_path,)
73 | except Exception as e:
74 | raise ValueError(e)
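
For reference, the node is a thin wrapper around ffmpeg's -ss/-to trim with stream copy. A minimal standalone sketch (file names and times are hypothetical):

    import subprocess

    # Copy the segment between 00:00:00 and 00:00:10 without re-encoding.
    cmd = ["ffmpeg", "-i", "input.mp4", "-ss", "00:00:00", "-to", "00:00:10", "-c", "copy", "clip.mp4"]
    subprocess.run(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE)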
--------------------------------------------------------------------------------
/nodes/stitchingVideo.py:
--------------------------------------------------------------------------------
1 | import os
2 | import subprocess
3 | from ..func import has_audio,getVideoInfo,set_file_name,video_type
4 | import torch
5 | import math
6 | import time
7 |
8 | device = "cuda" if torch.cuda.is_available() else "cpu"
9 |
10 | class StitchingVideo:
11 | def __init__(self):
12 | pass
13 |
14 | @classmethod
15 | def INPUT_TYPES(cls):
16 | return {
17 | "required": {
18 | "video1_path": ("STRING", {"default":"C:/Users/Desktop/video1.mp4",}),
19 | "video2_path": ("STRING", {"default":"C:/Users/Desktop/video2.mp4",}),
20 | "device": (["cpu","cuda"], {"default":device,}),
21 | "use_audio": (["video1","video2"], {"default":"video1",}),
22 | "stitching_type":(["horizontal","vertical"], {"default":"horizontal",}),
23 | "output_path": ("STRING", {"default": "C:/Users/Desktop/output"}),
24 | "scale_and_crop": (["yes", "no"], {"default": "no"}), # 新增参数控制缩放裁剪
25 | },
26 | }
27 |
28 | RETURN_TYPES = ("STRING",)
29 | RETURN_NAMES = ("video_complete_path",)
30 | FUNCTION = "stitching_video"
31 | OUTPUT_NODE = True
32 | CATEGORY = "🔥FFmpeg"
33 |
34 | def stitching_video(self, video1_path, video2_path,device,use_audio,stitching_type,output_path,scale_and_crop):
35 | try:
36 | video1_path = os.path.abspath(video1_path).strip()
37 | video2_path = os.path.abspath(video2_path).strip()
38 | output_path = os.path.abspath(output_path).strip()
39 |             # Validate the input videos
40 | if not video1_path.lower().endswith(video_type()):
41 | raise ValueError("video1_path:"+video1_path+"不是视频文件(video1_path:"+video1_path+" is not a video file)")
42 | if not os.path.isfile(video1_path):
43 | raise ValueError("video1_path:"+video1_path+"不存在(video1_path:"+video1_path+" does not exist)")
44 |
45 | if not video2_path.lower().endswith(video_type()):
46 | raise ValueError("video2_path:"+video2_path+"不是视频文件(video2_path:"+video2_path+" is not a video file)")
47 | if not os.path.isfile(video2_path):
48 | raise ValueError("video2_path:"+video2_path+"不存在(video2_path:"+video2_path+" does not exist)")
49 |
50 |             # Check that output_path is a directory
51 | if not os.path.isdir(output_path):
52 | raise ValueError("output_path:"+output_path+"不是目录(output_path:"+output_path+" is not a directory)")
53 |
54 | video1_audio = has_audio(video1_path)
55 | video2_audio = has_audio(video2_path)
56 |
57 | final_output = set_file_name(video1_path)
58 |
59 |             # The output file name is generated from the current date and time
60 | output_path = os.path.join(output_path, final_output)
61 |
62 |             # Default CPU parameters:
63 |             use_cuvid = ""
64 |             use_encoder = "-c:v libx264" # default to CPU encoding
65 |
66 | if device == "cuda":
67 | use_cuvid = "-hwaccel cuda"
68 | use_encoder = "-c:v h264_nvenc"
69 |
70 | video_info = getVideoInfo(video1_path)
71 | video_info1 = getVideoInfo(video2_path)
72 | duration = video_info['duration']
73 | fps = video_info['fps']
74 |
75 | loop_count = max(1, int(duration / video_info1['duration'] + 0.9999))
76 |             # Encoders such as libx264/libx265 require the width and height to be even; if this errors, try switching device to GPU
77 | width = math.ceil(video_info['width']/2)*2
78 | height = math.ceil(video_info['height']/2)*2
79 |
80 | use_audio = {
81 | 'video1': '0',
82 | 'video2': '1',
83 | }.get(use_audio, '0')
84 |
85 | tack_type = {
86 | 'horizontal': 'hstack',
87 | 'vertical': 'vstack',
88 | }.get(stitching_type, 'hstack')
89 |
90 | scale = {
91 | 'horizontal':f'-1:{height}',
92 | 'vertical': f'{width}:-1',
93 | }.get(stitching_type, f'{width}:-1')
94 |
95 | print(f">>loop_count:{loop_count}")
96 | if video1_audio or video2_audio:
97 |                 # -map {use_audio}:a? selects which input's audio stream to use
98 | command = f'ffmpeg {use_cuvid} -i {video1_path} -i {video2_path} -filter_complex "[1:v]scale={scale}[vid2];[0:v][vid2]{tack_type}=inputs=2[v]" -map "[v]" -map {use_audio}:a? {use_encoder} -c:a aac -strict experimental {output_path}'
99 | else:
100 | command = f'ffmpeg {use_cuvid} -i {video1_path} -i {video2_path} -filter_complex "[1:v]scale={scale}[vid2];[0:v][vid2]{tack_type}=inputs=2[v]" -map "[v]" {use_encoder} {output_path}'
101 |
102 |
103 |             # Run the command and check for errors (the command is a single string, so it needs shell=True)
104 |             result = subprocess.run(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
105 |
106 |             # Optionally scale and crop the stitched result
107 | if scale_and_crop == "yes":
108 | time.sleep(1)
109 | if not os.path.isfile(output_path) or os.path.getsize(output_path) <= 0:
110 |                     print(f"Output video {output_path} does not exist or is empty!")
111 | return (output_path,)
112 | crop_video_path = os.path.join(os.path.dirname(output_path), "crop--" + final_output)
113 |
114 | if stitching_type == "vertical":
115 |                     # Vertical mode: scale up to cover the target width x height, then crop
116 | command = f'ffmpeg -y -i "{output_path}" -filter_complex "[0:v]scale=w={width}:h={height}:force_original_aspect_ratio=increase[scaled];[scaled]crop={width}:{height}[out]" -map "[out]" -map 0:a {use_encoder} -c:a aac "{crop_video_path}"'
117 | else:
118 |                     # Horizontal mode: blurred, scaled background with the stitched video overlaid in the center
119 | command = f'ffmpeg -y -i "{output_path}" -filter_complex "[0:v]split=2[bg][fg];[bg]scale={width}:-1,setsar=1[scaled_bg];[scaled_bg]gblur=sigma=10[blurred];[blurred]scale={width}:{height}:force_original_aspect_ratio=disable[bg_out];[fg]scale={width}:-1,setsar=1[fg_out];[bg_out][fg_out]overlay=(W-w)/2:(H-h)/2[out];[out]scale={width}:{height}:force_original_aspect_ratio=disable,setsar=1[final_out]" -map "[final_out]" -map 0:a {use_encoder} -c:a aac "{crop_video_path}"'
120 |
121 |                 print(f">>FFmpeg scale-and-crop command: {command}")
122 |                 result = subprocess.run(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
123 | output_path = crop_video_path
124 |
125 |             # Check the return code
126 |             if result.returncode != 0:
127 |                 # Print the error message if the command failed
128 |                 print(f"Error: {result.stderr.decode('utf-8')}")
129 |             else:
130 |                 # Print the standard output
131 |                 print(f">>FFmpeg finished! Completed!\t stdout: {result.stdout}")
132 | return (output_path,)
133 | except Exception as e:
134 | raise ValueError(e)
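
For reference, the core of the node is ffmpeg's hstack/vstack filter with the second input rescaled to match the first. A minimal standalone sketch of the horizontal case (file names and the 720 px height are hypothetical; -2 keeps the width even for libx264):

    import subprocess

    # Scale video2 to video1's height, then place the two videos side by side.
    cmd = (
        'ffmpeg -i video1.mp4 -i video2.mp4 '
        '-filter_complex "[1:v]scale=-2:720[vid2];[0:v][vid2]hstack=inputs=2[v]" '
        '-map "[v]" -c:v libx264 out.mp4'
    )
    subprocess.run(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)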
--------------------------------------------------------------------------------
/nodes/video2frames.py:
--------------------------------------------------------------------------------
1 | import json
2 | import math
3 | import os
4 | import subprocess
5 | from ..func import video_type
6 |
7 | class Video2Frames:
8 | def __init__(self):
9 | pass
10 |
11 | @classmethod
12 | def INPUT_TYPES(cls):
13 | return {
14 | "required": {
15 | "video_path": ("STRING", {"default":"C:/Users/Desktop/video.mp4",}),
16 | "output_path": ("STRING", {"default":"C:/Users/Desktop/output",}),
17 | "frames_max_width":("INT", {"default": 0, "min": 0, "max": 1920}),
18 | },
19 | }
20 |
21 | RETURN_TYPES = ("STRING", "FLOAT", "STRING", "INT","STRING")
22 | RETURN_NAMES = ("frame_path", "fps", "audio_path", "total_frames","output_path")
23 | FUNCTION = "video2frames"
24 | OUTPUT_NODE = True
25 | CATEGORY = "🔥FFmpeg"
26 |
27 | def video2frames(self, video_path, output_path, frames_max_width):
28 | try:
29 | video_path = os.path.abspath(video_path).strip()
30 | output_path = os.path.abspath(output_path).strip()
31 |             # Path for the extracted audio
32 |             audio_path = os.path.join(output_path, 'audio.mp3')
33 |             # Validate that video_path is an existing video file
34 |             if not video_path.lower().endswith(video_type()):
35 |                 raise ValueError("video_path:"+video_path+"不是视频文件(video_path:"+video_path+" is not a video file)")
36 |             print("Video file path: "+video_path)
37 | if not os.path.isfile(video_path):
38 | raise ValueError("video_path:"+video_path+"不存在(video_path:"+video_path+" does not exist)")
39 |
40 |             # Check that output_path is a directory
41 | if not os.path.isdir(output_path):
42 | raise ValueError("output_path:"+output_path+"不是目录(output_path:"+output_path+" is not a directory)")
43 |
44 |             # Ensure frames_max_width is an integer
45 | if not isinstance(frames_max_width, int):
46 | raise ValueError("frames_max_width不是整数(frames_max_width is not an integer)")
47 |
48 | audio_cmd = [
49 | 'ffmpeg', '-i', video_path,
50 | '-q:a', '0', '-map', 'a', '-y', audio_path
51 | ]
52 | subprocess.run(audio_cmd)
53 |
54 |             # Get the video frame rate, duration, width and height
55 | command = [
56 | 'ffprobe', '-v', 'error', '-select_streams', 'v:0', '-show_entries',
57 | 'stream=avg_frame_rate,nb_frames,duration,width,height', '-of', 'json', video_path
58 | ]
59 |
60 |             # Run the ffprobe command
61 |             result = subprocess.run(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
62 |             # Decode the output to a string
63 | output = result.stdout.decode('utf-8').strip()
64 | print(output)
65 | data = json.loads(output)
66 |             # Locate the video stream info
67 |             if 'streams' in data and len(data['streams']) > 0:
68 |                 stream = data['streams'][0] # take the first video stream
69 | fps = stream.get('avg_frame_rate')
70 | if fps is not None:
71 |                     # The frame rate may be a fraction string such as "30/1" or a decimal such as "20.233000"
72 | if '/' in fps:
73 | num, denom = map(int, fps.split('/'))
74 | fps = num / denom
75 | else:
76 |                         fps = float(fps) # convert directly to a float
77 | else:
78 |                     raise ValueError("无法获取视频的帧率(Cannot get the video frame rate)")
79 | width = int(stream.get('width'))
80 | height = int(stream.get('height'))
81 | duration = float(stream.get('duration'))
82 | else:
83 |                 raise ValueError("无法获取视频信息(Cannot get video information)")
84 |
85 | nb_frames = stream.get('nb_frames')
86 | if nb_frames and nb_frames.isdigit():
87 | total_frames = int(nb_frames)
88 | else:
89 |                 # Estimate the total frame count from fps and duration
90 |                 total_frames = math.ceil(fps * duration)
91 |             print(f"fps: {fps}, width: {width}, height: {height}, duration: {duration}, total frames: {total_frames}")
92 |             # Extract frames
93 |             frame_path = os.path.join(output_path, 'frames')
94 |             os.makedirs(frame_path, exist_ok=True) # exist_ok=True: no error if the directory already exists
95 |
96 |             # Compute the output width and height while preserving the aspect ratio
97 | if frames_max_width > 0:
98 | if width > frames_max_width:
99 | out_width = frames_max_width
100 |                     out_height = int(height * frames_max_width / width) # scale the height proportionally
101 | else:
102 | out_width = width
103 | out_height = height
104 | else:
105 | out_width = width
106 | out_height = height
107 |
108 | command = [
109 |                 'ffmpeg', '-i', video_path, # input video path
110 |                 '-vf', f'scale={out_width}:{out_height}', # scale each frame with the scale filter
111 |                 os.path.join(frame_path, 'frame_%08d.png') # output frame pattern
112 | ]
113 |             # Run the command and check for errors
114 |             result = subprocess.run(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
115 |             # Check the return code
116 |             if result.returncode != 0:
117 |                 # Print the error message if the command failed
118 |                 print(f"Error: {result.stderr.decode('utf-8')}")
119 |                 raise ValueError(f"Error: {result.stderr.decode('utf-8')}")
120 |             else:
121 |                 # Print the standard output
122 | print(result.stdout)
123 |
124 | return (frame_path, fps, audio_path, total_frames,output_path)
125 | except Exception as e:
126 | raise ValueError(e)
--------------------------------------------------------------------------------
/nodes/videoFlip.py:
--------------------------------------------------------------------------------
1 | import os
2 | import subprocess
3 | from ..func import set_file_name,video_type
4 |
5 | class VideoFlip:
6 | def __init__(self):
7 | pass
8 |
9 | @classmethod
10 | def INPUT_TYPES(cls):
11 | return {
12 | "required": {
13 | "video_path": ("STRING", {"default":"C:/Users/Desktop/video.mp4",}),
14 | "output_path": ("STRING", {"default":"C:/Users/Desktop/output",}),
15 | "flip_type": (["horizontal","vertical","both"], {"default":"horizontal",}),
16 | },
17 | }
18 |
19 | RETURN_TYPES = ("STRING",)
20 | RETURN_NAMES = ("video_complete_path",)
21 | FUNCTION = "video_flip"
22 | OUTPUT_NODE = True
23 | CATEGORY = "🔥FFmpeg"
24 |
25 | def video_flip(self, video_path, output_path, flip_type):
26 | try:
27 | video_path = os.path.abspath(video_path).strip()
28 | output_path = os.path.abspath(output_path).strip()
29 |             # Validate that video_path is an existing video file
30 | if not video_path.lower().endswith(video_type()):
31 | raise ValueError("video_path:"+video_path+"不是视频文件(video_path:"+video_path+" is not a video file)")
32 | if not os.path.isfile(video_path):
33 | raise ValueError("video_path:"+video_path+"不存在(video_path:"+video_path+" does not exist)")
34 |
35 |             # Check that output_path is a directory
36 | if not os.path.isdir(output_path):
37 | raise ValueError("output_path:"+output_path+"不是目录(output_path:"+output_path+" is not a directory)")
38 |
39 | file_name = set_file_name(video_path)
40 |
41 | output_path = os.path.join(output_path, file_name)
42 | flip = {
43 | 'horizontal': 'hflip',
44 | 'vertical': 'vflip',
45 | 'both': 'hflip,vflip',
46 |             }.get(flip_type, 'hflip') # default to a horizontal flip
47 |
48 | command = [
49 |                 'ffmpeg', '-i', video_path, # input video path
50 |                 '-vf', flip, # apply the flip filter
51 | output_path,
52 | ]
53 |
54 |             # Run the command and check for errors
55 |             result = subprocess.run(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
56 |             # Check the return code
57 |             if result.returncode != 0:
58 |                 # Print the error message if the command failed
59 |                 print(f"Error: {result.stderr.decode('utf-8')}")
60 |                 raise ValueError(f"Error: {result.stderr.decode('utf-8')}")
61 |             else:
62 |                 # Print the standard output
63 | print(result.stdout)
64 |
65 | return (output_path,)
66 | except Exception as e:
67 | raise ValueError(e)
--------------------------------------------------------------------------------
/nodes/videoTransition.py:
--------------------------------------------------------------------------------
1 | import os
2 | import subprocess
3 | from ..func import set_file_name,video_type,getVideoInfo,get_xfade_transitions,has_audio
4 | import torch
5 |
6 | device = "cuda" if torch.cuda.is_available() else "cpu"
7 |
8 |
9 | class VideoTransition:
10 | def __init__(self):
11 | pass
12 |
13 | @classmethod
14 | def INPUT_TYPES(cls):
15 | return {
16 | "required": {
17 | "video1_path": ("STRING", {"default":"C:/Users/Desktop/video1.mp4",}),
18 | "video2_path": ("STRING", {"default":"C:/Users/Desktop/video2.mp4",}),
19 |                 # Which video the output size and frame rate follow
20 | "reference_video": (["video1","video2"], {"default":"video1","tooltip": "参考视频是哪个视频,决定了输出视频的尺寸和帧率!(Reference video is which video, determines the size and frame rate of the output video!)"}),
21 | "device": (["cpu","cuda"], {"default":device,}),
22 | "transition": (get_xfade_transitions(),{"default": "fade",}),
23 | "transition_duration": ("FLOAT",{"default":1,"min":0.1,"max":3.0,"step":0.1,"display":"number","tooltip": "转场持续时间,单位秒,最大值为3秒,不能小于0.1秒!(Transition duration, in seconds, the maximum value is 3 seconds, cannot be less than 0.1 seconds!)"}),
24 | "offset": ("FLOAT",{"default":1,"min":0.1,"step":0.1,"display":"number","tooltip": "转场开始时间,单位秒,不能大于等于视频1的时长减去转场持续时间(transition_duration)!(Transition start time, in seconds, cannot be greater than or equal to the duration of video1 minus the transition duration (transition_duration)!)"}),
25 | "output_path": ("STRING", {"default":"C:/Users/Desktop/output",}),
26 | },
27 | }
28 |
29 | RETURN_TYPES = ("STRING",)
30 | RETURN_NAMES = ("video_complete_path",)
31 | FUNCTION = "video_transition"
32 | OUTPUT_NODE = True
33 | CATEGORY = "🔥FFmpeg"
34 |
35 | def video_transition(self, video1_path, video2_path,reference_video, device,transition, transition_duration, offset,output_path):
36 | try:
37 | video1_path = os.path.abspath(video1_path).strip()
38 | video2_path = os.path.abspath(video2_path).strip()
39 | output_path = os.path.abspath(output_path).strip()
40 |             # Validate the input videos
41 | if not video1_path.lower().endswith(video_type()):
42 | raise ValueError("video1_path:"+video1_path+"不是视频文件(video1_path:"+video1_path+" is not a video file)")
43 | if not os.path.isfile(video1_path):
44 | raise ValueError("video1_path:"+video1_path+"不存在(video1_path:"+video1_path+" does not exist)")
45 |
46 | if not video2_path.lower().endswith(video_type()):
47 | raise ValueError("video2_path:"+video2_path+"不是视频文件(video2_path:"+video2_path+" is not a video file)")
48 | if not os.path.isfile(video2_path):
49 | raise ValueError("video2_path:"+video2_path+"不存在(video2_path:"+video2_path+" does not exist)")
50 |
51 |             # Check that output_path is a directory
52 | if not os.path.isdir(output_path):
53 | raise ValueError("output_path:"+output_path+"不是目录(output_path:"+output_path+" is not a directory)")
54 |
55 |             # offset must not exceed video1's duration minus transition_duration
56 | video1_info = getVideoInfo(video1_path)
57 | reference_video_info = getVideoInfo(video1_path if reference_video == "video1" else video2_path)
58 | if video1_info is None:
59 | raise ValueError("无法获取视频1的信息(Cannot get video1 information)")
60 |
61 | if offset >= video1_info['duration'] - transition_duration:
62 | raise ValueError("offset:"+str(offset)+"不能大于等于(视频1的时长-transition_duration),其中视频1的时长减去transition_duration为:"+str(video1_info['duration']-transition_duration)+"(offset:"+str(offset)+" cannot be greater than (video1 duration - transition_duration), where video1 duration minus transition_duration is:"+str(video1_info['duration']-transition_duration))
63 |
64 |             use_cuvid = [] # hardware-acceleration flags, kept as a list of arguments
65 |             use_encoder = "-c:v libx264" # default to CPU encoding
66 |
67 | if device == "cuda":
68 |                 use_cuvid = ['-hwaccel', 'cuda'] # pass the flags as separate arguments
69 | use_encoder = "-c:v h264_nvenc"
70 |
71 | file_name = set_file_name(video1_path)
72 |
73 | output_path = os.path.join(output_path, file_name)
74 |
75 | target_width = reference_video_info['width']
76 | target_height = reference_video_info['height']
77 | target_fps = reference_video_info['fps']
78 |
79 | has_audio1 = has_audio(video1_path)
80 | has_audio2 = has_audio(video2_path)
81 |
82 | filter_complex = (
83 |                 # First scale both videos to the same size and frame rate
84 | f'[0:v]settb=AVTB,fps={target_fps},format=yuv420p,'
85 | f'scale={target_width}:{target_height}:force_original_aspect_ratio=decrease,pad={target_width}:{target_height}:(ow-iw)/2:(oh-ih)/2[v0];'
86 |
87 | f'[1:v]settb=AVTB,fps={target_fps},format=yuv420p,'
88 | f'scale={target_width}:{target_height}:force_original_aspect_ratio=decrease,pad={target_width}:{target_height}:(ow-iw)/2:(oh-ih)/2[v1];'
89 |             # Video transition between the scaled streams
90 | f'[v0][v1]xfade=transition={transition}:duration={transition_duration}:offset={offset}[outv]'
91 | )
92 |
93 |             if has_audio1 and has_audio2: # both videos have audio
94 | filter_complex += (
95 | f';[0:a]asplit[a0l][a0r];'
96 | f'[1:a]asplit[a1l][a1r];'
97 | f'[a0l]atrim=0:{offset}[a0start];'
98 | f'[a0r]atrim={offset}:{offset+transition_duration},asetpts=PTS-STARTPTS[a0end];'
99 | f'[a1l]atrim=0:{transition_duration},asetpts=PTS-STARTPTS[a1start];'
100 | f'[a1r]atrim={transition_duration},asetpts=PTS-STARTPTS[a1end];'
101 | f'[a0end][a1start]acrossfade=duration={transition_duration}[across];'
102 | f'[a0start][across][a1end]concat=n=3:v=0:a=1[outa]'
103 | )
104 |             elif has_audio1: # only the first video has audio
105 |                 filter_complex += (
106 |                     f';[0:a]atrim=0:{offset+transition_duration}[a0]' # keep audio only up to the end of the transition
107 | )
108 |             elif has_audio2: # only the second video has audio
109 |                 filter_complex += (
110 |                     # shift the audio so it starts at the transition offset
111 | f';[1:a]atrim=0,asetpts=PTS-STARTPTS+{offset}/TB[a1]'
112 | )
113 |
114 | command = ['ffmpeg']
115 |
116 | if use_cuvid:
117 |                 command.extend(use_cuvid) # add the CUDA flags as separate arguments
118 |
119 | command.extend([
120 | '-i', video1_path,
121 | '-i', video2_path,
122 | '-filter_complex', filter_complex,
123 | '-map', '[outv]',
124 | ])
125 |
126 |             # Map the merged or single audio stream, depending on which inputs have audio
127 | if has_audio1 and has_audio2:
128 | command.extend(['-map', '[outa]'])
129 | elif has_audio1:
130 | command.extend(['-map', '[a0]'])
131 | elif has_audio2:
132 | command.extend(['-map', '[a1]'])
133 | if use_encoder:
134 | command.extend(use_encoder.split())
135 |
136 | command.append(f'{output_path}.mp4')
137 |
138 |             # Run the command
139 | result = subprocess.run(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
140 |             # Check the return code
141 |             if result.returncode != 0:
142 |                 # Print the error message if the command failed
143 |                 print(f"Error: {result.stderr.decode('utf-8')}")
144 |                 raise ValueError(f"Error: {result.stderr.decode('utf-8')}")
145 |             else:
146 |                 # Print the standard output
147 | print(result.stdout)
148 |
149 | return (output_path,)
150 | except Exception as e:
151 | raise ValueError(e)
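
For reference, the offset check above encodes xfade's timing rule: the transition has to start early enough to finish before video1 ends. A minimal sketch with hypothetical durations:

    # Hypothetical example values, mirroring the constraint enforced above.
    video1_duration = 8.0        # seconds
    transition_duration = 1.0    # seconds
    offset = 6.5                 # transition runs from 6.5 s to 7.5 s, inside video1
    assert offset < video1_duration - transition_duration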
--------------------------------------------------------------------------------
/nodes_map.py:
--------------------------------------------------------------------------------
1 | from .nodes.addTextWatermark import *
2 | from .nodes.frames2video import *
3 | from .nodes.video2frames import *
4 | from .nodes.addImgWatermark import *
5 | from .nodes.videoFlip import *
6 | from .nodes.extractAudio import *
7 | from .nodes.loadImageFromDir import *
8 | from .nodes.imageCopy import *
9 | from .nodes.imagePath2Tensor import *
10 | from .nodes.mergingVideoByTwo import *
11 | from .nodes.mergingVideoByPlenty import *
12 | from .nodes.stitchingVideo import *
13 | from .nodes.multiCuttingVideo import *
14 | from .nodes.singleCuttingVideo import *
15 | from .nodes.addAudio import *
16 | from .nodes.imagesSave import *
17 | from .nodes.pipVideo import *
18 | from .nodes.videoTransition import *
19 |
20 | NODE_CLASS_MAPPINGS = {
21 | "Video2Frames": Video2Frames,
22 | "Frames2Video": Frames2Video,
23 | "AddTextWatermark": AddTextWatermark,
24 | "AddImgWatermark": AddImgWatermark,
25 | "VideoFlip": VideoFlip,
26 | "ExtractAudio": ExtractAudio,
27 | "LoadImageFromDir": LoadImageFromDir,
28 | "ImageCopy": ImageCopy,
29 | "ImagePath2Tensor": ImagePath2Tensor,
30 | "MergingVideoByTwo": MergingVideoByTwo,
31 | "MergingVideoByPlenty": MergingVideoByPlenty,
32 | "StitchingVideo": StitchingVideo,
33 | "MultiCuttingVideo": MultiCuttingVideo,
34 | "SingleCuttingVideo": SingleCuttingVideo,
35 | "AddAudio": AddAudio,
36 | "ImagesSave": ImagesSave,
37 | "PipVideo": PipVideo,
38 | "VideoTransition": VideoTransition,
39 | }
40 |
41 | # A dictionary that contains the friendly/humanly readable titles for the nodes
42 | NODE_DISPLAY_NAME_MAPPINGS = {
43 | "Video2Frames": "🔥Video2Frames",
44 | "Frames2Video": "🔥Frames2Video",
45 | "AddTextWatermark": "🔥AddTextWatermark",
46 | "AddImgWatermark": "🔥AddImgWatermark",
47 | "VideoFlip": "🔥VideoFlip",
48 | "ExtractAudio": "🔥ExtractAudio",
49 | "LoadImageFromDir": "🔥LoadImageFromDir",
50 | "ImageCopy": "🔥ImageCopy",
51 | "ImagePath2Tensor": "🔥ImagePath2Tensor",
52 | "MergingVideoByTwo": "🔥MergingVideoByTwo",
53 | "MergingVideoByPlenty": "🔥MergingVideoByPlenty",
54 | "StitchingVideo": "🔥StitchingVideo",
55 | "MultiCuttingVideo": "🔥MultiCuttingVideo",
56 | "SingleCuttingVideo": "🔥SingleCuttingVideo",
57 | "AddAudio": "🔥AddAudio",
58 | "ImagesSave": "🔥ImagesSave",
59 | "PipVideo": "🔥PipVideo",
60 | "VideoTransition": "🔥VideoTransition",
61 | }
62 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | pathlib
--------------------------------------------------------------------------------