├── Diffutoon_color_jupyter.ipynb
├── Diffutoon_jupyter.ipynb
└── README.md
/Diffutoon_color_jupyter.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {
6 | "id": "view-in-github"
7 | },
8 | "source": [
9 | "[](https://colab.research.google.com/github/camenduru/Diffutoon-jupyter/blob/main/Diffutoon_color_jupyter.ipynb)"
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": null,
15 | "metadata": {
16 | "id": "VjYy0F2gZIPR"
17 | },
18 | "outputs": [],
19 | "source": [
20 | "# https://github.com/modelscope/DiffSynth-Studio/blob/main/examples/Diffutoon/Diffutoon.ipynb modified\n",
21 | "\n",
22 | "%cd /content\n",
23 | "!git clone https://github.com/Artiprocher/DiffSynth-Studio\n",
24 | "%cd /content/DiffSynth-Studio\n",
25 | "\n",
26 | "!pip install -q einops transformers controlnet-aux==0.0.7 sentencepiece imageio imageio-ffmpeg\n",
27 | "\n",
28 | "!apt -y install -qq aria2\n",
29 | "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://civitai.com/api/download/models/229575 -d /content/DiffSynth-Studio/models/stable_diffusion -o aingdiffusion_v12.safetensors\n",
30 | "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/guoyww/animatediff/resolve/main/mm_sd_v15_v2.ckpt -d /content/DiffSynth-Studio/models/AnimateDiff -o mm_sd_v15_v2.ckpt\n",
31 | "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11p_sd15_lineart.pth -d /content/DiffSynth-Studio/models/ControlNet -o control_v11p_sd15_lineart.pth\n",
32 | "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11f1e_sd15_tile.pth -d /content/DiffSynth-Studio/models/ControlNet -o control_v11f1e_sd15_tile.pth\n",
33 | "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11f1p_sd15_depth.pth -d /content/DiffSynth-Studio/models/ControlNet -o control_v11f1p_sd15_depth.pth\n",
34 | "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11p_sd15_softedge.pth -d /content/DiffSynth-Studio/models/ControlNet -o control_v11p_sd15_softedge.pth\n",
35 | "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lllyasviel/Annotators/resolve/main/dpt_hybrid-midas-501f0c75.pt -d /content/DiffSynth-Studio/models/Annotators -o dpt_hybrid-midas-501f0c75.pt\n",
36 | "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lllyasviel/Annotators/resolve/main/ControlNetHED.pth -d /content/DiffSynth-Studio/models/Annotators -o ControlNetHED.pth\n",
37 | "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lllyasviel/Annotators/resolve/main/sk_model.pth -d /content/DiffSynth-Studio/models/Annotators -o sk_model.pth\n",
38 | "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lllyasviel/Annotators/resolve/main/sk_model2.pth -d /content/DiffSynth-Studio/models/Annotators -o sk_model2.pth\n",
39 | "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M \"https://civitai.com/api/download/models/25820?type=Model&format=PickleTensor&size=full&fp=fp16\" -d /content/DiffSynth-Studio/models/textual_inversion -o verybadimagenegative_v1.3.pt\n",
40 | "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/Diffutoon/resolve/main/input_video.mp4 -d /content -o input_video.mp4"
41 | ]
42 | },
43 | {
44 | "cell_type": "code",
45 | "execution_count": null,
46 | "metadata": {},
47 | "outputs": [],
48 | "source": [
49 | "config_stage_1_template = {\n",
50 | " \"models\": {\n",
51 | " \"model_list\": [\n",
52 | " \"models/stable_diffusion/aingdiffusion_v12.safetensors\",\n",
53 | " \"models/ControlNet/control_v11p_sd15_softedge.pth\",\n",
54 | " \"models/ControlNet/control_v11f1p_sd15_depth.pth\"\n",
55 | " ],\n",
56 | " \"textual_inversion_folder\": \"models/textual_inversion\",\n",
57 | " \"device\": \"cuda\",\n",
58 | " \"lora_alphas\": [],\n",
59 | " \"controlnet_units\": [\n",
60 | " {\n",
61 | " \"processor_id\": \"softedge\",\n",
62 | " \"model_path\": \"models/ControlNet/control_v11p_sd15_softedge.pth\",\n",
63 | " \"scale\": 0.5\n",
64 | " },\n",
65 | " {\n",
66 | " \"processor_id\": \"depth\",\n",
67 | " \"model_path\": \"models/ControlNet/control_v11f1p_sd15_depth.pth\",\n",
68 | " \"scale\": 0.5\n",
69 | " }\n",
70 | " ]\n",
71 | " },\n",
72 | " \"data\": {\n",
73 | " \"input_frames\": {\n",
74 | " \"video_file\": \"/content/input_video.mp4\",\n",
75 | " \"image_folder\": None,\n",
76 | " \"height\": 512,\n",
77 | " \"width\": 512,\n",
78 | " \"start_frame_id\": 0,\n",
79 | " \"end_frame_id\": 30\n",
80 | " },\n",
81 | " \"controlnet_frames\": [\n",
82 | " {\n",
83 | " \"video_file\": \"/content/input_video.mp4\",\n",
84 | " \"image_folder\": None,\n",
85 | " \"height\": 512,\n",
86 | " \"width\": 512,\n",
87 | " \"start_frame_id\": 0,\n",
88 | " \"end_frame_id\": 30\n",
89 | " },\n",
90 | " {\n",
91 | " \"video_file\": \"/content/input_video.mp4\",\n",
92 | " \"image_folder\": None,\n",
93 | " \"height\": 512,\n",
94 | " \"width\": 512,\n",
95 | " \"start_frame_id\": 0,\n",
96 | " \"end_frame_id\": 30\n",
97 | " }\n",
98 | " ],\n",
99 | " \"output_folder\": \"data/examples/diffutoon_edit/color_video\",\n",
100 | " \"fps\": 25\n",
101 | " },\n",
102 | " \"smoother_configs\": [\n",
103 | " {\n",
104 | " \"processor_type\": \"FastBlend\",\n",
105 | " \"config\": {}\n",
106 | " }\n",
107 | " ],\n",
108 | " \"pipeline\": {\n",
109 | " \"seed\": 0,\n",
110 | " \"pipeline_inputs\": {\n",
111 | " \"prompt\": \"best quality, perfect anime illustration, orange clothes, night, a girl is dancing, smile, solo, black silk stockings\",\n",
112 | " \"negative_prompt\": \"verybadimagenegative_v1.3\",\n",
113 | " \"cfg_scale\": 7.0,\n",
114 | " \"clip_skip\": 1,\n",
115 | " \"denoising_strength\": 0.9,\n",
116 | " \"num_inference_steps\": 20,\n",
117 | " \"animatediff_batch_size\": 8,\n",
118 | " \"animatediff_stride\": 4,\n",
119 | " \"unet_batch_size\": 8,\n",
120 | " \"controlnet_batch_size\": 8,\n",
121 | " \"cross_frame_attention\": True,\n",
122 | " \"smoother_progress_ids\": [-1],\n",
123 | " # The following parameters will be overwritten. You don't need to modify them.\n",
124 | " \"input_frames\": [],\n",
125 | " \"num_frames\": 30,\n",
126 | " \"width\": 512,\n",
127 | " \"height\": 512,\n",
128 | " \"controlnet_frames\": []\n",
129 | " }\n",
130 | " }\n",
131 | "}\n",
132 | "\n",
133 | "from diffsynth import SDVideoPipelineRunner\n",
134 | "\n",
135 | "config_stage_1 = config_stage_1_template.copy()\n",
136 | "config_stage_1[\"data\"][\"input_frames\"] = {\n",
137 | " \"video_file\": \"/content/input_video.mp4\",\n",
138 | " \"image_folder\": None,\n",
139 | " \"height\": 512,\n",
140 | " \"width\": 512,\n",
141 | " \"start_frame_id\": 0,\n",
142 | " \"end_frame_id\": 30\n",
143 | "}\n",
144 | "config_stage_1[\"data\"][\"controlnet_frames\"] = [config_stage_1[\"data\"][\"input_frames\"], config_stage_1[\"data\"][\"input_frames\"]]\n",
145 | "config_stage_1[\"data\"][\"output_folder\"] = \"/content/color_video\"\n",
146 | "config_stage_1[\"data\"][\"fps\"] = 25\n",
147 | "config_stage_1[\"pipeline\"][\"pipeline_inputs\"][\"prompt\"] = \"best quality, perfect anime illustration, orange clothes, night, a girl is dancing, smile, solo, black silk stockings\"\n",
148 | "\n",
149 | "runner = SDVideoPipelineRunner()\n",
150 | "runner.run(config_stage_1)"
151 | ]
152 | },
153 | {
154 | "cell_type": "code",
155 | "execution_count": null,
156 | "metadata": {},
157 | "outputs": [],
158 | "source": [
159 | "# If T4, at this point, restart and run the next cell."
160 | ]
161 | },
162 | {
163 | "cell_type": "code",
164 | "execution_count": null,
165 | "metadata": {},
166 | "outputs": [],
167 | "source": [
168 | "config_stage_2_template = {\n",
169 | " \"models\": {\n",
170 | " \"model_list\": [\n",
171 | " \"models/stable_diffusion/aingdiffusion_v12.safetensors\",\n",
172 | " \"models/AnimateDiff/mm_sd_v15_v2.ckpt\",\n",
173 | " \"models/ControlNet/control_v11f1e_sd15_tile.pth\",\n",
174 | " \"models/ControlNet/control_v11p_sd15_lineart.pth\"\n",
175 | " ],\n",
176 | " \"textual_inversion_folder\": \"models/textual_inversion\",\n",
177 | " \"device\": \"cuda\",\n",
178 | " \"lora_alphas\": [],\n",
179 | " \"controlnet_units\": [\n",
180 | " {\n",
181 | " \"processor_id\": \"tile\",\n",
182 | " \"model_path\": \"models/ControlNet/control_v11f1e_sd15_tile.pth\",\n",
183 | " \"scale\": 0.5\n",
184 | " },\n",
185 | " {\n",
186 | " \"processor_id\": \"lineart\",\n",
187 | " \"model_path\": \"models/ControlNet/control_v11p_sd15_lineart.pth\",\n",
188 | " \"scale\": 0.5\n",
189 | " }\n",
190 | " ]\n",
191 | " },\n",
192 | " \"data\": {\n",
193 | " \"input_frames\": {\n",
194 | " \"video_file\": \"/content/input_video.mp4\",\n",
195 | " \"image_folder\": None,\n",
196 | " \"height\": 1024,\n",
197 | " \"width\": 1024,\n",
198 | " \"start_frame_id\": 0,\n",
199 | " \"end_frame_id\": 30\n",
200 | " },\n",
201 | " \"controlnet_frames\": [\n",
202 | " {\n",
203 | " \"video_file\": \"/content/input_video.mp4\",\n",
204 | " \"image_folder\": None,\n",
205 | " \"height\": 1024,\n",
206 | " \"width\": 1024,\n",
207 | " \"start_frame_id\": 0,\n",
208 | " \"end_frame_id\": 30\n",
209 | " },\n",
210 | " {\n",
211 | " \"video_file\": \"/content/input_video.mp4\",\n",
212 | " \"image_folder\": None,\n",
213 | " \"height\": 1024,\n",
214 | " \"width\": 1024,\n",
215 | " \"start_frame_id\": 0,\n",
216 | " \"end_frame_id\": 30\n",
217 | " }\n",
218 | " ],\n",
219 | " \"output_folder\": \"/content/output\",\n",
220 | " \"fps\": 25\n",
221 | " },\n",
222 | " \"pipeline\": {\n",
223 | " \"seed\": 0,\n",
224 | " \"pipeline_inputs\": {\n",
225 | " \"prompt\": \"best quality, perfect anime illustration, light, a girl is dancing, smile, solo\",\n",
226 | " \"negative_prompt\": \"verybadimagenegative_v1.3\",\n",
227 | " \"cfg_scale\": 7.0,\n",
228 | " \"clip_skip\": 2,\n",
229 | " \"denoising_strength\": 1.0,\n",
230 | " \"num_inference_steps\": 10,\n",
231 | " \"animatediff_batch_size\": 16,\n",
232 | " \"animatediff_stride\": 8,\n",
233 | " \"unet_batch_size\": 1,\n",
234 | " \"controlnet_batch_size\": 1,\n",
235 | " \"cross_frame_attention\": False,\n",
236 | " # The following parameters will be overwritten. You don't need to modify them.\n",
237 | " \"input_frames\": [],\n",
238 | " \"num_frames\": 30,\n",
239 | " \"width\": 1536,\n",
240 | " \"height\": 1536,\n",
241 | " \"controlnet_frames\": []\n",
242 | " }\n",
243 | " }\n",
244 | "}\n",
245 | "\n",
246 | "from diffsynth import SDVideoPipelineRunner\n",
247 | "\n",
248 | "config_stage_2 = config_stage_2_template.copy()\n",
249 | "config_stage_2[\"data\"][\"input_frames\"] = {\n",
250 | " \"video_file\": \"/content/input_video.mp4\",\n",
251 | " \"image_folder\": None,\n",
252 | " \"height\": 1024,\n",
253 | " \"width\": 1024,\n",
254 | " \"start_frame_id\": 0,\n",
255 | " \"end_frame_id\": 30\n",
256 | "}\n",
257 | "config_stage_2[\"data\"][\"controlnet_frames\"][0] = {\n",
258 | " \"video_file\": \"/content/color_video/video.mp4\",\n",
259 | " \"image_folder\": None,\n",
260 | " \"height\": config_stage_2[\"data\"][\"input_frames\"][\"height\"],\n",
261 | " \"width\": config_stage_2[\"data\"][\"input_frames\"][\"width\"],\n",
262 | " \"start_frame_id\": None,\n",
263 | " \"end_frame_id\": None\n",
264 | "}\n",
265 | "config_stage_2[\"data\"][\"controlnet_frames\"][1] = config_stage_2[\"data\"][\"input_frames\"]\n",
266 | "config_stage_2[\"data\"][\"output_folder\"] = \"/content/edit_video\"\n",
267 | "config_stage_2[\"data\"][\"fps\"] = 25\n",
268 | "\n",
269 | "runner = SDVideoPipelineRunner()\n",
270 | "runner.run(config_stage_2)"
271 | ]
272 | },
273 | {
274 | "cell_type": "code",
275 | "execution_count": null,
276 | "metadata": {},
277 | "outputs": [],
278 | "source": [
279 | "import moviepy.editor\n",
280 | "moviepy.editor.ipython_display(\"/content/edit_video/video.mp4\")"
281 | ]
282 | }
283 | ],
284 | "metadata": {
285 | "accelerator": "GPU",
286 | "colab": {
287 | "gpuType": "T4",
288 | "provenance": []
289 | },
290 | "kernelspec": {
291 | "display_name": "Python 3",
292 | "name": "python3"
293 | },
294 | "language_info": {
295 | "name": "python"
296 | }
297 | },
298 | "nbformat": 4,
299 | "nbformat_minor": 0
300 | }
301 |
--------------------------------------------------------------------------------
/Diffutoon_jupyter.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {
6 | "id": "view-in-github"
7 | },
8 | "source": [
9 | "[](https://colab.research.google.com/github/camenduru/Diffutoon-jupyter/blob/main/Diffutoon_jupyter.ipynb)"
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": null,
15 | "metadata": {
16 | "id": "VjYy0F2gZIPR"
17 | },
18 | "outputs": [],
19 | "source": [
20 | "# https://github.com/modelscope/DiffSynth-Studio/blob/main/examples/Diffutoon/Diffutoon.ipynb modified\n",
21 | "\n",
22 | "%cd /content\n",
23 | "!git clone https://github.com/Artiprocher/DiffSynth-Studio\n",
24 | "%cd /content/DiffSynth-Studio\n",
25 | "\n",
26 | "!pip install -q einops transformers controlnet-aux==0.0.7 sentencepiece imageio imageio-ffmpeg\n",
27 | "\n",
28 | "!apt -y install -qq aria2\n",
29 | "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://civitai.com/api/download/models/229575 -d /content/DiffSynth-Studio/models/stable_diffusion -o aingdiffusion_v12.safetensors\n",
30 | "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/guoyww/animatediff/resolve/main/mm_sd_v15_v2.ckpt -d /content/DiffSynth-Studio/models/AnimateDiff -o mm_sd_v15_v2.ckpt\n",
31 | "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11p_sd15_lineart.pth -d /content/DiffSynth-Studio/models/ControlNet -o control_v11p_sd15_lineart.pth\n",
32 | "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11f1e_sd15_tile.pth -d /content/DiffSynth-Studio/models/ControlNet -o control_v11f1e_sd15_tile.pth\n",
33 | "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11f1p_sd15_depth.pth -d /content/DiffSynth-Studio/models/ControlNet -o control_v11f1p_sd15_depth.pth\n",
34 | "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11p_sd15_softedge.pth -d /content/DiffSynth-Studio/models/ControlNet -o control_v11p_sd15_softedge.pth\n",
35 | "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lllyasviel/Annotators/resolve/main/dpt_hybrid-midas-501f0c75.pt -d /content/DiffSynth-Studio/models/Annotators -o dpt_hybrid-midas-501f0c75.pt\n",
36 | "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lllyasviel/Annotators/resolve/main/ControlNetHED.pth -d /content/DiffSynth-Studio/models/Annotators -o ControlNetHED.pth\n",
37 | "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lllyasviel/Annotators/resolve/main/sk_model.pth -d /content/DiffSynth-Studio/models/Annotators -o sk_model.pth\n",
38 | "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lllyasviel/Annotators/resolve/main/sk_model2.pth -d /content/DiffSynth-Studio/models/Annotators -o sk_model2.pth\n",
39 | "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M \"https://civitai.com/api/download/models/25820?type=Model&format=PickleTensor&size=full&fp=fp16\" -d /content/DiffSynth-Studio/models/textual_inversion -o verybadimagenegative_v1.3.pt\n",
40 | "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/Diffutoon/resolve/main/input_video.mp4 -d /content -o input_video.mp4"
41 | ]
42 | },
43 | {
44 | "cell_type": "code",
45 | "execution_count": null,
46 | "metadata": {},
47 | "outputs": [],
48 | "source": [
49 | "config_stage_2_template = {\n",
50 | " \"models\": {\n",
51 | " \"model_list\": [\n",
52 | " \"models/stable_diffusion/aingdiffusion_v12.safetensors\",\n",
53 | " \"models/AnimateDiff/mm_sd_v15_v2.ckpt\",\n",
54 | " \"models/ControlNet/control_v11f1e_sd15_tile.pth\",\n",
55 | " \"models/ControlNet/control_v11p_sd15_lineart.pth\"\n",
56 | " ],\n",
57 | " \"textual_inversion_folder\": \"models/textual_inversion\",\n",
58 | " \"device\": \"cuda\",\n",
59 | " \"lora_alphas\": [],\n",
60 | " \"controlnet_units\": [\n",
61 | " {\n",
62 | " \"processor_id\": \"tile\",\n",
63 | " \"model_path\": \"models/ControlNet/control_v11f1e_sd15_tile.pth\",\n",
64 | " \"scale\": 0.5\n",
65 | " },\n",
66 | " {\n",
67 | " \"processor_id\": \"lineart\",\n",
68 | " \"model_path\": \"models/ControlNet/control_v11p_sd15_lineart.pth\",\n",
69 | " \"scale\": 0.5\n",
70 | " }\n",
71 | " ]\n",
72 | " },\n",
73 | " \"data\": {\n",
74 | " \"input_frames\": {\n",
75 | " \"video_file\": \"/content/input_video.mp4\",\n",
76 | " \"image_folder\": None,\n",
77 | " \"height\": 1024,\n",
78 | " \"width\": 1024,\n",
79 | " \"start_frame_id\": 0,\n",
80 | " \"end_frame_id\": 30\n",
81 | " },\n",
82 | " \"controlnet_frames\": [\n",
83 | " {\n",
84 | " \"video_file\": \"/content/input_video.mp4\",\n",
85 | " \"image_folder\": None,\n",
86 | " \"height\": 1024,\n",
87 | " \"width\": 1024,\n",
88 | " \"start_frame_id\": 0,\n",
89 | " \"end_frame_id\": 30\n",
90 | " },\n",
91 | " {\n",
92 | " \"video_file\": \"/content/input_video.mp4\",\n",
93 | " \"image_folder\": None,\n",
94 | " \"height\": 1024,\n",
95 | " \"width\": 1024,\n",
96 | " \"start_frame_id\": 0,\n",
97 | " \"end_frame_id\": 30\n",
98 | " }\n",
99 | " ],\n",
100 | " \"output_folder\": \"/content/output\",\n",
101 | " \"fps\": 25\n",
102 | " },\n",
103 | " \"pipeline\": {\n",
104 | " \"seed\": 0,\n",
105 | " \"pipeline_inputs\": {\n",
106 | " \"prompt\": \"best quality, perfect anime illustration, light, a girl is dancing, smile, solo\",\n",
107 | " \"negative_prompt\": \"verybadimagenegative_v1.3\",\n",
108 | " \"cfg_scale\": 7.0,\n",
109 | " \"clip_skip\": 2,\n",
110 | " \"denoising_strength\": 1.0,\n",
111 | " \"num_inference_steps\": 10,\n",
112 | " \"animatediff_batch_size\": 16,\n",
113 | " \"animatediff_stride\": 8,\n",
114 | " \"unet_batch_size\": 1,\n",
115 | " \"controlnet_batch_size\": 1,\n",
116 | " \"cross_frame_attention\": False,\n",
117 | " # The following parameters will be overwritten. You don't need to modify them.\n",
118 | " \"input_frames\": [],\n",
119 | " \"num_frames\": 30,\n",
120 | " \"width\": 1536,\n",
121 | " \"height\": 1536,\n",
122 | " \"controlnet_frames\": []\n",
123 | " }\n",
124 | " }\n",
125 | "}"
126 | ]
127 | },
128 | {
129 | "cell_type": "code",
130 | "execution_count": null,
131 | "metadata": {},
132 | "outputs": [],
133 | "source": [
134 | "from diffsynth import SDVideoPipelineRunner\n",
135 | "\n",
136 | "config = config_stage_2_template.copy()\n",
137 | "config[\"data\"][\"input_frames\"] = {\n",
138 | " \"video_file\": \"/content/input_video.mp4\",\n",
139 | " \"image_folder\": None,\n",
140 | " \"height\": 1024,\n",
141 | " \"width\": 1024,\n",
142 | " \"start_frame_id\": 0,\n",
143 | " \"end_frame_id\": 30\n",
144 | "}\n",
145 | "config[\"data\"][\"controlnet_frames\"] = [config[\"data\"][\"input_frames\"], config[\"data\"][\"input_frames\"]]\n",
146 | "config[\"data\"][\"output_folder\"] = \"/content/toon_video\"\n",
147 | "config[\"data\"][\"fps\"] = 25\n",
148 | "\n",
149 | "runner = SDVideoPipelineRunner()\n",
150 | "runner.run(config)"
151 | ]
152 | },
153 | {
154 | "cell_type": "code",
155 | "execution_count": null,
156 | "metadata": {},
157 | "outputs": [],
158 | "source": [
159 | "import moviepy.editor\n",
160 | "moviepy.editor.ipython_display(\"/content/toon_video/video.mp4\")"
161 | ]
162 | }
163 | ],
164 | "metadata": {
165 | "accelerator": "GPU",
166 | "colab": {
167 | "gpuType": "T4",
168 | "provenance": []
169 | },
170 | "kernelspec": {
171 | "display_name": "Python 3",
172 | "name": "python3"
173 | },
174 | "language_info": {
175 | "name": "python"
176 | }
177 | },
178 | "nbformat": 4,
179 | "nbformat_minor": 0
180 | }
181 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | 🐣 Please follow me for new updates https://twitter.com/camenduru
2 | 🔥 Please join our discord server https://discord.gg/k5BwmmvJJU
3 | 🥳 Please join my patreon community https://patreon.com/camenduru
4 |
5 | ### 🍊 Jupyter Notebook
6 |
7 | | Notebook | Info |
8 | | --- | --- |
9 | | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/camenduru/Diffutoon-jupyter/blob/main/Diffutoon_jupyter.ipynb) | Diffutoon_jupyter |
10 | | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/camenduru/Diffutoon-jupyter/blob/main/Diffutoon_color_jupyter.ipynb) | Diffutoon_color_jupyter |
11 |
12 | ### 🧬 Code
13 | https://github.com/modelscope/DiffSynth-Studio/tree/main/examples/Diffutoon
14 |
15 | ### 📄 Paper
16 | https://arxiv.org/abs/2401.16224
17 |
18 | ### 🌐 Page
19 | https://ecnu-cilab.github.io/DiffutoonProjectPage/
20 |
21 | ### 🖼 Output
22 |
23 | Diffutoon_color_jupyter
24 |
25 | https://github.com/camenduru/Diffutoon-jupyter/assets/54370274/e8741f4b-8925-488b-95b6-3812d219af06
26 |
27 | ### 🏢 Sponsor
28 | https://runpod.io
29 |
--------------------------------------------------------------------------------