├── .editorconfig ├── .flake8 ├── .github ├── FUNDING.yml ├── preview-diffus.jpg ├── preview.jpg └── workflows │ └── ci.yml ├── .gitignore ├── README.md ├── facefusion ├── __init__.py ├── choices.py ├── core.py ├── face_analyser.py ├── face_cache.py ├── face_reference.py ├── globals.py ├── installer.py ├── metadata.py ├── predictor.py ├── processors │ ├── __init__.py │ └── frame │ │ ├── __init__.py │ │ ├── choices.py │ │ ├── core.py │ │ ├── globals.py │ │ └── modules │ │ ├── __init__.py │ │ ├── face_enhancer.py │ │ ├── face_swapper.py │ │ └── frame_enhancer.py ├── typing.py ├── uis │ ├── __init__.py │ ├── assets │ │ ├── fixes.css │ │ └── overrides.css │ ├── choices.py │ ├── components │ │ ├── __init__.py │ │ ├── about.py │ │ ├── benchmark.py │ │ ├── benchmark_options.py │ │ ├── common_options.py │ │ ├── execution.py │ │ ├── execution_queue_count.py │ │ ├── execution_thread_count.py │ │ ├── face_analyser.py │ │ ├── face_selector.py │ │ ├── frame_processors.py │ │ ├── frame_processors_options.py │ │ ├── limit_resources.py │ │ ├── output.py │ │ ├── output_options.py │ │ ├── preview.py │ │ ├── source.py │ │ ├── target.py │ │ ├── temp_frame.py │ │ ├── trim_frame.py │ │ ├── webcam.py │ │ └── webcam_options.py │ ├── core.py │ ├── layouts │ │ ├── benchmark.py │ │ ├── default.py │ │ └── webcam.py │ └── typing.py ├── utilities.py ├── vision.py └── wording.py ├── install.py ├── install_origin.py ├── mypy.ini ├── requirements.txt ├── run.py ├── scripts └── facefusion_ui.py └── tests ├── __init__.py ├── test_cli.py ├── test_utilities.py └── test_vision.py /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | 3 | [*] 4 | end_of_line = lf 5 | insert_final_newline = true 6 | indent_size = 4 7 | indent_style = tab 8 | trim_trailing_whitespace = true 9 | -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | select = E3, E4, F 3 | per-file-ignores = facefusion/core.py:E402, facefusion/installer.py:E402 4 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | github: henryruhs 2 | custom: https://paypal.me/henryruhs 3 | -------------------------------------------------------------------------------- /.github/preview-diffus.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/diffus-me/sd-webui-facefusion/f84fed74bd20374112c7009f8d75c391d64cfa00/.github/preview-diffus.jpg -------------------------------------------------------------------------------- /.github/preview.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/diffus-me/sd-webui-facefusion/f84fed74bd20374112c7009f8d75c391d64cfa00/.github/preview.jpg -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: ci 2 | 3 | on: [ push, pull_request ] 4 | 5 | jobs: 6 | lint: 7 | runs-on: ubuntu-latest 8 | steps: 9 | - name: Checkout 10 | uses: actions/checkout@v2 11 | - name: Set up Python 3.10 12 | uses: actions/setup-python@v2 13 | with: 14 | python-version: '3.10' 15 | - run: pip install flake8 16 | - run: pip install mypy 17 | - run: flake8 run.py facefusion 
tests 18 | - run: mypy run.py facefusion tests 19 | test: 20 | strategy: 21 | matrix: 22 | os: [ macos-latest, ubuntu-latest, windows-latest ] 23 | runs-on: ${{ matrix.os }} 24 | steps: 25 | - name: Checkout 26 | uses: actions/checkout@v2 27 | - name: Set up ffmpeg 28 | uses: FedericoCarboni/setup-ffmpeg@v2 29 | - name: Set up Python 3.10 30 | uses: actions/setup-python@v2 31 | with: 32 | python-version: '3.10' 33 | - run: python install.py --torch cpu --onnxruntime default 34 | - run: pip install pytest 35 | - run: pytest 36 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .assets 2 | .idea 3 | .vscode 4 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # FaceFusion extension for Stable Diffusion Webui 2 | 3 | [FaceFusion](https://github.com/facefusion/facefusion) is a next generation face swapper and enhancer. 4 | 5 | This repo makes it available as an extension of [AUTOMATIC1111 Webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui/). 6 | 7 | ![Preview](.github/preview.jpg) 8 | 9 | ## Installation 10 | 11 | - FaceFusion requires `ffmpeg`; install it first: 12 | - For Ubuntu: `sudo apt install ffmpeg` 13 | - For macOS: `brew install ffmpeg` 14 | - Clone this repo into the `stable-diffusion-webui/extensions` folder. 15 | - Start the Webui. 16 | 17 | ## Diffus Webui 18 | 19 | [Diffus Webui](https://www.diffus.me?utm_source=af_901d2b1ee3&utm_medium=af_901d2b1ee3&utm_campaign=af_901d2b1ee3) is a hosted Stable Diffusion WebUI based on 20 | [AUTOMATIC1111 Webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui/). You can also use the 21 | FaceFusion extension on it. 22 | 23 | ![Preview](.github/preview-diffus.jpg) 24 | 25 | You can also join our [Discord](https://discord.gg/CDhw9n9yfQ) community and let us know what you 26 | want us to build and release next. 27 | 28 | ## Disclaimer 29 | 30 | We acknowledge the unethical potential of FaceFusion and are resolutely dedicated to establishing 31 | safeguards against such misuse. This program has been engineered to abstain from processing 32 | inappropriate content such as nudity, graphic content and sensitive material. 33 | 34 | It is important to note that we maintain a strong stance against pornographic content of any kind and 35 | do not collaborate with any websites promoting the unauthorized use of our software. 36 | 37 | Users who seek to engage in such activities will face consequences, including being banned from our 38 | community. We reserve the right to report developers on GitHub who distribute unlocked forks of our 39 | software at any time.
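The safeguard described above is enforced in code, not just by policy: before any frame is touched, `facefusion/core.py` runs the target through an NSFW predictor and aborts when it trips. Below is a minimal sketch of that gate, simplified from `core.py` and `facefusion/predictor.py`; the `guarded_process` helper is hypothetical and stands in for the checks at the top of the real `process_image` and `process_video`, which take no arguments and read `facefusion.globals` instead.

```python
# Simplified sketch of the content gate in facefusion/core.py: predict_image
# and predict_video wrap opennsfw2 and return True once the NSFW probability
# exceeds MAX_PROBABILITY (0.75 in facefusion/predictor.py).
from facefusion.predictor import predict_image, predict_video
from facefusion.utilities import is_image, is_video


def guarded_process(target_path : str) -> None:
    if is_image(target_path) and predict_image(target_path):
        return # inappropriate image detected, abort before any processing
    if is_video(target_path) and predict_video(target_path):
        return # inappropriate video detected, abort before any processing
    ... # only at this point would the frame processors run
```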
40 | -------------------------------------------------------------------------------- /facefusion/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/diffus-me/sd-webui-facefusion/f84fed74bd20374112c7009f8d75c391d64cfa00/facefusion/__init__.py -------------------------------------------------------------------------------- /facefusion/choices.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | 3 | from facefusion.typing import FaceRecognition, FaceAnalyserDirection, FaceAnalyserAge, FaceAnalyserGender, TempFrameFormat, OutputVideoEncoder 4 | 5 | face_recognitions : List[FaceRecognition] = [ 'reference', 'many' ] 6 | face_analyser_directions : List[FaceAnalyserDirection] = [ 'left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small' ] 7 | face_analyser_ages : List[FaceAnalyserAge] = [ 'child', 'teen', 'adult', 'senior' ] 8 | face_analyser_genders : List[FaceAnalyserGender] = [ 'male', 'female' ] 9 | temp_frame_formats : List[TempFrameFormat] = [ 'jpg', 'png' ] 10 | output_video_encoders : List[OutputVideoEncoder] = [ 'libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc' ] 11 | -------------------------------------------------------------------------------- /facefusion/core.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | os.environ['OMP_NUM_THREADS'] = '1' 4 | os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' 5 | 6 | import signal 7 | import sys 8 | import warnings 9 | import platform 10 | import shutil 11 | import onnxruntime 12 | import tensorflow 13 | from argparse import ArgumentParser, HelpFormatter 14 | 15 | import facefusion.choices 16 | import facefusion.globals 17 | from facefusion import metadata, wording 18 | from facefusion.predictor import predict_image, predict_video 19 | from facefusion.processors.frame.core import get_frame_processors_modules, load_frame_processor_module 20 | from facefusion.utilities import is_image, is_video, detect_fps, compress_image, merge_video, extract_frames, get_temp_frame_paths, restore_audio, create_temp, move_temp, clear_temp, list_module_names, encode_execution_providers, decode_execution_providers, normalize_output_path 21 | 22 | warnings.filterwarnings('ignore', category = FutureWarning, module = 'insightface') 23 | warnings.filterwarnings('ignore', category = UserWarning, module = 'gradio') 24 | warnings.filterwarnings('ignore', category = UserWarning, module = 'torchvision') 25 | 26 | def get_argument_parser(): 27 | try: 28 | signal.signal(signal.SIGINT, lambda signal_number, frame: destroy()) 29 | except ValueError: 30 | pass 31 | program = ArgumentParser(formatter_class = lambda prog: HelpFormatter(prog, max_help_position = 120), add_help = False) 32 | # general 33 | program.add_argument('-s', '--source', help = wording.get('source_help'), dest = 'source_path') 34 | program.add_argument('-t', '--target', help = wording.get('target_help'), dest = 'target_path') 35 | program.add_argument('-o', '--output', help = wording.get('output_help'), dest = 'output_path') 36 | program.add_argument('-v', '--version', version = metadata.get('name') + ' ' + metadata.get('version'), action = 'version') 37 | # misc 38 | group_misc = program.add_argument_group('misc') 39 | group_misc.add_argument('--skip-download', help = wording.get('skip_download_help'), dest = 'skip_download', action = 'store_true') 40 | 
group_misc.add_argument('--headless', help = wording.get('headless_help'), dest = 'headless', action = 'store_true') 41 | # execution 42 | group_execution = program.add_argument_group('execution') 43 | group_execution.add_argument('--execution-providers', help = wording.get('execution_providers_help').format(choices = 'cpu'), dest = 'execution_providers', default = [ 'cpu' ], choices = encode_execution_providers(onnxruntime.get_available_providers()), nargs = '+') 44 | group_execution.add_argument('--execution-thread-count', help = wording.get('execution_thread_count_help'), dest = 'execution_thread_count', type = int, default = 1) 45 | group_execution.add_argument('--execution-queue-count', help = wording.get('execution_queue_count_help'), dest = 'execution_queue_count', type = int, default = 1) 46 | group_execution.add_argument('--max-memory', help = wording.get('max_memory_help'), dest = 'max_memory', type = int) 47 | # face recognition 48 | group_face_recognition = program.add_argument_group('face recognition') 49 | group_face_recognition.add_argument('--face-recognition', help = wording.get('face_recognition_help'), dest = 'face_recognition', default = 'reference', choices = facefusion.choices.face_recognitions) 50 | group_face_recognition.add_argument('--face-analyser-direction', help = wording.get('face_analyser_direction_help'), dest = 'face_analyser_direction', default = 'left-right', choices = facefusion.choices.face_analyser_directions) 51 | group_face_recognition.add_argument('--face-analyser-age', help = wording.get('face_analyser_age_help'), dest = 'face_analyser_age', choices = facefusion.choices.face_analyser_ages) 52 | group_face_recognition.add_argument('--face-analyser-gender', help = wording.get('face_analyser_gender_help'), dest = 'face_analyser_gender', choices = facefusion.choices.face_analyser_genders) 53 | group_face_recognition.add_argument('--reference-face-position', help = wording.get('reference_face_position_help'), dest = 'reference_face_position', type = int, default = 0) 54 | group_face_recognition.add_argument('--reference-face-distance', help = wording.get('reference_face_distance_help'), dest = 'reference_face_distance', type = float, default = 1.5) 55 | group_face_recognition.add_argument('--reference-frame-number', help = wording.get('reference_frame_number_help'), dest = 'reference_frame_number', type = int, default = 0) 56 | # frame extraction 57 | group_processing = program.add_argument_group('frame extraction') 58 | group_processing.add_argument('--trim-frame-start', help = wording.get('trim_frame_start_help'), dest = 'trim_frame_start', type = int) 59 | group_processing.add_argument('--trim-frame-end', help = wording.get('trim_frame_end_help'), dest = 'trim_frame_end', type = int) 60 | group_processing.add_argument('--temp-frame-format', help = wording.get('temp_frame_format_help'), dest = 'temp_frame_format', default = 'jpg', choices = facefusion.choices.temp_frame_formats) 61 | group_processing.add_argument('--temp-frame-quality', help = wording.get('temp_frame_quality_help'), dest = 'temp_frame_quality', type = int, default = 100, choices = range(101), metavar = '[0-100]') 62 | group_processing.add_argument('--keep-temp', help = wording.get('keep_temp_help'), dest = 'keep_temp', action = 'store_true') 63 | # output creation 64 | group_output = program.add_argument_group('output creation') 65 | group_output.add_argument('--output-image-quality', help = wording.get('output_image_quality_help'), dest = 'output_image_quality', type = int, default = 80, 
choices = range(101), metavar = '[0-100]') 66 | group_output.add_argument('--output-video-encoder', help = wording.get('output_video_encoder_help'), dest = 'output_video_encoder', default = 'libx264', choices = facefusion.choices.output_video_encoders) 67 | group_output.add_argument('--output-video-quality', help = wording.get('output_video_quality_help'), dest = 'output_video_quality', type = int, default = 80, choices = range(101), metavar = '[0-100]') 68 | group_output.add_argument('--keep-fps', help = wording.get('keep_fps_help'), dest = 'keep_fps', action = 'store_true') 69 | group_output.add_argument('--skip-audio', help = wording.get('skip_audio_help'), dest = 'skip_audio', action = 'store_true') 70 | # frame processors 71 | available_frame_processors = list_module_names('facefusion/processors/frame/modules') 72 | program = ArgumentParser(parents = [ program ], formatter_class = program.formatter_class, add_help = True) 73 | group_frame_processors = program.add_argument_group('frame processors') 74 | group_frame_processors.add_argument('--frame-processors', help = wording.get('frame_processors_help').format(choices = ', '.join(available_frame_processors)), dest = 'frame_processors', default = [ 'face_swapper' ], nargs = '+') 75 | for frame_processor in available_frame_processors: 76 | frame_processor_module = load_frame_processor_module(frame_processor) 77 | frame_processor_module.register_args(group_frame_processors) 78 | # uis 79 | group_uis = program.add_argument_group('uis') 80 | group_uis.add_argument('--ui-layouts', help = wording.get('ui_layouts_help').format(choices = ', '.join(list_module_names('facefusion/uis/layouts'))), dest = 'ui_layouts', default = [ 'default' ], nargs = '+') 81 | 82 | return program 83 | 84 | def cli() -> None: 85 | run(get_argument_parser()) 86 | 87 | def apply_args(program : ArgumentParser) -> None: 88 | args = program.parse_args([]) 89 | # general 90 | facefusion.globals.source_path = args.source_path 91 | facefusion.globals.target_path = args.target_path 92 | facefusion.globals.output_path = normalize_output_path(facefusion.globals.source_path, facefusion.globals.target_path, args.output_path) 93 | # misc 94 | facefusion.globals.skip_download = args.skip_download 95 | facefusion.globals.headless = args.headless 96 | # execution 97 | facefusion.globals.execution_providers = decode_execution_providers(args.execution_providers) 98 | facefusion.globals.execution_thread_count = args.execution_thread_count 99 | facefusion.globals.execution_queue_count = args.execution_queue_count 100 | facefusion.globals.max_memory = args.max_memory 101 | # face recognition 102 | facefusion.globals.face_recognition = args.face_recognition 103 | facefusion.globals.face_analyser_direction = args.face_analyser_direction 104 | facefusion.globals.face_analyser_age = args.face_analyser_age 105 | facefusion.globals.face_analyser_gender = args.face_analyser_gender 106 | facefusion.globals.reference_face_position = args.reference_face_position 107 | facefusion.globals.reference_face_distance = args.reference_face_distance 108 | facefusion.globals.reference_frame_number = args.reference_frame_number 109 | # frame extraction 110 | facefusion.globals.trim_frame_start = args.trim_frame_start 111 | facefusion.globals.trim_frame_end = args.trim_frame_end 112 | facefusion.globals.temp_frame_format = args.temp_frame_format 113 | facefusion.globals.temp_frame_quality = args.temp_frame_quality 114 | facefusion.globals.keep_temp = args.keep_temp 115 | # output creation 116 | 
facefusion.globals.output_image_quality = args.output_image_quality 117 | facefusion.globals.output_video_encoder = args.output_video_encoder 118 | facefusion.globals.output_video_quality = args.output_video_quality 119 | facefusion.globals.keep_fps = args.keep_fps 120 | facefusion.globals.skip_audio = args.skip_audio 121 | # frame processors 122 | available_frame_processors = list_module_names('facefusion/processors/frame/modules') 123 | facefusion.globals.frame_processors = args.frame_processors 124 | for frame_processor in available_frame_processors: 125 | frame_processor_module = load_frame_processor_module(frame_processor) 126 | frame_processor_module.apply_args(program) 127 | # uis 128 | facefusion.globals.ui_layouts = args.ui_layouts 129 | 130 | 131 | def run(program : ArgumentParser) -> None: 132 | apply_args(program) 133 | limit_resources() 134 | if not pre_check(): 135 | return 136 | for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors): 137 | if not frame_processor_module.pre_check(): 138 | return 139 | if facefusion.globals.headless: 140 | conditional_process() 141 | else: 142 | import facefusion.uis.core as ui 143 | 144 | for ui_layout in ui.get_ui_layouts_modules(facefusion.globals.ui_layouts): 145 | if not ui_layout.pre_check(): 146 | return 147 | ui.launch() 148 | 149 | 150 | def destroy() -> None: 151 | if facefusion.globals.target_path: 152 | clear_temp(facefusion.globals.target_path) 153 | sys.exit() 154 | 155 | 156 | def limit_resources() -> None: 157 | # prevent tensorflow memory leak 158 | gpus = tensorflow.config.experimental.list_physical_devices('GPU') 159 | for gpu in gpus: 160 | tensorflow.config.experimental.set_virtual_device_configuration(gpu, 161 | [ 162 | tensorflow.config.experimental.VirtualDeviceConfiguration(memory_limit = 512) 163 | ]) 164 | # limit memory usage 165 | if facefusion.globals.max_memory: 166 | memory = facefusion.globals.max_memory * 1024 ** 3 167 | if platform.system().lower() == 'darwin': 168 | memory = facefusion.globals.max_memory * 1024 ** 6 169 | if platform.system().lower() == 'windows': 170 | import ctypes 171 | kernel32 = ctypes.windll.kernel32 # type: ignore[attr-defined] 172 | kernel32.SetProcessWorkingSetSize(-1, ctypes.c_size_t(memory), ctypes.c_size_t(memory)) 173 | else: 174 | import resource 175 | resource.setrlimit(resource.RLIMIT_DATA, (memory, memory)) 176 | 177 | 178 | def pre_check() -> bool: 179 | if sys.version_info < (3, 9): 180 | update_status(wording.get('python_not_supported').format(version = '3.9')) 181 | return False 182 | if not shutil.which('ffmpeg'): 183 | update_status(wording.get('ffmpeg_not_installed')) 184 | return False 185 | return True 186 | 187 | 188 | def conditional_process() -> None: 189 | for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors): 190 | if not frame_processor_module.pre_process('output'): 191 | return 192 | if is_image(facefusion.globals.target_path): 193 | process_image() 194 | if is_video(facefusion.globals.target_path): 195 | process_video() 196 | 197 | 198 | def process_image() -> None: 199 | if predict_image(facefusion.globals.target_path): 200 | return 201 | shutil.copy2(facefusion.globals.target_path, facefusion.globals.output_path) 202 | # process frame 203 | for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors): 204 | update_status(wording.get('processing'), frame_processor_module.NAME) 205 | 
frame_processor_module.process_image(facefusion.globals.source_path, facefusion.globals.output_path, facefusion.globals.output_path) 206 | frame_processor_module.post_process() 207 | # compress image 208 | update_status(wording.get('compressing_image')) 209 | if not compress_image(facefusion.globals.output_path): 210 | update_status(wording.get('compressing_image_failed')) 211 | # validate image 212 | if is_image(facefusion.globals.output_path): 213 | update_status(wording.get('processing_image_succeed')) 214 | else: 215 | update_status(wording.get('processing_image_failed')) 216 | 217 | 218 | def process_video() -> None: 219 | if predict_video(facefusion.globals.target_path): 220 | return 221 | fps = detect_fps(facefusion.globals.target_path) if facefusion.globals.keep_fps else 25.0 222 | # create temp 223 | update_status(wording.get('creating_temp')) 224 | create_temp(facefusion.globals.target_path) 225 | # extract frames 226 | update_status(wording.get('extracting_frames_fps').format(fps = fps)) 227 | extract_frames(facefusion.globals.target_path, fps) 228 | # process frame 229 | temp_frame_paths = get_temp_frame_paths(facefusion.globals.target_path) 230 | if temp_frame_paths: 231 | for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors): 232 | update_status(wording.get('processing'), frame_processor_module.NAME) 233 | frame_processor_module.process_video(facefusion.globals.source_path, temp_frame_paths) 234 | frame_processor_module.post_process() 235 | else: 236 | update_status(wording.get('temp_frames_not_found')) 237 | return 238 | # merge video 239 | update_status(wording.get('merging_video_fps').format(fps = fps)) 240 | if not merge_video(facefusion.globals.target_path, fps): 241 | update_status(wording.get('merging_video_failed')) 242 | return 243 | # handle audio 244 | if facefusion.globals.skip_audio: 245 | update_status(wording.get('skipping_audio')) 246 | move_temp(facefusion.globals.target_path, facefusion.globals.output_path) 247 | else: 248 | update_status(wording.get('restoring_audio')) 249 | if not restore_audio(facefusion.globals.target_path, facefusion.globals.output_path): 250 | update_status(wording.get('restoring_audio_failed')) 251 | move_temp(facefusion.globals.target_path, facefusion.globals.output_path) 252 | # clear temp 253 | update_status(wording.get('clearing_temp')) 254 | clear_temp(facefusion.globals.target_path) 255 | # validate video 256 | if is_video(facefusion.globals.output_path): 257 | update_status(wording.get('processing_video_succeed')) 258 | else: 259 | update_status(wording.get('processing_video_failed')) 260 | 261 | 262 | def update_status(message : str, scope : str = 'FACEFUSION.CORE') -> None: 263 | print('[' + scope + '] ' + message) 264 | -------------------------------------------------------------------------------- /facefusion/face_analyser.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Optional, List 2 | import threading 3 | import insightface 4 | import numpy 5 | 6 | import facefusion.globals 7 | from facefusion.face_cache import get_faces_cache, set_faces_cache 8 | from facefusion.typing import Frame, Face, FaceAnalyserDirection, FaceAnalyserAge, FaceAnalyserGender 9 | 10 | FACE_ANALYSER = None 11 | THREAD_LOCK : threading.Lock = threading.Lock() 12 | 13 | 14 | def get_face_analyser() -> Any: 15 | global FACE_ANALYSER 16 | 17 | with THREAD_LOCK: 18 | if FACE_ANALYSER is None: 19 | FACE_ANALYSER = insightface.app.FaceAnalysis(name = 
'buffalo_l', providers = facefusion.globals.execution_providers) 20 | FACE_ANALYSER.prepare(ctx_id = 0) 21 | return FACE_ANALYSER 22 | 23 | 24 | def clear_face_analyser() -> Any: 25 | global FACE_ANALYSER 26 | 27 | FACE_ANALYSER = None 28 | 29 | 30 | def get_one_face(frame : Frame, position : int = 0) -> Optional[Face]: 31 | many_faces = get_many_faces(frame) 32 | if many_faces: 33 | try: 34 | return many_faces[position] 35 | except IndexError: 36 | return many_faces[-1] 37 | return None 38 | 39 | 40 | def get_many_faces(frame : Frame) -> List[Face]: 41 | try: 42 | faces_cache = get_faces_cache(frame) 43 | if faces_cache: 44 | faces = faces_cache 45 | else: 46 | faces = get_face_analyser().get(frame) 47 | set_faces_cache(frame, faces) 48 | if facefusion.globals.face_analyser_direction: 49 | faces = sort_by_direction(faces, facefusion.globals.face_analyser_direction) 50 | if facefusion.globals.face_analyser_age: 51 | faces = filter_by_age(faces, facefusion.globals.face_analyser_age) 52 | if facefusion.globals.face_analyser_gender: 53 | faces = filter_by_gender(faces, facefusion.globals.face_analyser_gender) 54 | return faces 55 | except (AttributeError, ValueError): 56 | return [] 57 | 58 | 59 | def find_similar_faces(frame : Frame, reference_face : Face, face_distance : float) -> List[Face]: 60 | many_faces = get_many_faces(frame) 61 | similar_faces = [] 62 | if many_faces: 63 | for face in many_faces: 64 | if hasattr(face, 'normed_embedding') and hasattr(reference_face, 'normed_embedding'): 65 | current_face_distance = numpy.sum(numpy.square(face.normed_embedding - reference_face.normed_embedding)) 66 | if current_face_distance < face_distance: 67 | similar_faces.append(face) 68 | return similar_faces 69 | 70 | 71 | def sort_by_direction(faces : List[Face], direction : FaceAnalyserDirection) -> List[Face]: 72 | if direction == 'left-right': 73 | return sorted(faces, key = lambda face: face['bbox'][0]) 74 | if direction == 'right-left': 75 | return sorted(faces, key = lambda face: face['bbox'][0], reverse = True) 76 | if direction == 'top-bottom': 77 | return sorted(faces, key = lambda face: face['bbox'][1]) 78 | if direction == 'bottom-top': 79 | return sorted(faces, key = lambda face: face['bbox'][1], reverse = True) 80 | if direction == 'small-large': 81 | return sorted(faces, key = lambda face: (face['bbox'][2] - face['bbox'][0]) * (face['bbox'][3] - face['bbox'][1])) 82 | if direction == 'large-small': 83 | return sorted(faces, key = lambda face: (face['bbox'][2] - face['bbox'][0]) * (face['bbox'][3] - face['bbox'][1]), reverse = True) 84 | return faces 85 | 86 | 87 | def filter_by_age(faces : List[Face], age : FaceAnalyserAge) -> List[Face]: 88 | filter_faces = [] 89 | for face in faces: 90 | if face['age'] < 13 and age == 'child': 91 | filter_faces.append(face) 92 | elif face['age'] < 19 and age == 'teen': 93 | filter_faces.append(face) 94 | elif face['age'] < 60 and age == 'adult': 95 | filter_faces.append(face) 96 | elif face['age'] > 59 and age == 'senior': 97 | filter_faces.append(face) 98 | return filter_faces 99 | 100 | 101 | def filter_by_gender(faces : List[Face], gender : FaceAnalyserGender) -> List[Face]: 102 | filter_faces = [] 103 | for face in faces: 104 | if face['gender'] == 1 and gender == 'male': 105 | filter_faces.append(face) 106 | if face['gender'] == 0 and gender == 'female': 107 | filter_faces.append(face) 108 | return filter_faces 109 | -------------------------------------------------------------------------------- /facefusion/face_cache.py: 
-------------------------------------------------------------------------------- 1 | from typing import Optional, List, Dict 2 | import hashlib 3 | 4 | from facefusion.typing import Frame, Face 5 | 6 | FACES_CACHE : Dict[str, List[Face]] = {} 7 | 8 | 9 | def get_faces_cache(frame : Frame) -> Optional[List[Face]]: 10 | frame_hash = create_frame_hash(frame) 11 | if frame_hash in FACES_CACHE: 12 | return FACES_CACHE[frame_hash] 13 | return None 14 | 15 | 16 | def set_faces_cache(frame : Frame, faces : List[Face]) -> None: 17 | frame_hash = create_frame_hash(frame) 18 | if frame_hash: 19 | FACES_CACHE[frame_hash] = faces 20 | 21 | 22 | def clear_faces_cache() -> None: 23 | global FACES_CACHE 24 | 25 | FACES_CACHE = {} 26 | 27 | 28 | def create_frame_hash(frame : Frame) -> Optional[str]: 29 | return hashlib.sha256(frame.tobytes()).hexdigest() if frame is not None else None 30 | -------------------------------------------------------------------------------- /facefusion/face_reference.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | 3 | from facefusion.typing import Face 4 | 5 | FACE_REFERENCE = None 6 | 7 | 8 | def get_face_reference() -> Optional[Face]: 9 | return FACE_REFERENCE 10 | 11 | 12 | def set_face_reference(face : Face) -> None: 13 | global FACE_REFERENCE 14 | 15 | FACE_REFERENCE = face 16 | 17 | 18 | def clear_face_reference() -> None: 19 | global FACE_REFERENCE 20 | 21 | FACE_REFERENCE = None 22 | -------------------------------------------------------------------------------- /facefusion/globals.py: -------------------------------------------------------------------------------- 1 | from typing import List, Optional 2 | 3 | from facefusion.typing import FaceRecognition, FaceAnalyserDirection, FaceAnalyserAge, FaceAnalyserGender, TempFrameFormat, OutputVideoEncoder 4 | 5 | # general 6 | source_path : Optional[str] = None 7 | target_path : Optional[str] = None 8 | output_path : Optional[str] = None 9 | # misc 10 | skip_download : Optional[bool] = None 11 | headless : Optional[bool] = None 12 | # execution 13 | execution_providers : List[str] = [] 14 | execution_thread_count : Optional[int] = None 15 | execution_queue_count : Optional[int] = None 16 | max_memory : Optional[int] = None 17 | # face recognition 18 | face_recognition : Optional[FaceRecognition] = None 19 | face_analyser_direction : Optional[FaceAnalyserDirection] = None 20 | face_analyser_age : Optional[FaceAnalyserAge] = None 21 | face_analyser_gender : Optional[FaceAnalyserGender] = None 22 | reference_face_position : Optional[int] = None 23 | reference_face_distance : Optional[float] = None 24 | reference_frame_number : Optional[int] = None 25 | # frame extraction 26 | trim_frame_start : Optional[int] = None 27 | trim_frame_end : Optional[int] = None 28 | temp_frame_format : Optional[TempFrameFormat] = None 29 | temp_frame_quality : Optional[int] = None 30 | keep_temp : Optional[bool] = None 31 | # output creation 32 | output_image_quality : Optional[int] = None 33 | output_video_encoder : Optional[OutputVideoEncoder] = None 34 | output_video_quality : Optional[int] = None 35 | keep_fps : Optional[bool] = None 36 | skip_audio : Optional[bool] = None 37 | # frame processors 38 | frame_processors : List[str] = [] 39 | # uis 40 | ui_layouts : List[str] = [] 41 | -------------------------------------------------------------------------------- /facefusion/installer.py: -------------------------------------------------------------------------------- 1 | 
from typing import Dict, Tuple 2 | import subprocess 3 | from argparse import ArgumentParser, HelpFormatter 4 | 5 | subprocess.call([ 'pip', 'install', 'inquirer', '-q' ]) 6 | 7 | import inquirer 8 | 9 | from facefusion import metadata, wording 10 | 11 | TORCH : Dict[str, str] =\ 12 | { 13 | 'default': 'default', 14 | 'cpu': 'cpu', 15 | 'cuda': 'cu118', 16 | 'rocm': 'rocm5.6' 17 | } 18 | ONNXRUNTIMES : Dict[str, Tuple[str, str]] =\ 19 | { 20 | 'default': ('onnxruntime', '1.16.0'), 21 | 'cuda': ('onnxruntime-gpu', '1.16.0'), 22 | 'coreml-legacy': ('onnxruntime-coreml', '1.13.1'), 23 | 'coreml-silicon': ('onnxruntime-silicon', '1.16.0'), 24 | 'directml': ('onnxruntime-directml', '1.16.0'), 25 | 'openvino': ('onnxruntime-openvino', '1.15.0') 26 | } 27 | 28 | 29 | def cli() -> None: 30 | program = ArgumentParser(formatter_class = lambda prog: HelpFormatter(prog, max_help_position = 120)) 31 | program.add_argument('--torch', help = wording.get('install_dependency_help').format(dependency = 'torch'), dest = 'torch', choices = TORCH.keys()) 32 | program.add_argument('--onnxruntime', help = wording.get('install_dependency_help').format(dependency = 'onnxruntime'), dest = 'onnxruntime', choices = ONNXRUNTIMES.keys()) 33 | program.add_argument('-v', '--version', version = metadata.get('name') + ' ' + metadata.get('version'), action = 'version') 34 | run(program) 35 | 36 | 37 | def run(program : ArgumentParser) -> None: 38 | args = program.parse_args() 39 | 40 | if args.torch and args.onnxruntime: 41 | answers =\ 42 | { 43 | 'torch': args.torch, 44 | 'onnxruntime': args.onnxruntime 45 | } 46 | else: 47 | answers = inquirer.prompt( 48 | [ 49 | inquirer.List( 50 | 'torch', 51 | message = wording.get('install_dependency_help').format(dependency = 'torch'), 52 | choices = list(TORCH.keys()) 53 | ), 54 | inquirer.List( 55 | 'onnxruntime', 56 | message = wording.get('install_dependency_help').format(dependency = 'onnxruntime'), 57 | choices = list(ONNXRUNTIMES.keys()) 58 | ) 59 | ]) 60 | if answers is not None: 61 | torch = answers['torch'] 62 | torch_wheel = TORCH[torch] 63 | onnxruntime = answers['onnxruntime'] 64 | onnxruntime_name, onnxruntime_version = ONNXRUNTIMES[onnxruntime] 65 | subprocess.call([ 'pip', 'uninstall', 'torch', '-y' ]) 66 | if torch_wheel == 'default': 67 | subprocess.call([ 'pip', 'install', '-r', 'requirements.txt' ]) 68 | else: 69 | subprocess.call([ 'pip', 'install', '-r', 'requirements.txt', '--extra-index-url', 'https://download.pytorch.org/whl/' + torch_wheel ]) 70 | subprocess.call([ 'pip', 'uninstall', 'onnxruntime', onnxruntime_name, '-y' ]) 71 | subprocess.call([ 'pip', 'install', onnxruntime_name + '==' + onnxruntime_version ]) 72 | -------------------------------------------------------------------------------- /facefusion/metadata.py: -------------------------------------------------------------------------------- 1 | METADATA =\ 2 | { 3 | 'name': 'FaceFusion', 4 | 'description': 'Next generation face swapper and enhancer', 5 | 'version': '1.3.1', 6 | 'license': 'MIT', 7 | 'author': 'Henry Ruhs', 8 | 'url': 'https://facefusion.io' 9 | } 10 | 11 | 12 | def get(key : str) -> str: 13 | return METADATA[key] 14 | -------------------------------------------------------------------------------- /facefusion/predictor.py: -------------------------------------------------------------------------------- 1 | import threading 2 | from functools import lru_cache 3 | 4 | import numpy 5 | import opennsfw2 6 | from PIL import Image 7 | from keras import Model 8 | 9 | from 
facefusion.typing import Frame 10 | 11 | PREDICTOR = None 12 | THREAD_LOCK : threading.Lock = threading.Lock() 13 | MAX_PROBABILITY = 0.75 14 | FRAME_INTERVAL = 25 15 | STREAM_COUNTER = 0 16 | 17 | 18 | def get_predictor() -> Model: 19 | global PREDICTOR 20 | 21 | with THREAD_LOCK: 22 | if PREDICTOR is None: 23 | PREDICTOR = opennsfw2.make_open_nsfw_model() 24 | return PREDICTOR 25 | 26 | 27 | def clear_predictor() -> None: 28 | global PREDICTOR 29 | 30 | PREDICTOR = None 31 | 32 | 33 | def predict_stream(frame : Frame) -> bool: 34 | global STREAM_COUNTER 35 | 36 | STREAM_COUNTER = STREAM_COUNTER + 1 37 | if STREAM_COUNTER % FRAME_INTERVAL == 0: 38 | return predict_frame(frame) 39 | return False 40 | 41 | 42 | def predict_frame(frame : Frame) -> bool: 43 | image = Image.fromarray(frame) 44 | image = opennsfw2.preprocess_image(image, opennsfw2.Preprocessing.YAHOO) 45 | views = numpy.expand_dims(image, axis = 0) 46 | _, probability = get_predictor().predict(views)[0] 47 | return probability > MAX_PROBABILITY 48 | 49 | 50 | @lru_cache(maxsize = None) 51 | def predict_image(image_path : str) -> bool: 52 | return opennsfw2.predict_image(image_path) > MAX_PROBABILITY 53 | 54 | 55 | @lru_cache(maxsize = None) 56 | def predict_video(video_path : str) -> bool: 57 | _, probabilities = opennsfw2.predict_video_frames(video_path = video_path, frame_interval = FRAME_INTERVAL) 58 | return any(probability > MAX_PROBABILITY for probability in probabilities) 59 | -------------------------------------------------------------------------------- /facefusion/processors/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/diffus-me/sd-webui-facefusion/f84fed74bd20374112c7009f8d75c391d64cfa00/facefusion/processors/__init__.py -------------------------------------------------------------------------------- /facefusion/processors/frame/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/diffus-me/sd-webui-facefusion/f84fed74bd20374112c7009f8d75c391d64cfa00/facefusion/processors/frame/__init__.py -------------------------------------------------------------------------------- /facefusion/processors/frame/choices.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | 3 | face_swapper_models : List[str] = [ 'inswapper_128', 'inswapper_128_fp16' ] 4 | face_enhancer_models : List[str] = [ 'codeformer', 'gfpgan_1.2', 'gfpgan_1.3', 'gfpgan_1.4', 'gpen_bfr_512' ] 5 | frame_enhancer_models : List[str] = [ 'realesrgan_x2plus', 'realesrgan_x4plus', 'realesrnet_x4plus' ] 6 | -------------------------------------------------------------------------------- /facefusion/processors/frame/core.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import importlib 4 | import psutil 5 | from concurrent.futures import ThreadPoolExecutor, as_completed 6 | from queue import Queue 7 | from types import ModuleType 8 | from typing import Any, List 9 | from tqdm import tqdm 10 | 11 | import facefusion.globals 12 | from facefusion import wording 13 | from facefusion.typing import Process_Frames 14 | 15 | FRAME_PROCESSORS_MODULES : List[ModuleType] = [] 16 | FRAME_PROCESSORS_METHODS =\ 17 | [ 18 | 'get_frame_processor', 19 | 'clear_frame_processor', 20 | 'get_options', 21 | 'set_options', 22 | 'register_args', 23 | 'apply_args', 24 | 'pre_check', 25 | 'pre_process', 26 | 
'process_frame', 27 | 'process_frames', 28 | 'process_image', 29 | 'process_video', 30 | 'post_process' 31 | ] 32 | 33 | 34 | def load_frame_processor_module(frame_processor : str) -> Any: 35 | try: 36 | frame_processor_module = importlib.import_module('facefusion.processors.frame.modules.' + frame_processor) 37 | for method_name in FRAME_PROCESSORS_METHODS: 38 | if not hasattr(frame_processor_module, method_name): 39 | raise NotImplementedError 40 | except ModuleNotFoundError: 41 | sys.exit(wording.get('frame_processor_not_loaded').format(frame_processor = frame_processor)) 42 | except NotImplementedError: 43 | sys.exit(wording.get('frame_processor_not_implemented').format(frame_processor = frame_processor)) 44 | return frame_processor_module 45 | 46 | 47 | def get_frame_processors_modules(frame_processors : List[str]) -> List[ModuleType]: 48 | global FRAME_PROCESSORS_MODULES 49 | 50 | if not FRAME_PROCESSORS_MODULES: 51 | for frame_processor in frame_processors: 52 | frame_processor_module = load_frame_processor_module(frame_processor) 53 | FRAME_PROCESSORS_MODULES.append(frame_processor_module) 54 | return FRAME_PROCESSORS_MODULES 55 | 56 | 57 | def clear_frame_processors_modules() -> None: 58 | global FRAME_PROCESSORS_MODULES 59 | 60 | for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors): 61 | frame_processor_module.clear_frame_processor() 62 | FRAME_PROCESSORS_MODULES = [] 63 | 64 | 65 | def multi_process_frames(source_path : str, temp_frame_paths : List[str], process_frames : Process_Frames) -> None: 66 | progress_bar_format = '{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]' 67 | with tqdm(total = len(temp_frame_paths), desc = wording.get('processing'), unit = 'frame', dynamic_ncols = True, bar_format = progress_bar_format) as progress: 68 | with ThreadPoolExecutor(max_workers = facefusion.globals.execution_thread_count) as executor: 69 | futures = [] 70 | queue_temp_frame_paths : Queue[str] = create_queue(temp_frame_paths) 71 | queue_per_future = max(len(temp_frame_paths) // facefusion.globals.execution_thread_count * facefusion.globals.execution_queue_count, 1) 72 | while not queue_temp_frame_paths.empty(): 73 | payload_temp_frame_paths = pick_queue(queue_temp_frame_paths, queue_per_future) 74 | future = executor.submit(process_frames, source_path, payload_temp_frame_paths, lambda: update_progress(progress)) 75 | futures.append(future) 76 | for future_done in as_completed(futures): 77 | future_done.result() 78 | 79 | 80 | def create_queue(temp_frame_paths : List[str]) -> Queue[str]: 81 | queue : Queue[str] = Queue() 82 | for frame_path in temp_frame_paths: 83 | queue.put(frame_path) 84 | return queue 85 | 86 | 87 | def pick_queue(queue : Queue[str], queue_per_future : int) -> List[str]: 88 | queues = [] 89 | for _ in range(queue_per_future): 90 | if not queue.empty(): 91 | queues.append(queue.get()) 92 | return queues 93 | 94 | 95 | def update_progress(progress : Any = None) -> None: 96 | process = psutil.Process(os.getpid()) 97 | memory_usage = process.memory_info().rss / 1024 / 1024 / 1024 98 | progress.set_postfix( 99 | { 100 | 'memory_usage': '{:.2f}'.format(memory_usage).zfill(5) + 'GB', 101 | 'execution_providers': facefusion.globals.execution_providers, 102 | 'execution_thread_count': facefusion.globals.execution_thread_count, 103 | 'execution_queue_count': facefusion.globals.execution_queue_count 104 | }) 105 | progress.refresh() 106 | progress.update(1) 107 | 
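# Usage sketch (illustrative, not part of the module): the callable handed to
# multi_process_frames must match the Process_Frames signature imported above.
# Each worker thread receives a batch of up to queue_per_future frame paths
# picked from the shared queue and reports back via update_progress once per
# processed frame. The function below is a hypothetical minimal processor.

def example_process_frames(source_path : str, temp_frame_paths : List[str], update_progress : Any) -> None:
	for temp_frame_path in temp_frame_paths:
		# read, transform and overwrite the frame at temp_frame_path here
		update_progress()

# multi_process_frames('source.jpg', temp_frame_paths, example_process_frames)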
-------------------------------------------------------------------------------- /facefusion/processors/frame/globals.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | 3 | face_swapper_model : Optional[str] = None 4 | face_enhancer_model : Optional[str] = None 5 | face_enhancer_blend : Optional[int] = None 6 | frame_enhancer_model : Optional[str] = None 7 | frame_enhancer_blend : Optional[int] = None 8 | -------------------------------------------------------------------------------- /facefusion/processors/frame/modules/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/diffus-me/sd-webui-facefusion/f84fed74bd20374112c7009f8d75c391d64cfa00/facefusion/processors/frame/modules/__init__.py -------------------------------------------------------------------------------- /facefusion/processors/frame/modules/face_enhancer.py: -------------------------------------------------------------------------------- 1 | from typing import Any, List, Tuple, Dict, Literal, Optional 2 | from argparse import ArgumentParser 3 | import cv2 4 | import threading 5 | import numpy 6 | import onnxruntime 7 | import facefusion.globals 8 | import facefusion.processors.frame.core as frame_processors 9 | from facefusion import wording 10 | from facefusion.core import update_status 11 | from facefusion.face_analyser import get_many_faces, clear_face_analyser 12 | from facefusion.typing import Face, Frame, Matrix, Update_Process, ProcessMode, ModelValue, OptionsWithModel 13 | from facefusion.utilities import conditional_download, resolve_relative_path, is_image, is_video, is_file, is_download_done 14 | from facefusion.vision import read_image, read_static_image, write_image 15 | from facefusion.processors.frame import globals as frame_processors_globals 16 | from facefusion.processors.frame import choices as frame_processors_choices 17 | 18 | FRAME_PROCESSOR = None 19 | THREAD_SEMAPHORE : threading.Semaphore = threading.Semaphore() 20 | THREAD_LOCK : threading.Lock = threading.Lock() 21 | NAME = 'FACEFUSION.FRAME_PROCESSOR.FACE_ENHANCER' 22 | MODELS : Dict[str, ModelValue] =\ 23 | { 24 | 'codeformer': 25 | { 26 | 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/codeformer.onnx', 27 | 'path': resolve_relative_path('../.assets/models/codeformer.onnx') 28 | }, 29 | 'gfpgan_1.2': 30 | { 31 | 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/GFPGANv1.2.onnx', 32 | 'path': resolve_relative_path('../.assets/models/GFPGANv1.2.onnx') 33 | }, 34 | 'gfpgan_1.3': 35 | { 36 | 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/GFPGANv1.3.onnx', 37 | 'path': resolve_relative_path('../.assets/models/GFPGANv1.3.onnx') 38 | }, 39 | 'gfpgan_1.4': 40 | { 41 | 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/GFPGANv1.4.onnx', 42 | 'path': resolve_relative_path('../.assets/models/GFPGANv1.4.onnx') 43 | }, 44 | 'gpen_bfr_512': 45 | { 46 | 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/GPEN-BFR-512.onnx', 47 | 'path': resolve_relative_path('../.assets/models/GPEN-BFR-512.onnx') 48 | } 49 | } 50 | OPTIONS : Optional[OptionsWithModel] = None 51 | 52 | 53 | def get_frame_processor() -> Any: 54 | global FRAME_PROCESSOR 55 | 56 | with THREAD_LOCK: 57 | if FRAME_PROCESSOR is None: 58 | model_path = get_options('model').get('path') 59 | FRAME_PROCESSOR = onnxruntime.InferenceSession(model_path, providers = 
facefusion.globals.execution_providers) 60 | return FRAME_PROCESSOR 61 | 62 | 63 | def clear_frame_processor() -> None: 64 | global FRAME_PROCESSOR 65 | 66 | FRAME_PROCESSOR = None 67 | 68 | 69 | def get_options(key : Literal[ 'model' ]) -> Any: 70 | global OPTIONS 71 | 72 | if OPTIONS is None: 73 | OPTIONS =\ 74 | { 75 | 'model': MODELS[frame_processors_globals.face_enhancer_model] 76 | } 77 | return OPTIONS.get(key) 78 | 79 | 80 | def set_options(key : Literal[ 'model' ], value : Any) -> None: 81 | global OPTIONS 82 | 83 | OPTIONS[key] = value 84 | 85 | 86 | def register_args(program : ArgumentParser) -> None: 87 | program.add_argument('--face-enhancer-model', help = wording.get('frame_processor_model_help'), dest = 'face_enhancer_model', default = 'gfpgan_1.4', choices = frame_processors_choices.face_enhancer_models) 88 | program.add_argument('--face-enhancer-blend', help = wording.get('frame_processor_blend_help'), dest = 'face_enhancer_blend', type = int, default = 100, choices = range(101), metavar = '[0-100]') 89 | 90 | 91 | def apply_args(program : ArgumentParser) -> None: 92 | args = program.parse_args([]) 93 | frame_processors_globals.face_enhancer_model = args.face_enhancer_model 94 | frame_processors_globals.face_enhancer_blend = args.face_enhancer_blend 95 | 96 | 97 | def pre_check() -> bool: 98 | if not facefusion.globals.skip_download: 99 | download_directory_path = resolve_relative_path('../.assets/models') 100 | model_url = get_options('model').get('url') 101 | conditional_download(download_directory_path, [ model_url ]) 102 | return True 103 | 104 | 105 | def pre_process(mode : ProcessMode) -> bool: 106 | model_url = get_options('model').get('url') 107 | model_path = get_options('model').get('path') 108 | if not facefusion.globals.skip_download and not is_download_done(model_url, model_path): 109 | update_status(wording.get('model_download_not_done') + wording.get('exclamation_mark'), NAME) 110 | return False 111 | elif not is_file(model_path): 112 | update_status(wording.get('model_file_not_present') + wording.get('exclamation_mark'), NAME) 113 | return False 114 | if mode in [ 'output', 'preview' ] and not is_image(facefusion.globals.target_path) and not is_video(facefusion.globals.target_path): 115 | update_status(wording.get('select_image_or_video_target') + wording.get('exclamation_mark'), NAME) 116 | return False 117 | if mode == 'output' and not facefusion.globals.output_path: 118 | update_status(wording.get('select_file_or_directory_output') + wording.get('exclamation_mark'), NAME) 119 | return False 120 | return True 121 | 122 | 123 | def post_process() -> None: 124 | clear_frame_processor() 125 | clear_face_analyser() 126 | read_static_image.cache_clear() 127 | 128 | 129 | def enhance_face(target_face : Face, temp_frame : Frame) -> Frame: 130 | frame_processor = get_frame_processor() 131 | crop_frame, affine_matrix = warp_face(target_face, temp_frame) 132 | crop_frame = prepare_crop_frame(crop_frame) 133 | frame_processor_inputs = {} 134 | for frame_processor_input in frame_processor.get_inputs(): 135 | if frame_processor_input.name == 'input': 136 | frame_processor_inputs[frame_processor_input.name] = crop_frame 137 | if frame_processor_input.name == 'weight': 138 | frame_processor_inputs[frame_processor_input.name] = numpy.array([ 1 ], dtype = numpy.double) 139 | with THREAD_SEMAPHORE: 140 | crop_frame = frame_processor.run(None, frame_processor_inputs)[0][0] 141 | crop_frame = normalize_crop_frame(crop_frame) 142 | paste_frame = paste_back(temp_frame, crop_frame, 
affine_matrix) 143 | temp_frame = blend_frame(temp_frame, paste_frame) 144 | return temp_frame 145 | 146 | 147 | def warp_face(target_face : Face, temp_frame : Frame) -> Tuple[Frame, Matrix]: 148 | template = numpy.array( 149 | [ 150 | [ 192.98138, 239.94708 ], 151 | [ 318.90277, 240.1936 ], 152 | [ 256.63416, 314.01935 ], 153 | [ 201.26117, 371.41043 ], 154 | [ 313.08905, 371.15118 ] 155 | ]) 156 | affine_matrix = cv2.estimateAffinePartial2D(target_face['kps'], template, method = cv2.LMEDS)[0] 157 | crop_frame = cv2.warpAffine(temp_frame, affine_matrix, (512, 512)) 158 | return crop_frame, affine_matrix 159 | 160 | 161 | def prepare_crop_frame(crop_frame : Frame) -> Frame: 162 | crop_frame = crop_frame[:, :, ::-1] / 255.0 163 | crop_frame = (crop_frame - 0.5) / 0.5 164 | crop_frame = numpy.expand_dims(crop_frame.transpose(2, 0, 1), axis = 0).astype(numpy.float32) 165 | return crop_frame 166 | 167 | 168 | def normalize_crop_frame(crop_frame : Frame) -> Frame: 169 | crop_frame = numpy.clip(crop_frame, -1, 1) 170 | crop_frame = (crop_frame + 1) / 2 171 | crop_frame = crop_frame.transpose(1, 2, 0) 172 | crop_frame = (crop_frame * 255.0).round() 173 | crop_frame = crop_frame.astype(numpy.uint8)[:, :, ::-1] 174 | return crop_frame 175 | 176 | 177 | def paste_back(temp_frame : Frame, crop_frame : Frame, affine_matrix : Matrix) -> Frame: 178 | inverse_affine_matrix = cv2.invertAffineTransform(affine_matrix) 179 | temp_frame_height, temp_frame_width = temp_frame.shape[0:2] 180 | crop_frame_height, crop_frame_width = crop_frame.shape[0:2] 181 | inverse_crop_frame = cv2.warpAffine(crop_frame, inverse_affine_matrix, (temp_frame_width, temp_frame_height)) 182 | inverse_mask = numpy.ones((crop_frame_height, crop_frame_width, 3), dtype = numpy.float32) 183 | inverse_mask_frame = cv2.warpAffine(inverse_mask, inverse_affine_matrix, (temp_frame_width, temp_frame_height)) 184 | inverse_mask_frame = cv2.erode(inverse_mask_frame, numpy.ones((2, 2))) 185 | inverse_mask_border = inverse_mask_frame * inverse_crop_frame 186 | inverse_mask_area = numpy.sum(inverse_mask_frame) // 3 187 | inverse_mask_edge = int(inverse_mask_area ** 0.5) // 20 188 | inverse_mask_radius = inverse_mask_edge * 2 189 | inverse_mask_center = cv2.erode(inverse_mask_frame, numpy.ones((inverse_mask_radius, inverse_mask_radius))) 190 | inverse_mask_blur_size = inverse_mask_edge * 2 + 1 191 | inverse_mask_blur_area = cv2.GaussianBlur(inverse_mask_center, (inverse_mask_blur_size, inverse_mask_blur_size), 0) 192 | temp_frame = inverse_mask_blur_area * inverse_mask_border + (1 - inverse_mask_blur_area) * temp_frame 193 | temp_frame = temp_frame.clip(0, 255).astype(numpy.uint8) 194 | return temp_frame 195 | 196 | 197 | def blend_frame(temp_frame : Frame, paste_frame : Frame) -> Frame: 198 | face_enhancer_blend = 1 - (frame_processors_globals.face_enhancer_blend / 100) 199 | temp_frame = cv2.addWeighted(temp_frame, face_enhancer_blend, paste_frame, 1 - face_enhancer_blend, 0) 200 | return temp_frame 201 | 202 | 203 | def process_frame(source_face : Face, reference_face : Face, temp_frame : Frame) -> Frame: 204 | many_faces = get_many_faces(temp_frame) 205 | if many_faces: 206 | for target_face in many_faces: 207 | temp_frame = enhance_face(target_face, temp_frame) 208 | return temp_frame 209 | 210 | 211 | def process_frames(source_path : str, temp_frame_paths : List[str], update_progress : Update_Process) -> None: 212 | for temp_frame_path in temp_frame_paths: 213 | temp_frame = read_image(temp_frame_path) 214 | result_frame = process_frame(None, 
None, temp_frame) 215 | write_image(temp_frame_path, result_frame) 216 | update_progress() 217 | 218 | 219 | def process_image(source_path : str, target_path : str, output_path : str) -> None: 220 | target_frame = read_static_image(target_path) 221 | result_frame = process_frame(None, None, target_frame) 222 | write_image(output_path, result_frame) 223 | 224 | 225 | def process_video(source_path : str, temp_frame_paths : List[str]) -> None: 226 | frame_processors.multi_process_frames(None, temp_frame_paths, process_frames) 227 | -------------------------------------------------------------------------------- /facefusion/processors/frame/modules/face_swapper.py: -------------------------------------------------------------------------------- 1 | from typing import Any, List, Dict, Literal, Optional 2 | from argparse import ArgumentParser 3 | import insightface 4 | import threading 5 | 6 | import facefusion.globals 7 | import facefusion.processors.frame.core as frame_processors 8 | from facefusion import wording 9 | from facefusion.core import update_status 10 | from facefusion.face_analyser import get_one_face, get_many_faces, find_similar_faces, clear_face_analyser 11 | from facefusion.face_reference import get_face_reference, set_face_reference 12 | from facefusion.typing import Face, Frame, Update_Process, ProcessMode, ModelValue, OptionsWithModel 13 | from facefusion.utilities import conditional_download, resolve_relative_path, is_image, is_video, is_file, is_download_done 14 | from facefusion.vision import read_image, read_static_image, write_image 15 | from facefusion.processors.frame import globals as frame_processors_globals 16 | from facefusion.processors.frame import choices as frame_processors_choices 17 | 18 | FRAME_PROCESSOR = None 19 | THREAD_LOCK : threading.Lock = threading.Lock() 20 | NAME = 'FACEFUSION.FRAME_PROCESSOR.FACE_SWAPPER' 21 | MODELS : Dict[str, ModelValue] =\ 22 | { 23 | 'inswapper_128': 24 | { 25 | 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/inswapper_128.onnx', 26 | 'path': resolve_relative_path('../.assets/models/inswapper_128.onnx') 27 | }, 28 | 'inswapper_128_fp16': 29 | { 30 | 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/inswapper_128_fp16.onnx', 31 | 'path': resolve_relative_path('../.assets/models/inswapper_128_fp16.onnx') 32 | } 33 | } 34 | OPTIONS : Optional[OptionsWithModel] = None 35 | 36 | 37 | def get_frame_processor() -> Any: 38 | global FRAME_PROCESSOR 39 | 40 | with THREAD_LOCK: 41 | if FRAME_PROCESSOR is None: 42 | model_path = get_options('model').get('path') 43 | FRAME_PROCESSOR = insightface.model_zoo.get_model(model_path, providers = facefusion.globals.execution_providers) 44 | return FRAME_PROCESSOR 45 | 46 | 47 | def clear_frame_processor() -> None: 48 | global FRAME_PROCESSOR 49 | 50 | FRAME_PROCESSOR = None 51 | 52 | 53 | def get_options(key : Literal[ 'model' ]) -> Any: 54 | global OPTIONS 55 | 56 | if OPTIONS is None: 57 | OPTIONS =\ 58 | { 59 | 'model': MODELS[frame_processors_globals.face_swapper_model] 60 | } 61 | return OPTIONS.get(key) 62 | 63 | 64 | def set_options(key : Literal[ 'model' ], value : Any) -> None: 65 | global OPTIONS 66 | 67 | OPTIONS[key] = value 68 | 69 | 70 | def register_args(program : ArgumentParser) -> None: 71 | program.add_argument('--face-swapper-model', help = wording.get('frame_processor_model_help'), dest = 'face_swapper_model', default = 'inswapper_128', choices = frame_processors_choices.face_swapper_models) 72 
| 73 | 74 | def apply_args(program : ArgumentParser) -> None: 75 | args = program.parse_args([]) 76 | frame_processors_globals.face_swapper_model = args.face_swapper_model 77 | 78 | 79 | def pre_check() -> bool: 80 | if not facefusion.globals.skip_download: 81 | download_directory_path = resolve_relative_path('../.assets/models') 82 | model_url = get_options('model').get('url') 83 | conditional_download(download_directory_path, [ model_url ]) 84 | return True 85 | 86 | 87 | def pre_process(mode : ProcessMode) -> bool: 88 | model_url = get_options('model').get('url') 89 | model_path = get_options('model').get('path') 90 | if not facefusion.globals.skip_download and not is_download_done(model_url, model_path): 91 | update_status(wording.get('model_download_not_done') + wording.get('exclamation_mark'), NAME) 92 | return False 93 | elif not is_file(model_path): 94 | update_status(wording.get('model_file_not_present') + wording.get('exclamation_mark'), NAME) 95 | return False 96 | if not is_image(facefusion.globals.source_path): 97 | update_status(wording.get('select_image_source') + wording.get('exclamation_mark'), NAME) 98 | return False 99 | elif not get_one_face(read_static_image(facefusion.globals.source_path)): 100 | update_status(wording.get('no_source_face_detected') + wording.get('exclamation_mark'), NAME) 101 | return False 102 | if mode in [ 'output', 'preview' ] and not is_image(facefusion.globals.target_path) and not is_video(facefusion.globals.target_path): 103 | update_status(wording.get('select_image_or_video_target') + wording.get('exclamation_mark'), NAME) 104 | return False 105 | if mode == 'output' and not facefusion.globals.output_path: 106 | update_status(wording.get('select_file_or_directory_output') + wording.get('exclamation_mark'), NAME) 107 | return False 108 | return True 109 | 110 | 111 | def post_process() -> None: 112 | clear_frame_processor() 113 | clear_face_analyser() 114 | read_static_image.cache_clear() 115 | 116 | 117 | def swap_face(source_face : Face, target_face : Face, temp_frame : Frame) -> Frame: 118 | return get_frame_processor().get(temp_frame, target_face, source_face, paste_back = True) 119 | 120 | 121 | def process_frame(source_face : Face, reference_face : Face, temp_frame : Frame) -> Frame: 122 | if 'reference' in facefusion.globals.face_recognition: 123 | similar_faces = find_similar_faces(temp_frame, reference_face, facefusion.globals.reference_face_distance) 124 | if similar_faces: 125 | for similar_face in similar_faces: 126 | temp_frame = swap_face(source_face, similar_face, temp_frame) 127 | if 'many' in facefusion.globals.face_recognition: 128 | many_faces = get_many_faces(temp_frame) 129 | if many_faces: 130 | for target_face in many_faces: 131 | temp_frame = swap_face(source_face, target_face, temp_frame) 132 | return temp_frame 133 | 134 | 135 | def process_frames(source_path : str, temp_frame_paths : List[str], update_progress : Update_Process) -> None: 136 | source_face = get_one_face(read_static_image(source_path)) 137 | reference_face = get_face_reference() if 'reference' in facefusion.globals.face_recognition else None 138 | for temp_frame_path in temp_frame_paths: 139 | temp_frame = read_image(temp_frame_path) 140 | result_frame = process_frame(source_face, reference_face, temp_frame) 141 | write_image(temp_frame_path, result_frame) 142 | update_progress() 143 | 144 | 145 | def process_image(source_path : str, target_path : str, output_path : str) -> None: 146 | source_face = get_one_face(read_static_image(source_path)) 147 | 
target_frame = read_static_image(target_path) 148 | reference_face = get_one_face(target_frame, facefusion.globals.reference_face_position) if 'reference' in facefusion.globals.face_recognition else None 149 | result_frame = process_frame(source_face, reference_face, target_frame) 150 | write_image(output_path, result_frame) 151 | 152 | 153 | def process_video(source_path : str, temp_frame_paths : List[str]) -> None: 154 | conditional_set_face_reference(temp_frame_paths) 155 | frame_processors.multi_process_frames(source_path, temp_frame_paths, process_frames) 156 | 157 | 158 | def conditional_set_face_reference(temp_frame_paths : List[str]) -> None: 159 | if 'reference' in facefusion.globals.face_recognition and not get_face_reference(): 160 | reference_frame = read_static_image(temp_frame_paths[facefusion.globals.reference_frame_number]) 161 | reference_face = get_one_face(reference_frame, facefusion.globals.reference_face_position) 162 | set_face_reference(reference_face) 163 | -------------------------------------------------------------------------------- /facefusion/processors/frame/modules/frame_enhancer.py: -------------------------------------------------------------------------------- 1 | from typing import Any, List, Dict, Literal, Optional 2 | from argparse import ArgumentParser 3 | import threading 4 | import cv2 5 | from basicsr.archs.rrdbnet_arch import RRDBNet 6 | from realesrgan import RealESRGANer 7 | 8 | import facefusion.globals 9 | import facefusion.processors.frame.core as frame_processors 10 | from facefusion import wording 11 | from facefusion.core import update_status 12 | from facefusion.face_analyser import clear_face_analyser 13 | from facefusion.typing import Frame, Face, Update_Process, ProcessMode, ModelValue, OptionsWithModel 14 | from facefusion.utilities import conditional_download, resolve_relative_path, is_file, is_download_done, get_device 15 | from facefusion.vision import read_image, read_static_image, write_image 16 | from facefusion.processors.frame import globals as frame_processors_globals 17 | from facefusion.processors.frame import choices as frame_processors_choices 18 | 19 | FRAME_PROCESSOR = None 20 | THREAD_SEMAPHORE : threading.Semaphore = threading.Semaphore() 21 | THREAD_LOCK : threading.Lock = threading.Lock() 22 | NAME = 'FACEFUSION.FRAME_PROCESSOR.FRAME_ENHANCER' 23 | MODELS: Dict[str, ModelValue] =\ 24 | { 25 | 'realesrgan_x2plus': 26 | { 27 | 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/RealESRGAN_x2plus.pth', 28 | 'path': resolve_relative_path('../.assets/models/RealESRGAN_x2plus.pth'), 29 | 'scale': 2 30 | }, 31 | 'realesrgan_x4plus': 32 | { 33 | 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/RealESRGAN_x4plus.pth', 34 | 'path': resolve_relative_path('../.assets/models/RealESRGAN_x4plus.pth'), 35 | 'scale': 4 36 | }, 37 | 'realesrnet_x4plus': 38 | { 39 | 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/RealESRNet_x4plus.pth', 40 | 'path': resolve_relative_path('../.assets/models/RealESRNet_x4plus.pth'), 41 | 'scale': 4 42 | } 43 | } 44 | OPTIONS : Optional[OptionsWithModel] = None 45 | 46 | 47 | def get_frame_processor() -> Any: 48 | global FRAME_PROCESSOR 49 | 50 | with THREAD_LOCK: 51 | if FRAME_PROCESSOR is None: 52 | model_path = get_options('model').get('path') 53 | model_scale = get_options('model').get('scale') 54 | FRAME_PROCESSOR = RealESRGANer( 55 | model_path = model_path, 56 | model = RRDBNet( 57 | num_in_ch = 3, 58 | 
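				# Editor's note: num_in_ch / num_out_ch = 3 because the generator consumes and
				# produces 3-channel frames; the remaining RRDBNet architecture arguments are
				# left at the basicsr defaults, and `scale` below ties the upsampling factor
				# to the 'scale' entry of the selected model in MODELS above.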
num_out_ch = 3, 59 | scale = model_scale 60 | ), 61 | device = get_device(facefusion.globals.execution_providers), 62 | scale = model_scale 63 | ) 64 | return FRAME_PROCESSOR 65 | 66 | 67 | def clear_frame_processor() -> None: 68 | global FRAME_PROCESSOR 69 | 70 | FRAME_PROCESSOR = None 71 | 72 | 73 | def get_options(key : Literal[ 'model' ]) -> Any: 74 | global OPTIONS 75 | 76 | if OPTIONS is None: 77 | OPTIONS = \ 78 | { 79 | 'model': MODELS[frame_processors_globals.frame_enhancer_model] 80 | } 81 | return OPTIONS.get(key) 82 | 83 | 84 | def set_options(key : Literal[ 'model' ], value : Any) -> None: 85 | global OPTIONS 86 | 87 | OPTIONS[key] = value 88 | 89 | 90 | def register_args(program : ArgumentParser) -> None: 91 | program.add_argument('--frame-enhancer-model', help = wording.get('frame_processor_model_help'), dest = 'frame_enhancer_model', default = 'realesrgan_x2plus', choices = frame_processors_choices.frame_enhancer_models) 92 | program.add_argument('--frame-enhancer-blend', help = wording.get('frame_processor_blend_help'), dest = 'frame_enhancer_blend', type = int, default = 100, choices = range(101), metavar = '[0-100]') 93 | 94 | 95 | def apply_args(program : ArgumentParser) -> None: 96 | args = program.parse_args([]) 97 | frame_processors_globals.frame_enhancer_model = args.frame_enhancer_model 98 | frame_processors_globals.frame_enhancer_blend = args.frame_enhancer_blend 99 | 100 | 101 | def pre_check() -> bool: 102 | if not facefusion.globals.skip_download: 103 | download_directory_path = resolve_relative_path('../.assets/models') 104 | model_url = get_options('model').get('url') 105 | conditional_download(download_directory_path, [ model_url ]) 106 | return True 107 | 108 | 109 | def pre_process(mode : ProcessMode) -> bool: 110 | model_url = get_options('model').get('url') 111 | model_path = get_options('model').get('path') 112 | if not facefusion.globals.skip_download and not is_download_done(model_url, model_path): 113 | update_status(wording.get('model_download_not_done') + wording.get('exclamation_mark'), NAME) 114 | return False 115 | elif not is_file(model_path): 116 | update_status(wording.get('model_file_not_present') + wording.get('exclamation_mark'), NAME) 117 | return False 118 | if mode == 'output' and not facefusion.globals.output_path: 119 | update_status(wording.get('select_file_or_directory_output') + wording.get('exclamation_mark'), NAME) 120 | return False 121 | return True 122 | 123 | 124 | def post_process() -> None: 125 | clear_frame_processor() 126 | clear_face_analyser() 127 | read_static_image.cache_clear() 128 | 129 | 130 | def enhance_frame(temp_frame : Frame) -> Frame: 131 | with THREAD_SEMAPHORE: 132 | paste_frame, _ = get_frame_processor().enhance(temp_frame) 133 | temp_frame = blend_frame(temp_frame, paste_frame) 134 | return temp_frame 135 | 136 | 137 | def blend_frame(temp_frame : Frame, paste_frame : Frame) -> Frame: 138 | frame_enhancer_blend = 1 - (frame_processors_globals.frame_enhancer_blend / 100) 139 | temp_frame = cv2.resize(temp_frame, (paste_frame.shape[1], paste_frame.shape[0])) 140 | temp_frame = cv2.addWeighted(temp_frame, frame_enhancer_blend, paste_frame, 1 - frame_enhancer_blend, 0) 141 | return temp_frame 142 | 143 | 144 | def process_frame(source_face : Face, reference_face : Face, temp_frame : Frame) -> Frame: 145 | return enhance_frame(temp_frame) 146 | 147 | 148 | def process_frames(source_path : str, temp_frame_paths : List[str], update_progress : Update_Process) -> None: 149 | for temp_frame_path in 
temp_frame_paths: 150 | temp_frame = read_image(temp_frame_path) 151 | result_frame = process_frame(None, None, temp_frame) 152 | write_image(temp_frame_path, result_frame) 153 | update_progress() 154 | 155 | 156 | def process_image(source_path : str, target_path : str, output_path : str) -> None: 157 | target_frame = read_static_image(target_path) 158 | result = process_frame(None, None, target_frame) 159 | write_image(output_path, result) 160 | 161 | 162 | def process_video(source_path : str, temp_frame_paths : List[str]) -> None: 163 | frame_processors.multi_process_frames(None, temp_frame_paths, process_frames) 164 | -------------------------------------------------------------------------------- /facefusion/typing.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Literal, Callable, List, TypedDict, Dict 2 | from insightface.app.common import Face 3 | import numpy 4 | 5 | Face = Face 6 | Frame = numpy.ndarray[Any, Any] 7 | Matrix = numpy.ndarray[Any, Any] 8 | 9 | Update_Process = Callable[[], None] 10 | Process_Frames = Callable[[str, List[str], Update_Process], None] 11 | 12 | ProcessMode = Literal[ 'output', 'preview', 'stream' ] 13 | FaceRecognition = Literal[ 'reference', 'many' ] 14 | FaceAnalyserDirection = Literal[ 'left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small' ] 15 | FaceAnalyserAge = Literal[ 'child', 'teen', 'adult', 'senior' ] 16 | FaceAnalyserGender = Literal[ 'male', 'female' ] 17 | TempFrameFormat = Literal[ 'jpg', 'png' ] 18 | OutputVideoEncoder = Literal[ 'libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc' ] 19 | 20 | ModelValue = Dict['str', Any] 21 | OptionsWithModel = TypedDict('OptionsWithModel', 22 | { 23 | 'model' : ModelValue 24 | }) 25 | -------------------------------------------------------------------------------- /facefusion/uis/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/diffus-me/sd-webui-facefusion/f84fed74bd20374112c7009f8d75c391d64cfa00/facefusion/uis/__init__.py -------------------------------------------------------------------------------- /facefusion/uis/assets/fixes.css: -------------------------------------------------------------------------------- 1 | :root:root:root button:not([class]) 2 | { 3 | border-radius: 0.375rem; 4 | float: left; 5 | overflow: hidden; 6 | width: 100%; 7 | } 8 | -------------------------------------------------------------------------------- /facefusion/uis/assets/overrides.css: -------------------------------------------------------------------------------- 1 | :root:root:root input[type="number"] 2 | { 3 | max-width: 6rem; 4 | } 5 | 6 | :root:root:root [type="checkbox"], 7 | :root:root:root [type="radio"] 8 | { 9 | border-radius: 50%; 10 | height: 1.125rem; 11 | width: 1.125rem; 12 | } 13 | 14 | :root:root:root input[type="range"] 15 | { 16 | height: 0.5rem; 17 | } 18 | 19 | :root:root:root input[type="range"]::-moz-range-thumb, 20 | :root:root:root input[type="range"]::-webkit-slider-thumb 21 | { 22 | background: var(--neutral-300); 23 | border: unset; 24 | border-radius: 50%; 25 | height: 1.125rem; 26 | width: 1.125rem; 27 | } 28 | 29 | :root:root:root input[type="range"]::-webkit-slider-thumb 30 | { 31 | margin-top: 0.375rem; 32 | } 33 | -------------------------------------------------------------------------------- /facefusion/uis/choices.py: -------------------------------------------------------------------------------- 
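# Editor's note: this module centralizes the static option lists the Gradio layouts
# render, the common toggle flags plus the webcam streaming modes and resolutions below.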
1 | from typing import List 2 | 3 | from facefusion.uis.typing import WebcamMode 4 | 5 | common_options : List[str] = [ 'keep-fps', 'keep-temp', 'skip-audio', 'skip-download' ] 6 | webcam_modes : List[WebcamMode] = [ 'inline', 'udp', 'v4l2' ] 7 | webcam_resolutions : List[str] = [ '320x240', '640x480', '1280x720', '1920x1080', '2560x1440', '3840x2160' ] 8 | -------------------------------------------------------------------------------- /facefusion/uis/components/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/diffus-me/sd-webui-facefusion/f84fed74bd20374112c7009f8d75c391d64cfa00/facefusion/uis/components/__init__.py -------------------------------------------------------------------------------- /facefusion/uis/components/about.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | import gradio 3 | 4 | from facefusion import metadata, wording 5 | 6 | ABOUT_BUTTON : Optional[gradio.HTML] = None 7 | DONATE_BUTTON : Optional[gradio.HTML] = None 8 | 9 | 10 | def render() -> None: 11 | global ABOUT_BUTTON 12 | global DONATE_BUTTON 13 | 14 | ABOUT_BUTTON = gradio.Button( 15 | value = metadata.get('name') + ' ' + metadata.get('version'), 16 | variant = 'primary', 17 | link = metadata.get('url') 18 | ) 19 | DONATE_BUTTON = gradio.Button( 20 | value = wording.get('donate_button_label'), 21 | link = 'https://donate.facefusion.io', 22 | size = 'sm' 23 | ) 24 | -------------------------------------------------------------------------------- /facefusion/uis/components/benchmark.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Optional, List, Dict, Generator 2 | import time 3 | import tempfile 4 | import statistics 5 | import gradio 6 | 7 | import facefusion.globals 8 | from facefusion import wording 9 | from facefusion.face_analyser import get_face_analyser 10 | from facefusion.face_cache import clear_faces_cache 11 | from facefusion.processors.frame.core import get_frame_processors_modules 12 | from facefusion.vision import count_video_frame_total 13 | from facefusion.core import limit_resources, conditional_process 14 | from facefusion.utilities import normalize_output_path, clear_temp 15 | from facefusion.uis.typing import Update 16 | from facefusion.uis.core import get_ui_component 17 | 18 | BENCHMARK_RESULTS_DATAFRAME : Optional[gradio.Dataframe] = None 19 | BENCHMARK_START_BUTTON : Optional[gradio.Button] = None 20 | BENCHMARK_CLEAR_BUTTON : Optional[gradio.Button] = None 21 | BENCHMARKS : Dict[str, str] =\ 22 | { 23 | '240p': '.assets/examples/target-240p.mp4', 24 | '360p': '.assets/examples/target-360p.mp4', 25 | '540p': '.assets/examples/target-540p.mp4', 26 | '720p': '.assets/examples/target-720p.mp4', 27 | '1080p': '.assets/examples/target-1080p.mp4', 28 | '1440p': '.assets/examples/target-1440p.mp4', 29 | '2160p': '.assets/examples/target-2160p.mp4' 30 | } 31 | 32 | 33 | def render() -> None: 34 | global BENCHMARK_RESULTS_DATAFRAME 35 | global BENCHMARK_START_BUTTON 36 | global BENCHMARK_CLEAR_BUTTON 37 | 38 | BENCHMARK_RESULTS_DATAFRAME = gradio.Dataframe( 39 | label = wording.get('benchmark_results_dataframe_label'), 40 | headers = 41 | [ 42 | 'target_path', 43 | 'benchmark_cycles', 44 | 'average_run', 45 | 'fastest_run', 46 | 'slowest_run', 47 | 'relative_fps' 48 | ], 49 | datatype = 50 | [ 51 | 'str', 52 | 'number', 53 | 'number', 54 | 'number', 55 | 'number', 56 | 'number' 57 | 
] 58 | ) 59 | BENCHMARK_START_BUTTON = gradio.Button( 60 | value = wording.get('start_button_label'), 61 | variant = 'primary', 62 | size = 'sm' 63 | ) 64 | BENCHMARK_CLEAR_BUTTON = gradio.Button( 65 | value = wording.get('clear_button_label'), 66 | size = 'sm' 67 | ) 68 | 69 | 70 | def listen() -> None: 71 | benchmark_runs_checkbox_group = get_ui_component('benchmark_runs_checkbox_group') 72 | benchmark_cycles_slider = get_ui_component('benchmark_cycles_slider') 73 | if benchmark_runs_checkbox_group and benchmark_cycles_slider: 74 | BENCHMARK_START_BUTTON.click(start, inputs = [ benchmark_runs_checkbox_group, benchmark_cycles_slider ], outputs = BENCHMARK_RESULTS_DATAFRAME) 75 | BENCHMARK_CLEAR_BUTTON.click(clear, outputs = BENCHMARK_RESULTS_DATAFRAME) 76 | 77 | 78 | def start(benchmark_runs : List[str], benchmark_cycles : int) -> Generator[List[Any], None, None]: 79 | facefusion.globals.source_path = '.assets/examples/source.jpg' 80 | target_paths = [ BENCHMARKS[benchmark_run] for benchmark_run in benchmark_runs if benchmark_run in BENCHMARKS ] 81 | benchmark_results = [] 82 | if target_paths: 83 | pre_process() 84 | for target_path in target_paths: 85 | benchmark_results.append(benchmark(target_path, benchmark_cycles)) 86 | yield benchmark_results 87 | post_process() 88 | 89 | 90 | def pre_process() -> None: 91 | limit_resources() 92 | get_face_analyser() 93 | for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors): 94 | frame_processor_module.get_frame_processor() 95 | 96 | 97 | def post_process() -> None: 98 | clear_faces_cache() 99 | 100 | 101 | def benchmark(target_path : str, benchmark_cycles : int) -> List[Any]: 102 | process_times = [] 103 | total_fps = 0.0 104 | for i in range(benchmark_cycles): 105 | facefusion.globals.target_path = target_path 106 | facefusion.globals.output_path = normalize_output_path(facefusion.globals.source_path, facefusion.globals.target_path, tempfile.gettempdir()) 107 | video_frame_total = count_video_frame_total(facefusion.globals.target_path) 108 | start_time = time.perf_counter() 109 | conditional_process() 110 | end_time = time.perf_counter() 111 | process_time = end_time - start_time 112 | total_fps += video_frame_total / process_time 113 | process_times.append(process_time) 114 | average_run = round(statistics.mean(process_times), 2) 115 | fastest_run = round(min(process_times), 2) 116 | slowest_run = round(max(process_times), 2) 117 | relative_fps = round(total_fps / benchmark_cycles, 2) 118 | return\ 119 | [ 120 | facefusion.globals.target_path, 121 | benchmark_cycles, 122 | average_run, 123 | fastest_run, 124 | slowest_run, 125 | relative_fps 126 | ] 127 | 128 | 129 | def clear() -> Update: 130 | if facefusion.globals.target_path: 131 | clear_temp(facefusion.globals.target_path) 132 | return gradio.update(value = None) 133 | -------------------------------------------------------------------------------- /facefusion/uis/components/benchmark_options.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | import gradio 3 | 4 | from facefusion import wording 5 | from facefusion.uis.core import register_ui_component 6 | from facefusion.uis.components.benchmark import BENCHMARKS 7 | 8 | BENCHMARK_RUNS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None 9 | BENCHMARK_CYCLES_SLIDER : Optional[gradio.Button] = None 10 | 11 | 12 | def render() -> None: 13 | global BENCHMARK_RUNS_CHECKBOX_GROUP 14 | global BENCHMARK_CYCLES_SLIDER 15 | 16 | 
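	# Editor's note: these two controls feed benchmark.start(). The checkbox group picks
	# which BENCHMARKS clips to run and the cycles slider sets how many times each clip
	# is processed; benchmark() above then reports the mean, fastest and slowest wall
	# clock time per cycle plus relative_fps, the processed frames per second averaged
	# over all cycles.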
BENCHMARK_RUNS_CHECKBOX_GROUP = gradio.CheckboxGroup( 17 | label = wording.get('benchmark_runs_checkbox_group_label'), 18 | value = list(BENCHMARKS.keys()), 19 | choices = list(BENCHMARKS.keys()) 20 | ) 21 | BENCHMARK_CYCLES_SLIDER = gradio.Slider( 22 | label = wording.get('benchmark_cycles_slider_label'), 23 | value = 3, 24 | step = 1, 25 | minimum = 1, 26 | maximum = 10 27 | ) 28 | register_ui_component('benchmark_runs_checkbox_group', BENCHMARK_RUNS_CHECKBOX_GROUP) 29 | register_ui_component('benchmark_cycles_slider', BENCHMARK_CYCLES_SLIDER) 30 | -------------------------------------------------------------------------------- /facefusion/uis/components/common_options.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, List 2 | import gradio 3 | 4 | import facefusion.globals 5 | from facefusion import wording 6 | from facefusion.uis import choices 7 | 8 | COMMON_OPTIONS_CHECKBOX_GROUP : Optional[gradio.Checkboxgroup] = None 9 | 10 | 11 | def render() -> None: 12 | global COMMON_OPTIONS_CHECKBOX_GROUP 13 | 14 | value = [] 15 | if facefusion.globals.keep_fps: 16 | value.append('keep-fps') 17 | if facefusion.globals.keep_temp: 18 | value.append('keep-temp') 19 | if facefusion.globals.skip_audio: 20 | value.append('skip-audio') 21 | if facefusion.globals.skip_download: 22 | value.append('skip-download') 23 | COMMON_OPTIONS_CHECKBOX_GROUP = gradio.Checkboxgroup( 24 | label = wording.get('common_options_checkbox_group_label'), 25 | choices = choices.common_options, 26 | value = value 27 | ) 28 | 29 | 30 | def listen() -> None: 31 | COMMON_OPTIONS_CHECKBOX_GROUP.change(update, inputs = COMMON_OPTIONS_CHECKBOX_GROUP) 32 | 33 | 34 | def update(common_options : List[str]) -> None: 35 | facefusion.globals.keep_fps = 'keep-fps' in common_options 36 | facefusion.globals.keep_temp = 'keep-temp' in common_options 37 | facefusion.globals.skip_audio = 'skip-audio' in common_options 38 | facefusion.globals.skip_download = 'skip-download' in common_options 39 | -------------------------------------------------------------------------------- /facefusion/uis/components/execution.py: -------------------------------------------------------------------------------- 1 | from typing import List, Optional 2 | import gradio 3 | import onnxruntime 4 | 5 | import facefusion.globals 6 | from facefusion import wording 7 | from facefusion.face_analyser import clear_face_analyser 8 | from facefusion.processors.frame.core import clear_frame_processors_modules 9 | from facefusion.utilities import encode_execution_providers, decode_execution_providers 10 | from facefusion.uis.typing import Update 11 | 12 | EXECUTION_PROVIDERS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None 13 | 14 | 15 | def render() -> None: 16 | global EXECUTION_PROVIDERS_CHECKBOX_GROUP 17 | 18 | EXECUTION_PROVIDERS_CHECKBOX_GROUP = gradio.CheckboxGroup( 19 | label = wording.get('execution_providers_checkbox_group_label'), 20 | choices = encode_execution_providers(onnxruntime.get_available_providers()), 21 | value = encode_execution_providers(facefusion.globals.execution_providers) 22 | ) 23 | 24 | 25 | def listen() -> None: 26 | EXECUTION_PROVIDERS_CHECKBOX_GROUP.change(update_execution_providers, inputs = EXECUTION_PROVIDERS_CHECKBOX_GROUP, outputs = EXECUTION_PROVIDERS_CHECKBOX_GROUP) 27 | 28 | 29 | def update_execution_providers(execution_providers : List[str]) -> Update: 30 | clear_face_analyser() 31 | clear_frame_processors_modules() 32 | if not execution_providers: 33 | 
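		# Editor's note: fall back to every provider onnxruntime reports when the user
		# unticks all boxes; encode_/decode_execution_providers presumably translate
		# between onnxruntime names such as 'CUDAExecutionProvider' and the short
		# labels shown in the UI.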
execution_providers = encode_execution_providers(onnxruntime.get_available_providers()) 34 | facefusion.globals.execution_providers = decode_execution_providers(execution_providers) 35 | return gradio.update(value = execution_providers) 36 | -------------------------------------------------------------------------------- /facefusion/uis/components/execution_queue_count.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | import gradio 3 | 4 | import facefusion.globals 5 | from facefusion import wording 6 | 7 | EXECUTION_QUEUE_COUNT_SLIDER : Optional[gradio.Slider] = None 8 | 9 | 10 | def render() -> None: 11 | global EXECUTION_QUEUE_COUNT_SLIDER 12 | 13 | EXECUTION_QUEUE_COUNT_SLIDER = gradio.Slider( 14 | label = wording.get('execution_queue_count_slider_label'), 15 | value = facefusion.globals.execution_queue_count, 16 | step = 1, 17 | minimum = 1, 18 | maximum = 16 19 | ) 20 | 21 | 22 | def listen() -> None: 23 | EXECUTION_QUEUE_COUNT_SLIDER.change(update_execution_queue_count, inputs = EXECUTION_QUEUE_COUNT_SLIDER) 24 | 25 | 26 | def update_execution_queue_count(execution_queue_count : int = 1) -> None: 27 | facefusion.globals.execution_queue_count = execution_queue_count 28 | 29 | -------------------------------------------------------------------------------- /facefusion/uis/components/execution_thread_count.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | import gradio 3 | 4 | import facefusion.globals 5 | from facefusion import wording 6 | 7 | EXECUTION_THREAD_COUNT_SLIDER : Optional[gradio.Slider] = None 8 | 9 | 10 | def render() -> None: 11 | global EXECUTION_THREAD_COUNT_SLIDER 12 | 13 | EXECUTION_THREAD_COUNT_SLIDER = gradio.Slider( 14 | label = wording.get('execution_thread_count_slider_label'), 15 | value = facefusion.globals.execution_thread_count, 16 | step = 1, 17 | minimum = 1, 18 | maximum = 128 19 | ) 20 | 21 | 22 | def listen() -> None: 23 | EXECUTION_THREAD_COUNT_SLIDER.change(update_execution_thread_count, inputs = EXECUTION_THREAD_COUNT_SLIDER) 24 | 25 | 26 | def update_execution_thread_count(execution_thread_count : int = 1) -> None: 27 | facefusion.globals.execution_thread_count = execution_thread_count 28 | 29 | -------------------------------------------------------------------------------- /facefusion/uis/components/face_analyser.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | 3 | import gradio 4 | 5 | import facefusion.choices 6 | import facefusion.globals 7 | from facefusion import wording 8 | from facefusion.uis.core import register_ui_component 9 | 10 | FACE_ANALYSER_DIRECTION_DROPDOWN : Optional[gradio.Dropdown] = None 11 | FACE_ANALYSER_AGE_DROPDOWN : Optional[gradio.Dropdown] = None 12 | FACE_ANALYSER_GENDER_DROPDOWN : Optional[gradio.Dropdown] = None 13 | 14 | 15 | def render() -> None: 16 | global FACE_ANALYSER_DIRECTION_DROPDOWN 17 | global FACE_ANALYSER_AGE_DROPDOWN 18 | global FACE_ANALYSER_GENDER_DROPDOWN 19 | 20 | FACE_ANALYSER_DIRECTION_DROPDOWN = gradio.Dropdown( 21 | label = wording.get('face_analyser_direction_dropdown_label'), 22 | choices = facefusion.choices.face_analyser_directions, 23 | value = facefusion.globals.face_analyser_direction 24 | ) 25 | FACE_ANALYSER_AGE_DROPDOWN = gradio.Dropdown( 26 | label = wording.get('face_analyser_age_dropdown_label'), 27 | choices = ['none'] + facefusion.choices.face_analyser_ages, 28 | value = 
facefusion.globals.face_analyser_age or 'none' 29 | ) 30 | FACE_ANALYSER_GENDER_DROPDOWN = gradio.Dropdown( 31 | label = wording.get('face_analyser_gender_dropdown_label'), 32 | choices = ['none'] + facefusion.choices.face_analyser_genders, 33 | value = facefusion.globals.face_analyser_gender or 'none' 34 | ) 35 | register_ui_component('face_analyser_direction_dropdown', FACE_ANALYSER_DIRECTION_DROPDOWN) 36 | register_ui_component('face_analyser_age_dropdown', FACE_ANALYSER_AGE_DROPDOWN) 37 | register_ui_component('face_analyser_gender_dropdown', FACE_ANALYSER_GENDER_DROPDOWN) 38 | 39 | 40 | def listen() -> None: 41 | FACE_ANALYSER_DIRECTION_DROPDOWN.select(lambda value: update_dropdown('face_analyser_direction', value), inputs = FACE_ANALYSER_DIRECTION_DROPDOWN) 42 | FACE_ANALYSER_AGE_DROPDOWN.select(lambda value: update_dropdown('face_analyser_age', value), inputs = FACE_ANALYSER_AGE_DROPDOWN) 43 | FACE_ANALYSER_GENDER_DROPDOWN.select(lambda value: update_dropdown('face_analyser_gender', value), inputs = FACE_ANALYSER_GENDER_DROPDOWN) 44 | 45 | 46 | def update_dropdown(name : str, value : str) -> None: 47 | if value == 'none': 48 | setattr(facefusion.globals, name, None) 49 | else: 50 | setattr(facefusion.globals, name, value) 51 | -------------------------------------------------------------------------------- /facefusion/uis/components/face_selector.py: -------------------------------------------------------------------------------- 1 | from typing import List, Optional, Tuple, Any, Dict 2 | 3 | import gradio 4 | 5 | import facefusion.choices 6 | import facefusion.globals 7 | from facefusion import wording 8 | from facefusion.vision import get_video_frame, normalize_frame_color, read_static_image 9 | from facefusion.face_analyser import get_many_faces 10 | from facefusion.face_reference import clear_face_reference 11 | from facefusion.typing import Frame, FaceRecognition 12 | from facefusion.utilities import is_image, is_video 13 | from facefusion.uis.core import get_ui_component, register_ui_component 14 | from facefusion.uis.typing import ComponentName, Update 15 | 16 | FACE_RECOGNITION_DROPDOWN : Optional[gradio.Dropdown] = None 17 | REFERENCE_FACE_POSITION_GALLERY : Optional[gradio.Gallery] = None 18 | REFERENCE_FACE_DISTANCE_SLIDER : Optional[gradio.Slider] = None 19 | 20 | 21 | def render() -> None: 22 | global FACE_RECOGNITION_DROPDOWN 23 | global REFERENCE_FACE_POSITION_GALLERY 24 | global REFERENCE_FACE_DISTANCE_SLIDER 25 | 26 | reference_face_gallery_args: Dict[str, Any] =\ 27 | { 28 | 'label': wording.get('reference_face_gallery_label'), 29 | 'height': 120, 30 | 'object_fit': 'cover', 31 | 'columns': 10, 32 | 'allow_preview': False, 33 | 'visible': 'reference' in facefusion.globals.face_recognition 34 | } 35 | if is_image(facefusion.globals.target_path): 36 | reference_frame = read_static_image(facefusion.globals.target_path) 37 | reference_face_gallery_args['value'] = extract_gallery_frames(reference_frame) 38 | if is_video(facefusion.globals.target_path): 39 | reference_frame = get_video_frame(facefusion.globals.target_path, facefusion.globals.reference_frame_number) 40 | reference_face_gallery_args['value'] = extract_gallery_frames(reference_frame) 41 | FACE_RECOGNITION_DROPDOWN = gradio.Dropdown( 42 | label = wording.get('face_recognition_dropdown_label'), 43 | choices = facefusion.choices.face_recognitions, 44 | value = facefusion.globals.face_recognition 45 | ) 46 | REFERENCE_FACE_POSITION_GALLERY = gradio.Gallery(**reference_face_gallery_args) 47 | 
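	# Editor's note: the slider below tunes the embedding distance threshold that
	# find_similar_faces() applies in 'reference' mode; smaller values only swap faces
	# very close to the chosen reference, larger values are more permissive. Like the
	# gallery, it is hidden unless 'reference' recognition is active.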
REFERENCE_FACE_DISTANCE_SLIDER = gradio.Slider( 48 | label = wording.get('reference_face_distance_slider_label'), 49 | value = facefusion.globals.reference_face_distance, 50 | step = 0.05, 51 | minimum = 0, 52 | maximum = 3, 53 | visible = 'reference' in facefusion.globals.face_recognition 54 | ) 55 | register_ui_component('face_recognition_dropdown', FACE_RECOGNITION_DROPDOWN) 56 | register_ui_component('reference_face_position_gallery', REFERENCE_FACE_POSITION_GALLERY) 57 | register_ui_component('reference_face_distance_slider', REFERENCE_FACE_DISTANCE_SLIDER) 58 | 59 | 60 | def listen() -> None: 61 | FACE_RECOGNITION_DROPDOWN.select(update_face_recognition, inputs = FACE_RECOGNITION_DROPDOWN, outputs = [ REFERENCE_FACE_POSITION_GALLERY, REFERENCE_FACE_DISTANCE_SLIDER ]) 62 | REFERENCE_FACE_POSITION_GALLERY.select(clear_and_update_face_reference_position) 63 | REFERENCE_FACE_DISTANCE_SLIDER.change(update_reference_face_distance, inputs = REFERENCE_FACE_DISTANCE_SLIDER) 64 | multi_component_names : List[ComponentName] =\ 65 | [ 66 | 'source_image', 67 | 'target_image', 68 | 'target_video' 69 | ] 70 | for component_name in multi_component_names: 71 | component = get_ui_component(component_name) 72 | if component: 73 | for method in [ 'upload', 'change', 'clear' ]: 74 | getattr(component, method)(update_face_reference_position, outputs = REFERENCE_FACE_POSITION_GALLERY) 75 | select_component_names : List[ComponentName] =\ 76 | [ 77 | 'face_analyser_direction_dropdown', 78 | 'face_analyser_age_dropdown', 79 | 'face_analyser_gender_dropdown' 80 | ] 81 | for component_name in select_component_names: 82 | component = get_ui_component(component_name) 83 | if component: 84 | component.select(update_face_reference_position, outputs = REFERENCE_FACE_POSITION_GALLERY) 85 | preview_frame_slider = get_ui_component('preview_frame_slider') 86 | if preview_frame_slider: 87 | preview_frame_slider.release(update_face_reference_position, outputs = REFERENCE_FACE_POSITION_GALLERY) 88 | 89 | 90 | def update_face_recognition(face_recognition : FaceRecognition) -> Tuple[Update, Update]: 91 | if face_recognition == 'reference': 92 | facefusion.globals.face_recognition = face_recognition 93 | return gradio.update(visible = True), gradio.update(visible = True) 94 | if face_recognition == 'many': 95 | facefusion.globals.face_recognition = face_recognition 96 | return gradio.update(visible = False), gradio.update(visible = False) 97 | 98 | 99 | def clear_and_update_face_reference_position(event: gradio.SelectData) -> Update: 100 | clear_face_reference() 101 | return update_face_reference_position(event.index) 102 | 103 | 104 | def update_face_reference_position(reference_face_position : int = 0) -> Update: 105 | gallery_frames = [] 106 | facefusion.globals.reference_face_position = reference_face_position 107 | if is_image(facefusion.globals.target_path): 108 | reference_frame = read_static_image(facefusion.globals.target_path) 109 | gallery_frames = extract_gallery_frames(reference_frame) 110 | if is_video(facefusion.globals.target_path): 111 | reference_frame = get_video_frame(facefusion.globals.target_path, facefusion.globals.reference_frame_number) 112 | gallery_frames = extract_gallery_frames(reference_frame) 113 | if gallery_frames: 114 | return gradio.update(value = gallery_frames) 115 | return gradio.update(value = None) 116 | 117 | 118 | def update_reference_face_distance(reference_face_distance : float) -> Update: 119 | facefusion.globals.reference_face_distance = reference_face_distance 120 | return 
gradio.update(value = reference_face_distance) 121 | 122 | 123 | def extract_gallery_frames(reference_frame : Frame) -> List[Frame]: 124 | crop_frames = [] 125 | faces = get_many_faces(reference_frame) 126 | for face in faces: 127 | start_x, start_y, end_x, end_y = map(int, face['bbox']) 128 | padding_x = int((end_x - start_x) * 0.25) 129 | padding_y = int((end_y - start_y) * 0.25) 130 | start_x = max(0, start_x - padding_x) 131 | start_y = max(0, start_y - padding_y) 132 | end_x = max(0, end_x + padding_x) 133 | end_y = max(0, end_y + padding_y) 134 | crop_frame = reference_frame[start_y:end_y, start_x:end_x] 135 | crop_frame = normalize_frame_color(crop_frame) 136 | crop_frames.append(crop_frame) 137 | return crop_frames 138 | -------------------------------------------------------------------------------- /facefusion/uis/components/frame_processors.py: -------------------------------------------------------------------------------- 1 | from typing import List, Optional 2 | import gradio 3 | 4 | import facefusion.globals 5 | from facefusion import wording 6 | from facefusion.processors.frame.core import load_frame_processor_module, clear_frame_processors_modules 7 | from facefusion.utilities import list_module_names 8 | from facefusion.uis.core import register_ui_component 9 | from facefusion.uis.typing import Update 10 | 11 | FRAME_PROCESSORS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None 12 | 13 | 14 | def render() -> None: 15 | global FRAME_PROCESSORS_CHECKBOX_GROUP 16 | 17 | FRAME_PROCESSORS_CHECKBOX_GROUP = gradio.CheckboxGroup( 18 | label = wording.get('frame_processors_checkbox_group_label'), 19 | choices = sort_frame_processors(facefusion.globals.frame_processors), 20 | value = facefusion.globals.frame_processors 21 | ) 22 | register_ui_component('frame_processors_checkbox_group', FRAME_PROCESSORS_CHECKBOX_GROUP) 23 | 24 | 25 | def listen() -> None: 26 | FRAME_PROCESSORS_CHECKBOX_GROUP.change(update_frame_processors, inputs = FRAME_PROCESSORS_CHECKBOX_GROUP, outputs = FRAME_PROCESSORS_CHECKBOX_GROUP) 27 | 28 | 29 | def update_frame_processors(frame_processors : List[str]) -> Update: 30 | facefusion.globals.frame_processors = frame_processors 31 | clear_frame_processors_modules() 32 | for frame_processor in frame_processors: 33 | frame_processor_module = load_frame_processor_module(frame_processor) 34 | if not frame_processor_module.pre_check(): 35 | return gradio.update() 36 | return gradio.update(value = frame_processors, choices = sort_frame_processors(frame_processors)) 37 | 38 | 39 | def sort_frame_processors(frame_processors : List[str]) -> list[str]: 40 | available_frame_processors = list_module_names('facefusion/processors/frame/modules') 41 | return sorted(available_frame_processors, key = lambda frame_processor : frame_processors.index(frame_processor) if frame_processor in frame_processors else len(frame_processors)) 42 | -------------------------------------------------------------------------------- /facefusion/uis/components/frame_processors_options.py: -------------------------------------------------------------------------------- 1 | from typing import List, Optional, Tuple 2 | import gradio 3 | 4 | import facefusion.globals 5 | from facefusion import wording 6 | from facefusion.processors.frame.core import load_frame_processor_module 7 | from facefusion.processors.frame import globals as frame_processors_globals, choices as frame_processors_choices 8 | from facefusion.uis.typing import Update 9 | from facefusion.uis.core import get_ui_component, 
register_ui_component 10 | 11 | FACE_SWAPPER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None 12 | FACE_ENHANCER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None 13 | FACE_ENHANCER_BLEND_SLIDER : Optional[gradio.Slider] = None 14 | FRAME_ENHANCER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None 15 | FRAME_ENHANCER_BLEND_SLIDER : Optional[gradio.Slider] = None 16 | 17 | 18 | def render() -> None: 19 | global FACE_SWAPPER_MODEL_DROPDOWN 20 | global FACE_ENHANCER_MODEL_DROPDOWN 21 | global FACE_ENHANCER_BLEND_SLIDER 22 | global FRAME_ENHANCER_MODEL_DROPDOWN 23 | global FRAME_ENHANCER_BLEND_SLIDER 24 | 25 | FACE_SWAPPER_MODEL_DROPDOWN = gradio.Dropdown( 26 | label = wording.get('face_swapper_model_dropdown_label'), 27 | choices = frame_processors_choices.face_swapper_models, 28 | value = frame_processors_globals.face_swapper_model, 29 | visible = 'face_swapper' in facefusion.globals.frame_processors 30 | ) 31 | FACE_ENHANCER_MODEL_DROPDOWN = gradio.Dropdown( 32 | label = wording.get('face_enhancer_model_dropdown_label'), 33 | choices = frame_processors_choices.face_enhancer_models, 34 | value = frame_processors_globals.face_enhancer_model, 35 | visible = 'face_enhancer' in facefusion.globals.frame_processors 36 | ) 37 | FACE_ENHANCER_BLEND_SLIDER = gradio.Slider( 38 | label = wording.get('face_enhancer_blend_slider_label'), 39 | value = frame_processors_globals.face_enhancer_blend, 40 | step = 1, 41 | minimum = 0, 42 | maximum = 100, 43 | visible = 'face_enhancer' in facefusion.globals.frame_processors 44 | ) 45 | FRAME_ENHANCER_MODEL_DROPDOWN = gradio.Dropdown( 46 | label = wording.get('frame_enhancer_model_dropdown_label'), 47 | choices = frame_processors_choices.frame_enhancer_models, 48 | value = frame_processors_globals.frame_enhancer_model, 49 | visible = 'frame_enhancer' in facefusion.globals.frame_processors 50 | ) 51 | FRAME_ENHANCER_BLEND_SLIDER = gradio.Slider( 52 | label = wording.get('frame_enhancer_blend_slider_label'), 53 | value = frame_processors_globals.frame_enhancer_blend, 54 | step = 1, 55 | minimum = 0, 56 | maximum = 100, 57 | visible = 'face_enhancer' in facefusion.globals.frame_processors 58 | ) 59 | register_ui_component('face_swapper_model_dropdown', FACE_SWAPPER_MODEL_DROPDOWN) 60 | register_ui_component('face_enhancer_model_dropdown', FACE_ENHANCER_MODEL_DROPDOWN) 61 | register_ui_component('face_enhancer_blend_slider', FACE_ENHANCER_BLEND_SLIDER) 62 | register_ui_component('frame_enhancer_model_dropdown', FRAME_ENHANCER_MODEL_DROPDOWN) 63 | register_ui_component('frame_enhancer_blend_slider', FRAME_ENHANCER_BLEND_SLIDER) 64 | 65 | 66 | def listen() -> None: 67 | FACE_SWAPPER_MODEL_DROPDOWN.change(update_face_swapper_model, inputs = FACE_SWAPPER_MODEL_DROPDOWN, outputs = FACE_SWAPPER_MODEL_DROPDOWN) 68 | FACE_ENHANCER_MODEL_DROPDOWN.change(update_face_enhancer_model, inputs = FACE_ENHANCER_MODEL_DROPDOWN, outputs = FACE_ENHANCER_MODEL_DROPDOWN) 69 | FACE_ENHANCER_BLEND_SLIDER.change(update_face_enhancer_blend, inputs = FACE_ENHANCER_BLEND_SLIDER) 70 | FRAME_ENHANCER_MODEL_DROPDOWN.change(update_frame_enhancer_model, inputs = FRAME_ENHANCER_MODEL_DROPDOWN, outputs = FRAME_ENHANCER_MODEL_DROPDOWN) 71 | FRAME_ENHANCER_BLEND_SLIDER.change(update_frame_enhancer_blend, inputs = FRAME_ENHANCER_BLEND_SLIDER) 72 | frame_processors_checkbox_group = get_ui_component('frame_processors_checkbox_group') 73 | if frame_processors_checkbox_group: 74 | frame_processors_checkbox_group.change(toggle_face_swapper_model, inputs = frame_processors_checkbox_group, outputs = [ 
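		# Editor's note: this single handler re-gates the visibility of all five option
		# widgets. Its has_frame_enhancer branch also corrects what looks like a copy
		# and paste slip in render() above, where FRAME_ENHANCER_BLEND_SLIDER is gated
		# on 'face_enhancer' instead of 'frame_enhancer'.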
FACE_SWAPPER_MODEL_DROPDOWN, FACE_ENHANCER_MODEL_DROPDOWN, FACE_ENHANCER_BLEND_SLIDER, FRAME_ENHANCER_MODEL_DROPDOWN, FRAME_ENHANCER_BLEND_SLIDER ]) 75 | 76 | 77 | def update_face_swapper_model(face_swapper_model : str) -> Update: 78 | frame_processors_globals.face_swapper_model = face_swapper_model 79 | face_swapper_module = load_frame_processor_module('face_swapper') 80 | face_swapper_module.clear_frame_processor() 81 | face_swapper_module.set_options('model', face_swapper_module.MODELS[face_swapper_model]) 82 | if not face_swapper_module.pre_check(): 83 | return gradio.update() 84 | return gradio.update(value = face_swapper_model) 85 | 86 | 87 | def update_face_enhancer_model(face_enhancer_model : str) -> Update: 88 | frame_processors_globals.face_enhancer_model = face_enhancer_model 89 | face_enhancer_module = load_frame_processor_module('face_enhancer') 90 | face_enhancer_module.clear_frame_processor() 91 | face_enhancer_module.set_options('model', face_enhancer_module.MODELS[face_enhancer_model]) 92 | if not face_enhancer_module.pre_check(): 93 | return gradio.update() 94 | return gradio.update(value = face_enhancer_model) 95 | 96 | 97 | def update_face_enhancer_blend(face_enhancer_blend : int) -> None: 98 | frame_processors_globals.face_enhancer_blend = face_enhancer_blend 99 | 100 | 101 | def update_frame_enhancer_model(frame_enhancer_model : str) -> Update: 102 | frame_processors_globals.frame_enhancer_model = frame_enhancer_model 103 | frame_enhancer_module = load_frame_processor_module('frame_enhancer') 104 | frame_enhancer_module.clear_frame_processor() 105 | frame_enhancer_module.set_options('model', frame_enhancer_module.MODELS[frame_enhancer_model]) 106 | if not frame_enhancer_module.pre_check(): 107 | return gradio.update() 108 | return gradio.update(value = frame_enhancer_model) 109 | 110 | 111 | def update_frame_enhancer_blend(frame_enhancer_blend : int) -> None: 112 | frame_processors_globals.frame_enhancer_blend = frame_enhancer_blend 113 | 114 | 115 | def toggle_face_swapper_model(frame_processors : List[str]) -> Tuple[Update, Update, Update, Update, Update]: 116 | has_face_swapper = 'face_swapper' in frame_processors 117 | has_face_enhancer = 'face_enhancer' in frame_processors 118 | has_frame_enhancer = 'frame_enhancer' in frame_processors 119 | return gradio.update(visible = has_face_swapper), gradio.update(visible = has_face_enhancer), gradio.update(visible = has_face_enhancer), gradio.update(visible = has_frame_enhancer), gradio.update(visible = has_frame_enhancer) 120 | -------------------------------------------------------------------------------- /facefusion/uis/components/limit_resources.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | import gradio 3 | 4 | import facefusion.globals 5 | from facefusion import wording 6 | 7 | MAX_MEMORY_SLIDER : Optional[gradio.Slider] = None 8 | 9 | 10 | def render() -> None: 11 | global MAX_MEMORY_SLIDER 12 | 13 | MAX_MEMORY_SLIDER = gradio.Slider( 14 | label = wording.get('max_memory_slider_label'), 15 | step = 1, 16 | minimum = 0, 17 | maximum = 128 18 | ) 19 | 20 | 21 | def listen() -> None: 22 | MAX_MEMORY_SLIDER.change(update_max_memory, inputs = MAX_MEMORY_SLIDER) 23 | 24 | 25 | def update_max_memory(max_memory : int) -> None: 26 | facefusion.globals.max_memory = max_memory if max_memory > 0 else None 27 | -------------------------------------------------------------------------------- /facefusion/uis/components/output.py: 
-------------------------------------------------------------------------------- 1 | from typing import Tuple, Optional 2 | import gradio 3 | 4 | import facefusion.globals 5 | from facefusion import wording 6 | from facefusion.core import limit_resources, conditional_process 7 | from facefusion.uis.core import get_ui_component 8 | from facefusion.utilities import is_image, is_video, normalize_output_path, clear_temp 9 | from facefusion.uis.typing import Update 10 | 11 | OUTPUT_IMAGE : Optional[gradio.Image] = None 12 | OUTPUT_VIDEO : Optional[gradio.Video] = None 13 | OUTPUT_START_BUTTON : Optional[gradio.Button] = None 14 | OUTPUT_CLEAR_BUTTON : Optional[gradio.Button] = None 15 | 16 | 17 | def render() -> None: 18 | global OUTPUT_IMAGE 19 | global OUTPUT_VIDEO 20 | global OUTPUT_START_BUTTON 21 | global OUTPUT_CLEAR_BUTTON 22 | 23 | OUTPUT_IMAGE = gradio.Image( 24 | label = wording.get('output_image_or_video_label'), 25 | visible = False 26 | ) 27 | OUTPUT_VIDEO = gradio.Video( 28 | label = wording.get('output_image_or_video_label') 29 | ) 30 | OUTPUT_START_BUTTON = gradio.Button( 31 | value = wording.get('start_button_label'), 32 | variant = 'primary', 33 | size = 'sm' 34 | ) 35 | OUTPUT_CLEAR_BUTTON = gradio.Button( 36 | value = wording.get('clear_button_label'), 37 | size = 'sm' 38 | ) 39 | 40 | 41 | def listen() -> None: 42 | output_path_textbox = get_ui_component('output_path_textbox') 43 | if output_path_textbox: 44 | OUTPUT_START_BUTTON.click(start, inputs = output_path_textbox, outputs = [ OUTPUT_IMAGE, OUTPUT_VIDEO ]) 45 | OUTPUT_CLEAR_BUTTON.click(clear, outputs = [ OUTPUT_IMAGE, OUTPUT_VIDEO ]) 46 | 47 | 48 | def start(output_path : str) -> Tuple[Update, Update]: 49 | facefusion.globals.output_path = normalize_output_path(facefusion.globals.source_path, facefusion.globals.target_path, output_path) 50 | limit_resources() 51 | conditional_process() 52 | if is_image(facefusion.globals.output_path): 53 | return gradio.update(value = facefusion.globals.output_path, visible = True), gradio.update(value = None, visible = False) 54 | if is_video(facefusion.globals.output_path): 55 | return gradio.update(value = None, visible = False), gradio.update(value = facefusion.globals.output_path, visible = True) 56 | return gradio.update(), gradio.update() 57 | 58 | 59 | def clear() -> Tuple[Update, Update]: 60 | if facefusion.globals.target_path: 61 | clear_temp(facefusion.globals.target_path) 62 | return gradio.update(value = None), gradio.update(value = None) 63 | -------------------------------------------------------------------------------- /facefusion/uis/components/output_options.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, Tuple, List 2 | import tempfile 3 | import gradio 4 | 5 | import facefusion.choices 6 | import facefusion.globals 7 | from facefusion import wording 8 | from facefusion.typing import OutputVideoEncoder 9 | from facefusion.utilities import is_image, is_video 10 | from facefusion.uis.typing import Update, ComponentName 11 | from facefusion.uis.core import get_ui_component, register_ui_component 12 | 13 | OUTPUT_PATH_TEXTBOX : Optional[gradio.Textbox] = None 14 | OUTPUT_IMAGE_QUALITY_SLIDER : Optional[gradio.Slider] = None 15 | OUTPUT_VIDEO_ENCODER_DROPDOWN : Optional[gradio.Dropdown] = None 16 | OUTPUT_VIDEO_QUALITY_SLIDER : Optional[gradio.Slider] = None 17 | 18 | 19 | def render() -> None: 20 | global OUTPUT_PATH_TEXTBOX 21 | global OUTPUT_IMAGE_QUALITY_SLIDER 22 | global 
OUTPUT_VIDEO_ENCODER_DROPDOWN 23 | global OUTPUT_VIDEO_QUALITY_SLIDER 24 | 25 | OUTPUT_PATH_TEXTBOX = gradio.Textbox( 26 | label = wording.get('output_path_textbox_label'), 27 | value = facefusion.globals.output_path or tempfile.gettempdir(), 28 | max_lines = 1 29 | ) 30 | OUTPUT_IMAGE_QUALITY_SLIDER = gradio.Slider( 31 | label = wording.get('output_image_quality_slider_label'), 32 | value = facefusion.globals.output_image_quality, 33 | step = 1, 34 | minimum = 0, 35 | maximum = 100, 36 | visible = is_image(facefusion.globals.target_path) 37 | ) 38 | OUTPUT_VIDEO_ENCODER_DROPDOWN = gradio.Dropdown( 39 | label = wording.get('output_video_encoder_dropdown_label'), 40 | choices = facefusion.choices.output_video_encoders, 41 | value = facefusion.globals.output_video_encoder, 42 | visible = is_video(facefusion.globals.target_path) 43 | ) 44 | OUTPUT_VIDEO_QUALITY_SLIDER = gradio.Slider( 45 | label = wording.get('output_video_quality_slider_label'), 46 | value = facefusion.globals.output_video_quality, 47 | step = 1, 48 | minimum = 0, 49 | maximum = 100, 50 | visible = is_video(facefusion.globals.target_path) 51 | ) 52 | register_ui_component('output_path_textbox', OUTPUT_PATH_TEXTBOX) 53 | 54 | 55 | def listen() -> None: 56 | OUTPUT_PATH_TEXTBOX.change(update_output_path, inputs = OUTPUT_PATH_TEXTBOX) 57 | OUTPUT_IMAGE_QUALITY_SLIDER.change(update_output_image_quality, inputs = OUTPUT_IMAGE_QUALITY_SLIDER) 58 | OUTPUT_VIDEO_ENCODER_DROPDOWN.select(update_output_video_encoder, inputs = OUTPUT_VIDEO_ENCODER_DROPDOWN) 59 | OUTPUT_VIDEO_QUALITY_SLIDER.change(update_output_video_quality, inputs = OUTPUT_VIDEO_QUALITY_SLIDER) 60 | multi_component_names : List[ComponentName] =\ 61 | [ 62 | 'source_image', 63 | 'target_image', 64 | 'target_video' 65 | ] 66 | for component_name in multi_component_names: 67 | component = get_ui_component(component_name) 68 | if component: 69 | for method in [ 'upload', 'change', 'clear' ]: 70 | getattr(component, method)(remote_update, outputs = [ OUTPUT_IMAGE_QUALITY_SLIDER, OUTPUT_VIDEO_ENCODER_DROPDOWN, OUTPUT_VIDEO_QUALITY_SLIDER ]) 71 | 72 | 73 | def remote_update() -> Tuple[Update, Update, Update]: 74 | if is_image(facefusion.globals.target_path): 75 | return gradio.update(visible = True), gradio.update(visible = False), gradio.update(visible = False) 76 | if is_video(facefusion.globals.target_path): 77 | return gradio.update(visible = False), gradio.update(visible = True), gradio.update(visible = True) 78 | return gradio.update(visible = False), gradio.update(visible = False), gradio.update(visible = False) 79 | 80 | 81 | def update_output_path(output_path : str) -> None: 82 | facefusion.globals.output_path = output_path 83 | 84 | 85 | def update_output_image_quality(output_image_quality : int) -> None: 86 | facefusion.globals.output_image_quality = output_image_quality 87 | 88 | 89 | def update_output_video_encoder(output_video_encoder: OutputVideoEncoder) -> None: 90 | facefusion.globals.output_video_encoder = output_video_encoder 91 | 92 | 93 | def update_output_video_quality(output_video_quality : int) -> None: 94 | facefusion.globals.output_video_quality = output_video_quality 95 | -------------------------------------------------------------------------------- /facefusion/uis/components/preview.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Dict, List, Optional 2 | import cv2 3 | import gradio 4 | 5 | import facefusion.globals 6 | from facefusion import wording 7 | from facefusion.typing 
import Frame, Face 8 | from facefusion.vision import get_video_frame, count_video_frame_total, normalize_frame_color, resize_frame_dimension, read_static_image 9 | from facefusion.face_analyser import get_one_face 10 | from facefusion.face_reference import get_face_reference, set_face_reference 11 | from facefusion.predictor import predict_frame 12 | from facefusion.processors.frame.core import load_frame_processor_module 13 | from facefusion.utilities import is_video, is_image 14 | from facefusion.uis.typing import ComponentName, Update 15 | from facefusion.uis.core import get_ui_component, register_ui_component 16 | 17 | PREVIEW_IMAGE : Optional[gradio.Image] = None 18 | PREVIEW_FRAME_SLIDER : Optional[gradio.Slider] = None 19 | 20 | 21 | def render() -> None: 22 | global PREVIEW_IMAGE 23 | global PREVIEW_FRAME_SLIDER 24 | 25 | preview_image_args: Dict[str, Any] =\ 26 | { 27 | 'label': wording.get('preview_image_label'), 28 | 'interactive': False 29 | } 30 | preview_frame_slider_args: Dict[str, Any] =\ 31 | { 32 | 'label': wording.get('preview_frame_slider_label'), 33 | 'step': 1, 34 | 'minimum': 0, 35 | 'maximum': 100, 36 | 'visible': False 37 | } 38 | conditional_set_face_reference() 39 | source_face = get_one_face(read_static_image(facefusion.globals.source_path)) 40 | reference_face = get_face_reference() if 'reference' in facefusion.globals.face_recognition else None 41 | if is_image(facefusion.globals.target_path): 42 | target_frame = read_static_image(facefusion.globals.target_path) 43 | preview_frame = process_preview_frame(source_face, reference_face, target_frame) 44 | preview_image_args['value'] = normalize_frame_color(preview_frame) 45 | if is_video(facefusion.globals.target_path): 46 | temp_frame = get_video_frame(facefusion.globals.target_path, facefusion.globals.reference_frame_number) 47 | preview_frame = process_preview_frame(source_face, reference_face, temp_frame) 48 | preview_image_args['value'] = normalize_frame_color(preview_frame) 49 | preview_image_args['visible'] = True 50 | preview_frame_slider_args['value'] = facefusion.globals.reference_frame_number 51 | preview_frame_slider_args['maximum'] = count_video_frame_total(facefusion.globals.target_path) 52 | preview_frame_slider_args['visible'] = True 53 | PREVIEW_IMAGE = gradio.Image(**preview_image_args) 54 | PREVIEW_FRAME_SLIDER = gradio.Slider(**preview_frame_slider_args) 55 | register_ui_component('preview_frame_slider', PREVIEW_FRAME_SLIDER) 56 | 57 | 58 | def listen() -> None: 59 | PREVIEW_FRAME_SLIDER.change(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE) 60 | multi_component_names : List[ComponentName] =\ 61 | [ 62 | 'source_image', 63 | 'target_image', 64 | 'target_video' 65 | ] 66 | for component_name in multi_component_names: 67 | component = get_ui_component(component_name) 68 | if component: 69 | for method in [ 'upload', 'change', 'clear' ]: 70 | getattr(component, method)(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE) 71 | getattr(component, method)(update_preview_frame_slider, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_FRAME_SLIDER) 72 | update_component_names : List[ComponentName] =\ 73 | [ 74 | 'face_recognition_dropdown', 75 | 'frame_processors_checkbox_group', 76 | 'face_swapper_model_dropdown', 77 | 'face_enhancer_model_dropdown', 78 | 'frame_enhancer_model_dropdown' 79 | ] 80 | for component_name in update_component_names: 81 | component = get_ui_component(component_name) 82 | if component: 83 | 
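			# Editor's note: every processor checkbox and model dropdown re-renders the
			# preview on change, so switching models is reflected immediately without
			# running a full job.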
component.change(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE) 84 | select_component_names : List[ComponentName] =\ 85 | [ 86 | 'reference_face_position_gallery', 87 | 'face_analyser_direction_dropdown', 88 | 'face_analyser_age_dropdown', 89 | 'face_analyser_gender_dropdown' 90 | ] 91 | for component_name in select_component_names: 92 | component = get_ui_component(component_name) 93 | if component: 94 | component.select(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE) 95 | change_component_names : List[ComponentName] =\ 96 | [ 97 | 'reference_face_distance_slider', 98 | 'face_enhancer_blend_slider', 99 | 'frame_enhancer_blend_slider' 100 | ] 101 | for component_name in change_component_names: 102 | component = get_ui_component(component_name) 103 | if component: 104 | component.change(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE) 105 | 106 | 107 | def update_preview_image(frame_number : int = 0) -> Update: 108 | conditional_set_face_reference() 109 | source_face = get_one_face(read_static_image(facefusion.globals.source_path)) 110 | reference_face = get_face_reference() if 'reference' in facefusion.globals.face_recognition else None 111 | if is_image(facefusion.globals.target_path): 112 | target_frame = read_static_image(facefusion.globals.target_path) 113 | preview_frame = process_preview_frame(source_face, reference_face, target_frame) 114 | preview_frame = normalize_frame_color(preview_frame) 115 | return gradio.update(value = preview_frame) 116 | if is_video(facefusion.globals.target_path): 117 | facefusion.globals.reference_frame_number = frame_number 118 | temp_frame = get_video_frame(facefusion.globals.target_path, facefusion.globals.reference_frame_number) 119 | preview_frame = process_preview_frame(source_face, reference_face, temp_frame) 120 | preview_frame = normalize_frame_color(preview_frame) 121 | return gradio.update(value = preview_frame) 122 | return gradio.update(value = None) 123 | 124 | 125 | def update_preview_frame_slider(frame_number : int = 0) -> Update: 126 | if is_image(facefusion.globals.target_path): 127 | return gradio.update(value = None, maximum = None, visible = False) 128 | if is_video(facefusion.globals.target_path): 129 | facefusion.globals.reference_frame_number = frame_number 130 | video_frame_total = count_video_frame_total(facefusion.globals.target_path) 131 | return gradio.update(maximum = video_frame_total, visible = True) 132 | return gradio.update() 133 | 134 | 135 | def process_preview_frame(source_face : Face, reference_face : Face, temp_frame : Frame) -> Frame: 136 | temp_frame = resize_frame_dimension(temp_frame, 640, 640) 137 | if predict_frame(temp_frame): 138 | return cv2.GaussianBlur(temp_frame, (99, 99), 0) 139 | for frame_processor in facefusion.globals.frame_processors: 140 | frame_processor_module = load_frame_processor_module(frame_processor) 141 | if frame_processor_module.pre_process('preview'): 142 | temp_frame = frame_processor_module.process_frame( 143 | source_face, 144 | reference_face, 145 | temp_frame 146 | ) 147 | return temp_frame 148 | 149 | 150 | def conditional_set_face_reference() -> None: 151 | if 'reference' in facefusion.globals.face_recognition and not get_face_reference(): 152 | reference_frame = get_video_frame(facefusion.globals.target_path, facefusion.globals.reference_frame_number) 153 | reference_face = get_one_face(reference_frame, facefusion.globals.reference_face_position) 154 | set_face_reference(reference_face) 
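# Editor's note: conditional_set_face_reference() above is a compute-once cache: the
# reference face is detected the first time the preview needs it, then reused until
# clear_face_reference() is called (for instance when the target file changes). Below
# is a minimal sketch of a get/set/clear contract like the one facefusion.face_reference
# appears to expose; it is an illustration under that assumption, not the actual module.

from typing import Any, Optional

_SKETCH_FACE_REFERENCE : Optional[Any] = None


def sketch_get_face_reference() -> Optional[Any]:
	return _SKETCH_FACE_REFERENCE


def sketch_set_face_reference(face : Any) -> None:
	global _SKETCH_FACE_REFERENCE

	_SKETCH_FACE_REFERENCE = face


def sketch_clear_face_reference() -> None:
	global _SKETCH_FACE_REFERENCE

	_SKETCH_FACE_REFERENCE = None # forces re-detection on the next preview render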
155 | -------------------------------------------------------------------------------- /facefusion/uis/components/source.py: -------------------------------------------------------------------------------- 1 | from typing import Any, IO, Optional 2 | import gradio 3 | 4 | import facefusion.globals 5 | from facefusion import wording 6 | from facefusion.utilities import is_image 7 | from facefusion.uis.typing import Update 8 | from facefusion.uis.core import register_ui_component 9 | 10 | SOURCE_FILE : Optional[gradio.File] = None 11 | SOURCE_IMAGE : Optional[gradio.Image] = None 12 | 13 | 14 | def render() -> None: 15 | global SOURCE_FILE 16 | global SOURCE_IMAGE 17 | 18 | is_source_image = is_image(facefusion.globals.source_path) 19 | SOURCE_FILE = gradio.File( 20 | file_count = 'single', 21 | file_types = 22 | [ 23 | '.png', 24 | '.jpg', 25 | '.webp' 26 | ], 27 | label = wording.get('source_file_label'), 28 | value = facefusion.globals.source_path if is_source_image else None 29 | ) 30 | SOURCE_IMAGE = gradio.Image( 31 | value = SOURCE_FILE.value['name'] if is_source_image else None, 32 | visible = is_source_image, 33 | show_label = False 34 | ) 35 | register_ui_component('source_image', SOURCE_IMAGE) 36 | 37 | 38 | def listen() -> None: 39 | SOURCE_FILE.change(update, inputs = SOURCE_FILE, outputs = SOURCE_IMAGE) 40 | 41 | 42 | def update(file: IO[Any]) -> Update: 43 | if file and is_image(file.name): 44 | facefusion.globals.source_path = file.name 45 | return gradio.update(value = file.name, visible = True) 46 | facefusion.globals.source_path = None 47 | return gradio.update(value = None, visible = False) 48 | -------------------------------------------------------------------------------- /facefusion/uis/components/target.py: -------------------------------------------------------------------------------- 1 | from typing import Any, IO, Tuple, Optional 2 | import gradio 3 | 4 | import facefusion.globals 5 | from facefusion import wording 6 | from facefusion.face_reference import clear_face_reference 7 | from facefusion.utilities import is_image, is_video 8 | from facefusion.uis.typing import Update 9 | from facefusion.uis.core import register_ui_component 10 | 11 | TARGET_FILE : Optional[gradio.File] = None 12 | TARGET_IMAGE : Optional[gradio.Image] = None 13 | TARGET_VIDEO : Optional[gradio.Video] = None 14 | 15 | 16 | def render() -> None: 17 | global TARGET_FILE 18 | global TARGET_IMAGE 19 | global TARGET_VIDEO 20 | 21 | is_target_image = is_image(facefusion.globals.target_path) 22 | is_target_video = is_video(facefusion.globals.target_path) 23 | TARGET_FILE = gradio.File( 24 | label = wording.get('target_file_label'), 25 | file_count = 'single', 26 | file_types = 27 | [ 28 | '.png', 29 | '.jpg', 30 | '.webp', 31 | '.mp4' 32 | ], 33 | value = facefusion.globals.target_path if is_target_image or is_target_video else None 34 | ) 35 | TARGET_IMAGE = gradio.Image( 36 | value = TARGET_FILE.value['name'] if is_target_image else None, 37 | visible = is_target_image, 38 | show_label = False 39 | ) 40 | TARGET_VIDEO = gradio.Video( 41 | value = TARGET_FILE.value['name'] if is_target_video else None, 42 | visible = is_target_video, 43 | show_label = False 44 | ) 45 | register_ui_component('target_image', TARGET_IMAGE) 46 | register_ui_component('target_video', TARGET_VIDEO) 47 | 48 | 49 | def listen() -> None: 50 | TARGET_FILE.change(update, inputs = TARGET_FILE, outputs = [ TARGET_IMAGE, TARGET_VIDEO ]) 51 | 52 | 53 | def update(file : IO[Any]) -> Tuple[Update, Update]: 54 | 
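	# Editor's note: drop the cached reference face before anything else; a reference
	# picked from the previous target cannot correspond to a face in the newly
	# selected image or video.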
clear_face_reference() 55 | if file and is_image(file.name): 56 | facefusion.globals.target_path = file.name 57 | return gradio.update(value = file.name, visible = True), gradio.update(value = None, visible = False) 58 | if file and is_video(file.name): 59 | facefusion.globals.target_path = file.name 60 | return gradio.update(value = None, visible = False), gradio.update(value = file.name, visible = True) 61 | facefusion.globals.target_path = None 62 | return gradio.update(value = None, visible = False), gradio.update(value = None, visible = False) 63 | -------------------------------------------------------------------------------- /facefusion/uis/components/temp_frame.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, Tuple 2 | import gradio 3 | 4 | import facefusion.choices 5 | import facefusion.globals 6 | from facefusion import wording 7 | from facefusion.typing import TempFrameFormat 8 | from facefusion.utilities import is_video 9 | from facefusion.uis.typing import Update 10 | from facefusion.uis.core import get_ui_component 11 | 12 | TEMP_FRAME_FORMAT_DROPDOWN : Optional[gradio.Dropdown] = None 13 | TEMP_FRAME_QUALITY_SLIDER : Optional[gradio.Slider] = None 14 | 15 | 16 | def render() -> None: 17 | global TEMP_FRAME_FORMAT_DROPDOWN 18 | global TEMP_FRAME_QUALITY_SLIDER 19 | 20 | TEMP_FRAME_FORMAT_DROPDOWN = gradio.Dropdown( 21 | label = wording.get('temp_frame_format_dropdown_label'), 22 | choices = facefusion.choices.temp_frame_formats, 23 | value = facefusion.globals.temp_frame_format, 24 | visible = is_video(facefusion.globals.target_path) 25 | ) 26 | TEMP_FRAME_QUALITY_SLIDER = gradio.Slider( 27 | label = wording.get('temp_frame_quality_slider_label'), 28 | value = facefusion.globals.temp_frame_quality, 29 | step = 1, 30 | minimum = 0, 31 | maximum = 100, 32 | visible = is_video(facefusion.globals.target_path) 33 | ) 34 | 35 | 36 | def listen() -> None: 37 | TEMP_FRAME_FORMAT_DROPDOWN.select(update_temp_frame_format, inputs = TEMP_FRAME_FORMAT_DROPDOWN) 38 | TEMP_FRAME_QUALITY_SLIDER.change(update_temp_frame_quality, inputs = TEMP_FRAME_QUALITY_SLIDER) 39 | target_video = get_ui_component('target_video') 40 | if target_video: 41 | for method in [ 'upload', 'change', 'clear' ]: 42 | getattr(target_video, method)(remote_update, outputs = [ TEMP_FRAME_FORMAT_DROPDOWN, TEMP_FRAME_QUALITY_SLIDER ]) 43 | 44 | 45 | def remote_update() -> Tuple[Update, Update]: 46 | if is_video(facefusion.globals.target_path): 47 | return gradio.update(visible = True), gradio.update(visible = True) 48 | return gradio.update(visible = False), gradio.update(visible = False) 49 | 50 | 51 | def update_temp_frame_format(temp_frame_format : TempFrameFormat) -> None: 52 | facefusion.globals.temp_frame_format = temp_frame_format 53 | 54 | 55 | def update_temp_frame_quality(temp_frame_quality : int) -> None: 56 | facefusion.globals.temp_frame_quality = temp_frame_quality 57 | -------------------------------------------------------------------------------- /facefusion/uis/components/trim_frame.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Dict, Tuple, Optional 2 | import gradio 3 | 4 | import facefusion.globals 5 | from facefusion import wording 6 | from facefusion.vision import count_video_frame_total 7 | from facefusion.utilities import is_video 8 | from facefusion.uis.typing import Update 9 | from facefusion.uis.core import get_ui_component 10 | 11 | TRIM_FRAME_START_SLIDER : 
Optional[gradio.Slider] = None 12 | TRIM_FRAME_END_SLIDER : Optional[gradio.Slider] = None 13 | 14 | 15 | def render() -> None: 16 | global TRIM_FRAME_START_SLIDER 17 | global TRIM_FRAME_END_SLIDER 18 | 19 | trim_frame_start_slider_args : Dict[str, Any] =\ 20 | { 21 | 'label': wording.get('trim_frame_start_slider_label'), 22 | 'step': 1, 23 | 'minimum': 0, 24 | 'maximum': 100, 25 | 'visible': False 26 | } 27 | trim_frame_end_slider_args : Dict[str, Any] =\ 28 | { 29 | 'label': wording.get('trim_frame_end_slider_label'), 30 | 'step': 1, 31 | 'minimum': 0, 32 | 'maximum': 100, 33 | 'visible': False 34 | } 35 | if is_video(facefusion.globals.target_path): 36 | video_frame_total = count_video_frame_total(facefusion.globals.target_path) 37 | trim_frame_start_slider_args['value'] = facefusion.globals.trim_frame_start or 0 38 | trim_frame_start_slider_args['maximum'] = video_frame_total 39 | trim_frame_start_slider_args['visible'] = True 40 | trim_frame_end_slider_args['value'] = facefusion.globals.trim_frame_end or video_frame_total 41 | trim_frame_end_slider_args['maximum'] = video_frame_total 42 | trim_frame_end_slider_args['visible'] = True 43 | TRIM_FRAME_START_SLIDER = gradio.Slider(**trim_frame_start_slider_args) 44 | TRIM_FRAME_END_SLIDER = gradio.Slider(**trim_frame_end_slider_args) 45 | 46 | 47 | def listen() -> None: 48 | TRIM_FRAME_START_SLIDER.change(update_trim_frame_start, inputs = TRIM_FRAME_START_SLIDER) 49 | TRIM_FRAME_END_SLIDER.change(update_trim_frame_end, inputs = TRIM_FRAME_END_SLIDER) 50 | target_video = get_ui_component('target_video') 51 | if target_video: 52 | for method in [ 'upload', 'change', 'clear' ]: 53 | getattr(target_video, method)(remote_update, outputs = [ TRIM_FRAME_START_SLIDER, TRIM_FRAME_END_SLIDER ]) 54 | 55 | 56 | def remote_update() -> Tuple[Update, Update]: 57 | if is_video(facefusion.globals.target_path): 58 | video_frame_total = count_video_frame_total(facefusion.globals.target_path) 59 | facefusion.globals.trim_frame_start = None 60 | facefusion.globals.trim_frame_end = None 61 | return gradio.update(value = 0, maximum = video_frame_total, visible = True), gradio.update(value = video_frame_total, maximum = video_frame_total, visible = True) 62 | return gradio.update(value = None, maximum = None, visible = False), gradio.update(value = None, maximum = None, visible = False) 63 | 64 | 65 | def update_trim_frame_start(trim_frame_start : int) -> None: 66 | facefusion.globals.trim_frame_start = trim_frame_start if trim_frame_start > 0 else None 67 | 68 | 69 | def update_trim_frame_end(trim_frame_end : int) -> None: 70 | video_frame_total = count_video_frame_total(facefusion.globals.target_path) 71 | facefusion.globals.trim_frame_end = trim_frame_end if trim_frame_end < video_frame_total else None 72 | -------------------------------------------------------------------------------- /facefusion/uis/components/webcam.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, Generator, Deque 2 | from concurrent.futures import ThreadPoolExecutor 3 | from collections import deque 4 | import os 5 | import platform 6 | import subprocess 7 | import cv2 8 | import gradio 9 | from tqdm import tqdm 10 | 11 | import facefusion.globals 12 | from facefusion import wording 13 | from facefusion.predictor import predict_stream 14 | from facefusion.typing import Frame, Face 15 | from facefusion.face_analyser import get_one_face 16 | from facefusion.processors.frame.core import get_frame_processors_modules 17 | 
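# the streaming path below couples three pieces: open_ffmpeg feeds raw BGR
# frames to an ffmpeg child process over stdin, capture_webcam configures the
# cv2 device, and multi_process_capture fans frames out to a thread pool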
from facefusion.utilities import open_ffmpeg 18 | from facefusion.vision import normalize_frame_color, read_static_image 19 | from facefusion.uis.typing import StreamMode, WebcamMode, Update 20 | from facefusion.uis.core import get_ui_component 21 | 22 | WEBCAM_IMAGE : Optional[gradio.Image] = None 23 | WEBCAM_START_BUTTON : Optional[gradio.Button] = None 24 | WEBCAM_STOP_BUTTON : Optional[gradio.Button] = None 25 | 26 | 27 | def render() -> None: 28 | global WEBCAM_IMAGE 29 | global WEBCAM_START_BUTTON 30 | global WEBCAM_STOP_BUTTON 31 | 32 | WEBCAM_IMAGE = gradio.Image( 33 | label = wording.get('webcam_image_label') 34 | ) 35 | WEBCAM_START_BUTTON = gradio.Button( 36 | value = wording.get('start_button_label'), 37 | variant = 'primary', 38 | size = 'sm' 39 | ) 40 | WEBCAM_STOP_BUTTON = gradio.Button( 41 | value = wording.get('stop_button_label'), 42 | size = 'sm' 43 | ) 44 | 45 | 46 | def listen() -> None: 47 | start_event = None 48 | webcam_mode_radio = get_ui_component('webcam_mode_radio') 49 | webcam_resolution_dropdown = get_ui_component('webcam_resolution_dropdown') 50 | webcam_fps_slider = get_ui_component('webcam_fps_slider') 51 | if webcam_mode_radio and webcam_resolution_dropdown and webcam_fps_slider: 52 | start_event = WEBCAM_START_BUTTON.click(start, inputs = [ webcam_mode_radio, webcam_resolution_dropdown, webcam_fps_slider ], outputs = WEBCAM_IMAGE) 53 | webcam_mode_radio.change(stop, outputs = WEBCAM_IMAGE, cancels = start_event) 54 | webcam_resolution_dropdown.change(stop, outputs = WEBCAM_IMAGE, cancels = start_event) 55 | webcam_fps_slider.change(stop, outputs = WEBCAM_IMAGE, cancels = start_event) 56 | WEBCAM_STOP_BUTTON.click(stop, cancels = start_event) 57 | source_image = get_ui_component('source_image') 58 | if source_image: 59 | for method in [ 'upload', 'change', 'clear' ]: 60 | getattr(source_image, method)(stop, cancels = start_event) 61 | 62 | 63 | def start(mode: WebcamMode, resolution: str, fps: float) -> Generator[Frame, None, None]: 64 | facefusion.globals.face_recognition = 'many' 65 | source_face = get_one_face(read_static_image(facefusion.globals.source_path)) 66 | stream = None 67 | if mode in [ 'udp', 'v4l2' ]: 68 | stream = open_stream(mode, resolution, fps) # type: ignore[arg-type] 69 | capture = capture_webcam(resolution, fps) 70 | if capture.isOpened(): 71 | for capture_frame in multi_process_capture(source_face, capture): 72 | if stream is not None: 73 | stream.stdin.write(capture_frame.tobytes()) 74 | yield normalize_frame_color(capture_frame) 75 | 76 | 77 | def multi_process_capture(source_face: Face, capture : cv2.VideoCapture) -> Generator[Frame, None, None]: 78 | progress = tqdm(desc = wording.get('processing'), unit = 'frame', dynamic_ncols = True) 79 | with ThreadPoolExecutor(max_workers = facefusion.globals.execution_thread_count) as executor: 80 | futures = [] 81 | deque_capture_frames : Deque[Frame] = deque() 82 | while True: 83 | _, capture_frame = capture.read() 84 | if predict_stream(capture_frame): 85 | return 86 | future = executor.submit(process_stream_frame, source_face, capture_frame) 87 | futures.append(future) 88 | for future_done in [ future for future in futures if future.done() ]: 89 | capture_frame = future_done.result() 90 | deque_capture_frames.append(capture_frame) 91 | futures.remove(future_done) 92 | while deque_capture_frames: 93 | yield deque_capture_frames.popleft() 94 | progress.update() 95 | 96 | 97 | def stop() -> Update: 98 | return gradio.update(value = None) 99 | 100 | 101 | def capture_webcam(resolution : 
str, fps : float) -> cv2.VideoCapture: 102 | width, height = resolution.split('x') 103 | if platform.system().lower() == 'windows': 104 | capture = cv2.VideoCapture(0, cv2.CAP_DSHOW) 105 | else: 106 | capture = cv2.VideoCapture(0) 107 | capture.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG')) # type: ignore[attr-defined] 108 | capture.set(cv2.CAP_PROP_FRAME_WIDTH, int(width)) 109 | capture.set(cv2.CAP_PROP_FRAME_HEIGHT, int(height)) 110 | capture.set(cv2.CAP_PROP_FPS, fps) 111 | return capture 112 | 113 | 114 | def process_stream_frame(source_face : Face, temp_frame : Frame) -> Frame: 115 | for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors): 116 | if frame_processor_module.pre_process('stream'): 117 | temp_frame = frame_processor_module.process_frame( 118 | source_face, 119 | None, 120 | temp_frame 121 | ) 122 | return temp_frame 123 | 124 | 125 | def open_stream(mode : StreamMode, resolution : str, fps : float) -> subprocess.Popen[bytes]: 126 | commands = [ '-f', 'rawvideo', '-pix_fmt', 'bgr24', '-s', resolution, '-r', str(fps), '-i', '-' ] 127 | if mode == 'udp': 128 | commands.extend([ '-b:v', '2000k', '-f', 'mpegts', 'udp://localhost:27000?pkt_size=1316' ]) 129 | if mode == 'v4l2': 130 | device_name = os.listdir('/sys/devices/virtual/video4linux')[0] 131 | commands.extend([ '-f', 'v4l2', '/dev/' + device_name ]) 132 | return open_ffmpeg(commands) 133 | -------------------------------------------------------------------------------- /facefusion/uis/components/webcam_options.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | import gradio 3 | 4 | from facefusion import wording 5 | from facefusion.uis import choices 6 | from facefusion.uis.core import register_ui_component 7 | 8 | WEBCAM_MODE_RADIO : Optional[gradio.Radio] = None 9 | WEBCAM_RESOLUTION_DROPDOWN : Optional[gradio.Dropdown] = None 10 | WEBCAM_FPS_SLIDER : Optional[gradio.Slider] = None 11 | 12 | 13 | def render() -> None: 14 | global WEBCAM_MODE_RADIO 15 | global WEBCAM_RESOLUTION_DROPDOWN 16 | global WEBCAM_FPS_SLIDER 17 | 18 | WEBCAM_MODE_RADIO = gradio.Radio( 19 | label = wording.get('webcam_mode_radio_label'), 20 | choices = choices.webcam_modes, 21 | value = 'inline' 22 | ) 23 | WEBCAM_RESOLUTION_DROPDOWN = gradio.Dropdown( 24 | label = wording.get('webcam_resolution_dropdown'), 25 | choices = choices.webcam_resolutions, 26 | value = choices.webcam_resolutions[0] 27 | ) 28 | WEBCAM_FPS_SLIDER = gradio.Slider( 29 | label = wording.get('webcam_fps_slider'), 30 | value = 25, 31 | step = 1, 32 | minimum = 1, 33 | maximum = 60 34 | ) 35 | register_ui_component('webcam_mode_radio', WEBCAM_MODE_RADIO) 36 | register_ui_component('webcam_resolution_dropdown', WEBCAM_RESOLUTION_DROPDOWN) 37 | register_ui_component('webcam_fps_slider', WEBCAM_FPS_SLIDER) 38 | -------------------------------------------------------------------------------- /facefusion/uis/core.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, Optional, Any, List 2 | from types import ModuleType 3 | import importlib 4 | import sys 5 | import gradio 6 | 7 | import facefusion.globals 8 | from facefusion import metadata, wording 9 | from facefusion.uis.typing import Component, ComponentName 10 | from facefusion.utilities import resolve_relative_path 11 | 12 | UI_COMPONENTS: Dict[ComponentName, Component] = {} 13 | UI_LAYOUT_MODULES : List[ModuleType] = [] 14 | UI_LAYOUT_METHODS =\ 15 
| [ 16 | 'pre_check', 17 | 'pre_render', 18 | 'render', 19 | 'listen', 20 | 'run' 21 | ] 22 | 23 | 24 | def load_ui_layout_module(ui_layout : str) -> Any: 25 | try: 26 | ui_layout_module = importlib.import_module('facefusion.uis.layouts.' + ui_layout) 27 | for method_name in UI_LAYOUT_METHODS: 28 | if not hasattr(ui_layout_module, method_name): 29 | raise NotImplementedError 30 | except ModuleNotFoundError: 31 | sys.exit(wording.get('ui_layout_not_loaded').format(ui_layout = ui_layout)) 32 | except NotImplementedError: 33 | sys.exit(wording.get('ui_layout_not_implemented').format(ui_layout = ui_layout)) 34 | return ui_layout_module 35 | 36 | 37 | def get_ui_layouts_modules(ui_layouts : List[str]) -> List[ModuleType]: 38 | global UI_LAYOUT_MODULES 39 | 40 | if not UI_LAYOUT_MODULES: 41 | for ui_layout in ui_layouts: 42 | ui_layout_module = load_ui_layout_module(ui_layout) 43 | UI_LAYOUT_MODULES.append(ui_layout_module) 44 | return UI_LAYOUT_MODULES 45 | 46 | 47 | def get_ui_component(name: ComponentName) -> Optional[Component]: 48 | if name in UI_COMPONENTS: 49 | return UI_COMPONENTS[name] 50 | return None 51 | 52 | 53 | def register_ui_component(name: ComponentName, component: Component) -> None: 54 | UI_COMPONENTS[name] = component 55 | 56 | 57 | def launch() -> None: 58 | with gradio.Blocks(theme = get_theme(), css = get_css(), title = metadata.get('name') + ' ' + metadata.get('version')) as ui: 59 | for ui_layout in facefusion.globals.ui_layouts: 60 | ui_layout_module = load_ui_layout_module(ui_layout) 61 | if ui_layout_module.pre_render(): 62 | ui_layout_module.render() 63 | ui_layout_module.listen() 64 | 65 | for ui_layout in facefusion.globals.ui_layouts: 66 | ui_layout_module = load_ui_layout_module(ui_layout) 67 | ui_layout_module.run(ui) 68 | 69 | 70 | def get_theme() -> gradio.Theme: 71 | return gradio.themes.Base( 72 | primary_hue = gradio.themes.colors.red, 73 | secondary_hue = gradio.themes.colors.neutral, 74 | font = gradio.themes.GoogleFont('Open Sans') 75 | ).set( 76 | background_fill_primary = '*neutral_100', 77 | block_background_fill = 'white', 78 | block_border_width = '0', 79 | block_label_background_fill = '*primary_100', 80 | block_label_background_fill_dark = '*primary_600', 81 | block_label_border_width = 'none', 82 | block_label_margin = '0.5rem', 83 | block_label_radius = '*radius_md', 84 | block_label_text_color = '*primary_500', 85 | block_label_text_color_dark = 'white', 86 | block_label_text_weight = '600', 87 | block_title_background_fill = '*primary_100', 88 | block_title_background_fill_dark = '*primary_600', 89 | block_title_padding = '*block_label_padding', 90 | block_title_radius = '*block_label_radius', 91 | block_title_text_color = '*primary_500', 92 | block_title_text_size = '*text_sm', 93 | block_title_text_weight = '600', 94 | block_padding = '0.5rem', 95 | border_color_primary = 'transparent', 96 | border_color_primary_dark = 'transparent', 97 | button_large_padding = '2rem 0.5rem', 98 | button_large_text_weight = 'normal', 99 | button_primary_background_fill = '*primary_500', 100 | button_primary_text_color = 'white', 101 | button_secondary_background_fill = 'white', 102 | button_secondary_border_color = 'transparent', 103 | button_secondary_border_color_dark = 'transparent', 104 | button_secondary_border_color_hover = 'transparent', 105 | button_secondary_border_color_hover_dark = 'transparent', 106 | button_secondary_text_color = '*neutral_800', 107 | button_small_padding = '0.75rem', 108 | checkbox_background_color = '*neutral_200', 109 | 
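# values prefixed with '*' are gradio theme references, so '*primary_600'
# resolves against the primary hue chosen in gradio.themes.Base above and a
# single palette swap restyles every override here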
checkbox_background_color_selected = '*primary_600', 110 | checkbox_background_color_selected_dark = '*primary_700', 111 | checkbox_border_color_focus = '*primary_500', 112 | checkbox_border_color_focus_dark = '*primary_600', 113 | checkbox_border_color_selected = '*primary_600', 114 | checkbox_border_color_selected_dark = '*primary_700', 115 | checkbox_label_background_fill = '*neutral_50', 116 | checkbox_label_background_fill_hover = '*neutral_50', 117 | checkbox_label_background_fill_selected = '*primary_500', 118 | checkbox_label_background_fill_selected_dark = '*primary_600', 119 | checkbox_label_text_color_selected = 'white', 120 | input_background_fill = '*neutral_50', 121 | shadow_drop = 'none', 122 | slider_color = '*primary_500', 123 | slider_color_dark = '*primary_600' 124 | ) 125 | 126 | 127 | def get_css() -> str: 128 | fixes_css_path = resolve_relative_path('uis/assets/fixes.css') 129 | overrides_css_path = resolve_relative_path('uis/assets/overrides.css') 130 | return open(fixes_css_path, 'r').read() + open(overrides_css_path, 'r').read() 131 | -------------------------------------------------------------------------------- /facefusion/uis/layouts/benchmark.py: -------------------------------------------------------------------------------- 1 | import gradio 2 | 3 | import facefusion.globals 4 | from facefusion.utilities import conditional_download 5 | from facefusion.uis.components import about, frame_processors, frame_processors_options, execution, execution_thread_count, execution_queue_count, limit_resources, benchmark_options, benchmark 6 | 7 | 8 | def pre_check() -> bool: 9 | if not facefusion.globals.skip_download: 10 | conditional_download('.assets/examples', 11 | [ 12 | 'https://github.com/facefusion/facefusion-assets/releases/download/examples/source.jpg', 13 | 'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-240p.mp4', 14 | 'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-360p.mp4', 15 | 'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-540p.mp4', 16 | 'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-720p.mp4', 17 | 'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-1080p.mp4', 18 | 'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-1440p.mp4', 19 | 'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-2160p.mp4' 20 | ]) 21 | return True 22 | return False 23 | 24 | 25 | def pre_render() -> bool: 26 | return True 27 | 28 | 29 | def render() -> gradio.Blocks: 30 | with gradio.Blocks() as layout: 31 | with gradio.Row(): 32 | with gradio.Column(scale = 2): 33 | with gradio.Blocks(): 34 | about.render() 35 | with gradio.Blocks(): 36 | frame_processors.render() 37 | frame_processors_options.render() 38 | with gradio.Blocks(): 39 | execution.render() 40 | execution_thread_count.render() 41 | execution_queue_count.render() 42 | with gradio.Blocks(): 43 | limit_resources.render() 44 | with gradio.Blocks(): 45 | benchmark_options.render() 46 | with gradio.Column(scale= 5): 47 | with gradio.Blocks(): 48 | benchmark.render() 49 | return layout 50 | 51 | 52 | def listen() -> None: 53 | frame_processors.listen() 54 | frame_processors_options.listen() 55 | execution.listen() 56 | execution_thread_count.listen() 57 | execution_queue_count.listen() 58 | limit_resources.listen() 59 | benchmark.listen() 60 | 61 | 62 | def run(ui : gradio.Blocks) -> None: 63 | 
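# queue() routes events through gradio's request queue, which long-running
# callbacks like the benchmark runs need to avoid request timeouts;
# api_open = False keeps the queue endpoints out of the public API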
ui.queue(concurrency_count = 2, api_open = False).launch(show_api = False) 64 | -------------------------------------------------------------------------------- /facefusion/uis/layouts/default.py: -------------------------------------------------------------------------------- 1 | import gradio 2 | 3 | from facefusion.uis.components import about, frame_processors, frame_processors_options, execution, execution_thread_count, execution_queue_count, limit_resources, temp_frame, output_options, common_options, source, target, preview, trim_frame, face_analyser, face_selector, output 4 | 5 | 6 | def pre_check() -> bool: 7 | return True 8 | 9 | 10 | def pre_render() -> bool: 11 | return True 12 | 13 | 14 | def render() -> gradio.Blocks: 15 | with gradio.Blocks() as layout: 16 | with gradio.Row(): 17 | with gradio.Column(scale = 2): 18 | # with gradio.Blocks(): 19 | # about.render() 20 | with gradio.Blocks(): 21 | frame_processors.render() 22 | frame_processors_options.render() 23 | with gradio.Blocks(): 24 | execution.render() 25 | execution_thread_count.render() 26 | execution_queue_count.render() 27 | with gradio.Blocks(): 28 | limit_resources.render() 29 | with gradio.Blocks(): 30 | temp_frame.render() 31 | with gradio.Blocks(): 32 | output_options.render() 33 | with gradio.Column(scale = 2): 34 | with gradio.Blocks(): 35 | source.render() 36 | with gradio.Blocks(): 37 | target.render() 38 | with gradio.Blocks(): 39 | output.render() 40 | with gradio.Column(scale = 3): 41 | with gradio.Blocks(): 42 | preview.render() 43 | with gradio.Row(): 44 | trim_frame.render() 45 | with gradio.Blocks(): 46 | face_selector.render() 47 | with gradio.Row(): 48 | face_analyser.render() 49 | with gradio.Blocks(): 50 | common_options.render() 51 | return layout 52 | 53 | 54 | def listen() -> None: 55 | frame_processors.listen() 56 | frame_processors_options.listen() 57 | execution.listen() 58 | execution_thread_count.listen() 59 | execution_queue_count.listen() 60 | limit_resources.listen() 61 | temp_frame.listen() 62 | output_options.listen() 63 | common_options.listen() 64 | source.listen() 65 | target.listen() 66 | preview.listen() 67 | trim_frame.listen() 68 | face_selector.listen() 69 | face_analyser.listen() 70 | output.listen() 71 | 72 | 73 | def run(ui : gradio.Blocks) -> None: 74 | ui.launch(show_api = False) 75 | -------------------------------------------------------------------------------- /facefusion/uis/layouts/webcam.py: -------------------------------------------------------------------------------- 1 | import gradio 2 | 3 | from facefusion.uis.components import about, frame_processors, frame_processors_options, execution, execution_thread_count, webcam_options, source, webcam 4 | 5 | 6 | def pre_check() -> bool: 7 | return True 8 | 9 | 10 | def pre_render() -> bool: 11 | return True 12 | 13 | 14 | def render() -> gradio.Blocks: 15 | with gradio.Blocks() as layout: 16 | with gradio.Row(): 17 | with gradio.Column(scale = 2): 18 | with gradio.Blocks(): 19 | about.render() 20 | with gradio.Blocks(): 21 | frame_processors.render() 22 | frame_processors_options.render() 23 | with gradio.Blocks(): 24 | execution.render() 25 | execution_thread_count.render() 26 | with gradio.Blocks(): 27 | webcam_options.render() 28 | with gradio.Blocks(): 29 | source.render() 30 | with gradio.Column(scale = 5): 31 | with gradio.Blocks(): 32 | webcam.render() 33 | return layout 34 | 35 | 36 | def listen() -> None: 37 | frame_processors.listen() 38 | frame_processors_options.listen() 39 | execution.listen() 40 | 
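# listen() wiring happens after every layout component has rendered, so the
# get_ui_component lookups inside webcam.listen (mode, resolution, fps) are
# guaranteed to resolve here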
execution_thread_count.listen() 41 | source.listen() 42 | webcam.listen() 43 | 44 | 45 | def run(ui : gradio.Blocks) -> None: 46 | ui.queue(concurrency_count = 2, api_open = False).launch(show_api = False) 47 | -------------------------------------------------------------------------------- /facefusion/uis/typing.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Dict, Literal, Union 2 | import gradio 3 | 4 | Component = Union[gradio.File, gradio.Image, gradio.Video, gradio.Slider] # a real Union; chaining classes with 'or' would collapse to gradio.File 5 | ComponentName = Literal\ 6 | [ 7 | 'source_image', 8 | 'target_image', 9 | 'target_video', 10 | 'preview_frame_slider', 11 | 'face_recognition_dropdown', 12 | 'reference_face_position_gallery', 13 | 'reference_face_distance_slider', 14 | 'face_analyser_direction_dropdown', 15 | 'face_analyser_age_dropdown', 16 | 'face_analyser_gender_dropdown', 17 | 'frame_processors_checkbox_group', 18 | 'face_swapper_model_dropdown', 19 | 'face_enhancer_model_dropdown', 20 | 'face_enhancer_blend_slider', 21 | 'frame_enhancer_model_dropdown', 22 | 'frame_enhancer_blend_slider', 23 | 'output_path_textbox', 24 | 'benchmark_runs_checkbox_group', 25 | 'benchmark_cycles_slider', 26 | 'player_url_textbox_label', 27 | 'webcam_mode_radio', 28 | 'webcam_resolution_dropdown', 29 | 'webcam_fps_slider' 30 | ] 31 | WebcamMode = Literal[ 'inline', 'udp', 'v4l2' ] 32 | StreamMode = Literal[ 'udp', 'v4l2' ] 33 | Update = Dict[Any, Any] 34 | -------------------------------------------------------------------------------- /facefusion/utilities.py: -------------------------------------------------------------------------------- 1 | from typing import List, Optional 2 | from functools import lru_cache 3 | from pathlib import Path 4 | from tqdm import tqdm 5 | import glob 6 | import mimetypes 7 | import os 8 | import platform 9 | import shutil 10 | import ssl 11 | import subprocess 12 | import tempfile 13 | import urllib.request 14 | import onnxruntime 15 | 16 | import facefusion.globals 17 | from facefusion import wording 18 | from facefusion.vision import detect_fps 19 | 20 | TEMP_DIRECTORY_PATH = os.path.join(tempfile.gettempdir(), 'facefusion') 21 | TEMP_OUTPUT_VIDEO_NAME = 'temp.mp4' 22 | 23 | # monkey patch ssl 24 | if platform.system().lower() == 'darwin': 25 | ssl._create_default_https_context = ssl._create_unverified_context 26 | 27 | 28 | def run_ffmpeg(args : List[str]) -> bool: 29 | commands = [ 'ffmpeg', '-hide_banner', '-loglevel', 'error' ] 30 | commands.extend(args) 31 | try: 32 | subprocess.run(commands, stderr = subprocess.PIPE, check = True) 33 | return True 34 | except subprocess.CalledProcessError: 35 | return False 36 | 37 | 38 | def open_ffmpeg(args : List[str]) -> subprocess.Popen[bytes]: 39 | commands = [ 'ffmpeg', '-hide_banner', '-loglevel', 'error' ] 40 | commands.extend(args) 41 | return subprocess.Popen(commands, stdin = subprocess.PIPE) 42 | 43 | 44 | def extract_frames(target_path : str, fps : float) -> bool: 45 | temp_frame_compression = round(31 - (facefusion.globals.temp_frame_quality * 0.31)) 46 | trim_frame_start = facefusion.globals.trim_frame_start 47 | trim_frame_end = facefusion.globals.trim_frame_end 48 | temp_frames_pattern = get_temp_frames_pattern(target_path, '%04d') 49 | commands = [ '-hwaccel', 'auto', '-i', target_path, '-q:v', str(temp_frame_compression), '-pix_fmt', 'rgb24' ] 50 | if trim_frame_start is not None and trim_frame_end is not None: 51 | commands.extend([ '-vf', 'trim=start_frame=' + str(trim_frame_start) + ':end_frame=' + 
str(trim_frame_end) + ',fps=' + str(fps) ]) 52 | elif trim_frame_start is not None: 53 | commands.extend([ '-vf', 'trim=start_frame=' + str(trim_frame_start) + ',fps=' + str(fps) ]) 54 | elif trim_frame_end is not None: 55 | commands.extend([ '-vf', 'trim=end_frame=' + str(trim_frame_end) + ',fps=' + str(fps) ]) 56 | else: 57 | commands.extend([ '-vf', 'fps=' + str(fps) ]) 58 | commands.extend([ '-vsync', '0', temp_frames_pattern ]) 59 | return run_ffmpeg(commands) 60 | 61 | 62 | def compress_image(output_path : str) -> bool: 63 | output_image_compression = round(31 - (facefusion.globals.output_image_quality * 0.31)) 64 | commands = [ '-hwaccel', 'auto', '-i', output_path, '-q:v', str(output_image_compression), '-y', output_path ] 65 | return run_ffmpeg(commands) 66 | 67 | 68 | def merge_video(target_path : str, fps : float) -> bool: 69 | temp_output_video_path = get_temp_output_video_path(target_path) 70 | temp_frames_pattern = get_temp_frames_pattern(target_path, '%04d') 71 | commands = [ '-hwaccel', 'auto', '-r', str(fps), '-i', temp_frames_pattern, '-c:v', facefusion.globals.output_video_encoder ] 72 | if facefusion.globals.output_video_encoder in [ 'libx264', 'libx265' ]: 73 | output_video_compression = round(51 - (facefusion.globals.output_video_quality * 0.51)) 74 | commands.extend([ '-crf', str(output_video_compression) ]) 75 | if facefusion.globals.output_video_encoder in [ 'libvpx-vp9' ]: 76 | output_video_compression = round(63 - (facefusion.globals.output_video_quality * 0.63)) 77 | commands.extend([ '-crf', str(output_video_compression) ]) 78 | if facefusion.globals.output_video_encoder in [ 'h264_nvenc', 'hevc_nvenc' ]: 79 | output_video_compression = round(51 - (facefusion.globals.output_video_quality * 0.51)) 80 | commands.extend([ '-cq', str(output_video_compression) ]) 81 | commands.extend([ '-pix_fmt', 'yuv420p', '-colorspace', 'bt709', '-y', temp_output_video_path ]) 82 | return run_ffmpeg(commands) 83 | 84 | 85 | def restore_audio(target_path : str, output_path : str) -> bool: 86 | fps = detect_fps(target_path) 87 | trim_frame_start = facefusion.globals.trim_frame_start 88 | trim_frame_end = facefusion.globals.trim_frame_end 89 | temp_output_video_path = get_temp_output_video_path(target_path) 90 | commands = [ '-hwaccel', 'auto', '-i', temp_output_video_path ] 91 | if trim_frame_start is not None: 92 | start_time = trim_frame_start / fps 93 | commands.extend([ '-ss', str(start_time) ]) 94 | if trim_frame_end is not None: 95 | end_time = trim_frame_end / fps 96 | commands.extend([ '-to', str(end_time) ]) 97 | commands.extend([ '-i', target_path, '-c', 'copy', '-map', '0:v:0', '-map', '1:a:0', '-shortest', '-y', output_path ]) 98 | return run_ffmpeg(commands) 99 | 100 | 101 | def get_temp_frame_paths(target_path : str) -> List[str]: 102 | temp_frames_pattern = get_temp_frames_pattern(target_path, '*') 103 | return sorted(glob.glob(temp_frames_pattern)) 104 | 105 | 106 | def get_temp_frames_pattern(target_path : str, temp_frame_prefix : str) -> str: 107 | temp_directory_path = get_temp_directory_path(target_path) 108 | return os.path.join(temp_directory_path, temp_frame_prefix + '.' 
+ facefusion.globals.temp_frame_format) 109 | 110 | 111 | def get_temp_directory_path(target_path : str) -> str: 112 | target_name, _ = os.path.splitext(os.path.basename(target_path)) 113 | return os.path.join(TEMP_DIRECTORY_PATH, target_name) 114 | 115 | 116 | def get_temp_output_video_path(target_path : str) -> str: 117 | temp_directory_path = get_temp_directory_path(target_path) 118 | return os.path.join(temp_directory_path, TEMP_OUTPUT_VIDEO_NAME) 119 | 120 | 121 | def normalize_output_path(source_path : Optional[str], target_path : Optional[str], output_path : Optional[str]) -> Optional[str]: 122 | if is_file(source_path) and is_file(target_path) and is_directory(output_path): 123 | source_name, _ = os.path.splitext(os.path.basename(source_path)) 124 | target_name, target_extension = os.path.splitext(os.path.basename(target_path)) 125 | return os.path.join(output_path, source_name + '-' + target_name + target_extension) 126 | if is_file(target_path) and output_path: 127 | target_name, target_extension = os.path.splitext(os.path.basename(target_path)) 128 | output_name, output_extension = os.path.splitext(os.path.basename(output_path)) 129 | output_directory_path = os.path.dirname(output_path) 130 | if is_directory(output_directory_path) and output_extension: 131 | return os.path.join(output_directory_path, output_name + target_extension) 132 | return None 133 | return output_path 134 | 135 | 136 | def create_temp(target_path : str) -> None: 137 | temp_directory_path = get_temp_directory_path(target_path) 138 | Path(temp_directory_path).mkdir(parents = True, exist_ok = True) 139 | 140 | 141 | def move_temp(target_path : str, output_path : str) -> None: 142 | temp_output_video_path = get_temp_output_video_path(target_path) 143 | if is_file(temp_output_video_path): 144 | if is_file(output_path): 145 | os.remove(output_path) 146 | shutil.move(temp_output_video_path, output_path) 147 | 148 | 149 | def clear_temp(target_path : str) -> None: 150 | temp_directory_path = get_temp_directory_path(target_path) 151 | parent_directory_path = os.path.dirname(temp_directory_path) 152 | if not facefusion.globals.keep_temp and is_directory(temp_directory_path): 153 | shutil.rmtree(temp_directory_path) 154 | if os.path.exists(parent_directory_path) and not os.listdir(parent_directory_path): 155 | os.rmdir(parent_directory_path) 156 | 157 | 158 | def is_file(file_path : str) -> bool: 159 | return bool(file_path and os.path.isfile(file_path)) 160 | 161 | 162 | def is_directory(directory_path : str) -> bool: 163 | return bool(directory_path and os.path.isdir(directory_path)) 164 | 165 | 166 | def is_image(image_path : str) -> bool: 167 | if is_file(image_path): 168 | mimetype, _ = mimetypes.guess_type(image_path) 169 | return bool(mimetype and mimetype.startswith('image/')) 170 | return False 171 | 172 | 173 | def is_video(video_path : str) -> bool: 174 | if is_file(video_path): 175 | mimetype, _ = mimetypes.guess_type(video_path) 176 | return bool(mimetype and mimetype.startswith('video/')) 177 | return False 178 | 179 | 180 | def conditional_download(download_directory_path : str, urls : List[str]) -> None: 181 | for url in urls: 182 | download_file_path = os.path.join(download_directory_path, os.path.basename(url)) 183 | total = get_download_size(url) 184 | if is_file(download_file_path): 185 | initial = os.path.getsize(download_file_path) 186 | else: 187 | initial = 0 188 | if initial < total: 189 | with tqdm(total = total, initial = initial, desc = wording.get('downloading'), unit = 'B', unit_scale = 
True, unit_divisor = 1024) as progress: 190 | subprocess.Popen([ 'curl', '--create-dirs', '--silent', '--insecure', '--location', '--continue-at', '-', '--output', download_file_path, url ]) 191 | current = initial 192 | while current < total: 193 | if is_file(download_file_path): 194 | current = os.path.getsize(download_file_path) 195 | progress.update(current - progress.n) 196 | 197 | 198 | @lru_cache(maxsize = None) 199 | def get_download_size(url : str) -> int: 200 | try: 201 | response = urllib.request.urlopen(url) # type: ignore[attr-defined] 202 | return int(response.getheader('Content-Length')) 203 | except (OSError, ValueError): 204 | return 0 205 | 206 | 207 | def is_download_done(url : str, file_path : str) -> bool: 208 | if is_file(file_path): 209 | return get_download_size(url) == os.path.getsize(file_path) 210 | return False 211 | 212 | 213 | def resolve_relative_path(path : str) -> str: 214 | return os.path.abspath(os.path.join(os.path.dirname(__file__), path)) 215 | 216 | 217 | _PROJECT_DIR = Path(__file__).absolute().parent.parent 218 | 219 | def list_module_names(path : str) -> Optional[List[str]]: 220 | path = str(_PROJECT_DIR / path) 221 | if os.path.exists(path): 222 | files = os.listdir(path) 223 | return [ Path(file).stem for file in files if not Path(file).stem.startswith('__') ] 224 | return None 225 | 226 | 227 | def encode_execution_providers(execution_providers : List[str]) -> List[str]: 228 | return [ execution_provider.replace('ExecutionProvider', '').lower() for execution_provider in execution_providers ] 229 | 230 | 231 | def decode_execution_providers(execution_providers: List[str]) -> List[str]: 232 | available_execution_providers = onnxruntime.get_available_providers() 233 | encoded_execution_providers = encode_execution_providers(available_execution_providers) 234 | return [ execution_provider for execution_provider, encoded_execution_provider in zip(available_execution_providers, encoded_execution_providers) if any(execution_provider in encoded_execution_provider for execution_provider in execution_providers) ] 235 | 236 | 237 | def get_device(execution_providers : List[str]) -> str: 238 | if 'CUDAExecutionProvider' in execution_providers: 239 | return 'cuda' 240 | if 'CoreMLExecutionProvider' in execution_providers: 241 | return 'mps' 242 | return 'cpu' 243 | -------------------------------------------------------------------------------- /facefusion/vision.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | from functools import lru_cache 3 | import cv2 4 | 5 | from facefusion.typing import Frame 6 | 7 | 8 | def get_video_frame(video_path : str, frame_number : int = 0) -> Optional[Frame]: 9 | if video_path: 10 | capture = cv2.VideoCapture(video_path) 11 | if capture.isOpened(): 12 | frame_total = capture.get(cv2.CAP_PROP_FRAME_COUNT) 13 | capture.set(cv2.CAP_PROP_POS_FRAMES, min(frame_total, frame_number - 1)) 14 | has_frame, frame = capture.read() 15 | capture.release() 16 | if has_frame: 17 | return frame 18 | return None 19 | 20 | 21 | def detect_fps(video_path : str) -> Optional[float]: 22 | if video_path: 23 | capture = cv2.VideoCapture(video_path) 24 | if capture.isOpened(): 25 | return capture.get(cv2.CAP_PROP_FPS) 26 | return None 27 | 28 | 29 | def count_video_frame_total(video_path : str) -> int: 30 | if video_path: 31 | capture = cv2.VideoCapture(video_path) 32 | if capture.isOpened(): 33 | video_frame_total = int(capture.get(cv2.CAP_PROP_FRAME_COUNT)) 34 | 
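# CAP_PROP_FRAME_COUNT is read from container metadata, so this is cheap even
# for long videos, though some codecs report an approximate count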
capture.release() 35 | return video_frame_total 36 | return 0 37 | 38 | 39 | def normalize_frame_color(frame : Frame) -> Frame: 40 | return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) 41 | 42 | 43 | def resize_frame_dimension(frame : Frame, max_width : int, max_height : int) -> Frame: 44 | height, width = frame.shape[:2] 45 | if height > max_height or width > max_width: 46 | scale = min(max_height / height, max_width / width) 47 | new_width = int(width * scale) 48 | new_height = int(height * scale) 49 | return cv2.resize(frame, (new_width, new_height)) 50 | return frame 51 | 52 | 53 | @lru_cache(maxsize = 128) 54 | def read_static_image(image_path : str) -> Optional[Frame]: 55 | return read_image(image_path) 56 | 57 | 58 | def read_image(image_path : str) -> Optional[Frame]: 59 | if image_path: 60 | return cv2.imread(image_path) 61 | return None 62 | 63 | 64 | def write_image(image_path : str, frame : Frame) -> bool: 65 | if image_path: 66 | return cv2.imwrite(image_path, frame) 67 | return False 68 | -------------------------------------------------------------------------------- /facefusion/wording.py: -------------------------------------------------------------------------------- 1 | WORDING =\ 2 | { 3 | 'python_not_supported': 'Python version is not supported, upgrade to {version} or higher', 4 | 'ffmpeg_not_installed': 'FFMpeg is not installed', 5 | 'install_dependency_help': 'select the variant of {dependency} to install', 6 | 'source_help': 'select a source image', 7 | 'target_help': 'select a target image or video', 8 | 'output_help': 'specify the output file or directory', 9 | 'frame_processors_help': 'choose from the available frame processors (choices: {choices}, ...)', 10 | 'frame_processor_model_help': 'choose the model for the frame processor', 11 | 'frame_processor_blend_help': 'specify the blend factor for the frame processor', 12 | 'ui_layouts_help': 'choose from the available ui layouts (choices: {choices}, ...)', 13 | 'keep_fps_help': 'preserve the frames per second (fps) of the target', 14 | 'keep_temp_help': 'retain temporary frames after processing', 15 | 'skip_audio_help': 'omit audio from the target', 16 | 'face_recognition_help': 'specify the method for face recognition', 17 | 'face_analyser_direction_help': 'specify the direction used for face analysis', 18 | 'face_analyser_age_help': 'specify the age used for face analysis', 19 | 'face_analyser_gender_help': 'specify the gender used for face analysis', 20 | 'reference_face_position_help': 'specify the position of the reference face', 21 | 'reference_face_distance_help': 'specify the distance between the reference face and the target face', 22 | 'reference_frame_number_help': 'specify the number of the reference frame', 23 | 'trim_frame_start_help': 'specify the start frame for extraction', 24 | 'trim_frame_end_help': 'specify the end frame for extraction', 25 | 'temp_frame_format_help': 'specify the image format used for frame extraction', 26 | 'temp_frame_quality_help': 'specify the image quality used for frame extraction', 27 | 'output_image_quality_help': 'specify the quality used for the output image', 28 | 'output_video_encoder_help': 'specify the encoder used for the output video', 29 | 'output_video_quality_help': 'specify the quality used for the output video', 30 | 'max_memory_help': 'specify the maximum amount of ram to be used (in gb)', 31 | 'execution_providers_help': 'choose from the available execution providers (choices: {choices}, ...)', 32 | 'execution_thread_count_help': 'specify the number of 
execution threads', 33 | 'execution_queue_count_help': 'specify the number of execution queues', 34 | 'skip_download_help': 'omit automated downloads and lookups', 35 | 'headless_help': 'run the program in headless mode', 36 | 'creating_temp': 'Creating temporary resources', 37 | 'extracting_frames_fps': 'Extracting frames with {fps} FPS', 38 | 'processing': 'Processing', 39 | 'downloading': 'Downloading', 40 | 'temp_frames_not_found': 'Temporary frames not found', 41 | 'compressing_image': 'Compressing image', 42 | 'compressing_image_failed': 'Compressing image failed', 43 | 'merging_video_fps': 'Merging video with {fps} FPS', 44 | 'merging_video_failed': 'Merging video failed', 45 | 'skipping_audio': 'Skipping audio', 46 | 'restoring_audio': 'Restoring audio', 47 | 'restoring_audio_failed': 'Restoring audio failed', 48 | 'clearing_temp': 'Clearing temporary resources', 49 | 'processing_image_succeed': 'Processing to image succeeded', 50 | 'processing_image_failed': 'Processing to image failed', 51 | 'processing_video_succeed': 'Processing to video succeeded', 52 | 'processing_video_failed': 'Processing to video failed', 53 | 'model_download_not_done': 'Download of the model is not done', 54 | 'model_file_not_present': 'File of the model is not present', 55 | 'select_image_source': 'Select an image for source path', 56 | 'select_image_or_video_target': 'Select an image or video for target path', 57 | 'select_file_or_directory_output': 'Select a file or directory for output path', 58 | 'no_source_face_detected': 'No source face detected', 59 | 'frame_processor_not_loaded': 'Frame processor {frame_processor} could not be loaded', 60 | 'frame_processor_not_implemented': 'Frame processor {frame_processor} not implemented correctly', 61 | 'ui_layout_not_loaded': 'UI layout {ui_layout} could not be loaded', 62 | 'ui_layout_not_implemented': 'UI layout {ui_layout} not implemented correctly', 63 | 'donate_button_label': 'DONATE', 64 | 'start_button_label': 'START', 65 | 'stop_button_label': 'STOP', 66 | 'clear_button_label': 'CLEAR', 67 | 'benchmark_runs_checkbox_group_label': 'BENCHMARK RUNS', 68 | 'benchmark_results_dataframe_label': 'BENCHMARK RESULTS', 69 | 'benchmark_cycles_slider_label': 'BENCHMARK CYCLES', 70 | 'execution_providers_checkbox_group_label': 'EXECUTION PROVIDERS', 71 | 'execution_thread_count_slider_label': 'EXECUTION THREAD COUNT', 72 | 'execution_queue_count_slider_label': 'EXECUTION QUEUE COUNT', 73 | 'face_analyser_direction_dropdown_label': 'FACE ANALYSER DIRECTION', 74 | 'face_analyser_age_dropdown_label': 'FACE ANALYSER AGE', 75 | 'face_analyser_gender_dropdown_label': 'FACE ANALYSER GENDER', 76 | 'reference_face_gallery_label': 'REFERENCE FACE', 77 | 'face_recognition_dropdown_label': 'FACE RECOGNITION', 78 | 'reference_face_distance_slider_label': 'REFERENCE FACE DISTANCE', 79 | 'max_memory_slider_label': 'MAX MEMORY', 80 | 'output_image_or_video_label': 'OUTPUT', 81 | 'output_path_textbox_label': 'OUTPUT PATH', 82 | 'output_image_quality_slider_label': 'OUTPUT IMAGE QUALITY', 83 | 'output_video_encoder_dropdown_label': 'OUTPUT VIDEO ENCODER', 84 | 'output_video_quality_slider_label': 'OUTPUT VIDEO QUALITY', 85 | 'preview_image_label': 'PREVIEW', 86 | 'preview_frame_slider_label': 'PREVIEW FRAME', 87 | 'frame_processors_checkbox_group_label': 'FRAME PROCESSORS', 88 | 'face_swapper_model_dropdown_label': 'FACE SWAPPER MODEL', 89 | 'face_enhancer_model_dropdown_label': 'FACE ENHANCER MODEL', 90 | 'face_enhancer_blend_slider_label': 'FACE ENHANCER BLEND', 91 | 
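# the uppercase entries below are rendered verbatim as gradio component labels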
'frame_enhancer_model_dropdown_label': 'FRAME ENHANCER MODEL', 92 | 'frame_enhancer_blend_slider_label': 'FRAME ENHANCER BLEND', 93 | 'common_options_checkbox_group_label': 'OPTIONS', 94 | 'temp_frame_format_dropdown_label': 'TEMP FRAME FORMAT', 95 | 'temp_frame_quality_slider_label': 'TEMP FRAME QUALITY', 96 | 'trim_frame_start_slider_label': 'TRIM FRAME START', 97 | 'trim_frame_end_slider_label': 'TRIM FRAME END', 98 | 'source_file_label': 'SOURCE', 99 | 'target_file_label': 'TARGET', 100 | 'webcam_image_label': 'WEBCAM', 101 | 'webcam_mode_radio_label': 'WEBCAM MODE', 102 | 'webcam_resolution_dropdown': 'WEBCAM RESOLUTION', 103 | 'webcam_fps_slider': 'WEBCAM FPS', 104 | 'point': '.', 105 | 'comma': ',', 106 | 'colon': ':', 107 | 'question_mark': '?', 108 | 'exclamation_mark': '!' 109 | } 110 | 111 | 112 | def get(key : str) -> str: 113 | return WORDING[key] 114 | -------------------------------------------------------------------------------- /install.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | from pathlib import Path 4 | 5 | import launch 6 | import pkg_resources 7 | 8 | _REQUIREMENT_PATH = Path(__file__).absolute().parent / "requirements.txt" 9 | 10 | 11 | def _get_comparable_version(version: str) -> tuple: 12 | return tuple(pkg_resources.parse_version(version).release)  # compare release parts as ints; plain string tuples would rank '1.9' above '1.16' 13 | 14 | 15 | def _get_installed_version(package: str) -> str | None: 16 | try: 17 | return pkg_resources.get_distribution(package).version 18 | except Exception: 19 | return None 20 | 21 | 22 | if not launch.is_installed("onnxruntime") and not launch.is_installed("onnxruntime-gpu"): 23 | import torch.cuda as cuda 24 | 25 | if cuda.is_available(): 26 | launch.run_pip('install "onnxruntime-gpu>=1.16.0"') 27 | else: 28 | launch.run_pip('install "onnxruntime>=1.16.0"') 29 | 30 | 31 | with _REQUIREMENT_PATH.open() as fp: 32 | for requirement in fp: 33 | try: 34 | requirement = requirement.strip() 35 | if "==" in requirement: 36 | name, version = requirement.split("==", 1) 37 | installed_version = _get_installed_version(name) 38 | 39 | if installed_version == version: 40 | continue 41 | 42 | launch.run_pip( 43 | f'install -U "{requirement}"', 44 | f"sd-webui-facefusion requirement: changing {name} version from {installed_version} to {version}", 45 | ) 46 | continue 47 | 48 | if ">=" in requirement: 49 | name, version = requirement.split(">=", 1) 50 | installed_version = _get_installed_version(name) 51 | 52 | if installed_version and ( 53 | _get_comparable_version(installed_version) >= _get_comparable_version(version) 54 | ): 55 | continue 56 | 57 | launch.run_pip( 58 | f'install -U "{requirement}"', 59 | f"sd-webui-facefusion requirement: changing {name} version from {installed_version} to {version}", 60 | ) 61 | continue 62 | 63 | if not launch.is_installed(requirement): 64 | launch.run_pip( 65 | f'install "{requirement}"', 66 | f"sd-webui-facefusion requirement: {requirement}", 67 | ) 68 | except Exception as error: 69 | print(error) 70 | print(f"Warning: Failed to install '{requirement}', some features may not work.") 71 | -------------------------------------------------------------------------------- /install_origin.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | from facefusion import installer 4 | 5 | if __name__ == '__main__': 6 | installer.cli() 7 | -------------------------------------------------------------------------------- /mypy.ini: 
-------------------------------------------------------------------------------- 1 | [mypy] 2 | check_untyped_defs = True 3 | disallow_any_generics = True 4 | disallow_untyped_calls = True 5 | disallow_untyped_defs = True 6 | ignore_missing_imports = True 7 | strict_optional = False 8 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | albumentations==1.4.3 2 | insightface>=0.7.3 3 | onnx>=1.14.1 4 | opennsfw2>=0.10.2 5 | tensorflow>=2.13.0 6 | basicsr==1.4.2 7 | realesrgan==0.3.0 8 | -------------------------------------------------------------------------------- /run.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | from facefusion import core 4 | 5 | if __name__ == '__main__': 6 | core.cli() 7 | -------------------------------------------------------------------------------- /scripts/facefusion_ui.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import gradio as gr 4 | from modules import script_callbacks 5 | 6 | from facefusion.core import apply_args, get_argument_parser, limit_resources, pre_check 7 | from facefusion.processors.frame.modules import face_enhancer, face_swapper, frame_enhancer 8 | from facefusion.uis.layouts import default 9 | 10 | 11 | def on_ui_tabs(): 12 | apply_args(get_argument_parser()) 13 | limit_resources() 14 | 15 | if not pre_check(): 16 | return 17 | 18 | if ( 19 | not face_enhancer.pre_check() 20 | or not face_swapper.pre_check() 21 | or not frame_enhancer.pre_check() 22 | ): 23 | return 24 | 25 | if not default.pre_check(): 26 | return 27 | 28 | with gr.Blocks() as block: 29 | if default.pre_render(): 30 | default.render() 31 | default.listen() 32 | 33 | return ((block, "FaceFusion", "facefusion"),) 34 | 35 | 36 | script_callbacks.on_ui_tabs(on_ui_tabs) 37 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/diffus-me/sd-webui-facefusion/f84fed74bd20374112c7009f8d75c391d64cfa00/tests/__init__.py -------------------------------------------------------------------------------- /tests/test_cli.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | import sys 3 | import pytest 4 | 5 | from facefusion import wording 6 | from facefusion.utilities import conditional_download 7 | 8 | 9 | @pytest.fixture(scope = 'module', autouse = True) 10 | def before_all() -> None: 11 | conditional_download('.assets/examples', 12 | [ 13 | 'https://github.com/facefusion/facefusion-assets/releases/download/examples/source.jpg', 14 | 'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-1080p.mp4' 15 | ]) 16 | subprocess.run([ 'ffmpeg', '-i', '.assets/examples/target-1080p.mp4', '-vframes', '1', '.assets/examples/target-1080p.jpg' ]) 17 | 18 | 19 | def test_image_to_image() -> None: 20 | commands = [ sys.executable, 'run.py', '-s', '.assets/examples/source.jpg', '-t', '.assets/examples/target-1080p.jpg', '-o', '.assets/examples', '--headless' ] 21 | run = subprocess.run(commands, stdout = subprocess.PIPE) 22 | 23 | assert run.returncode == 0 24 | assert wording.get('processing_image_succeed') in run.stdout.decode() 25 | 26 | 27 | def test_image_to_video() -> None: 28 
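# trimming to the first 10 frames below keeps this test fast while still
# exercising the full extract, process and merge pipeline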
| commands = [ sys.executable, 'run.py', '-s', '.assets/examples/source.jpg', '-t', '.assets/examples/target-1080p.mp4', '-o', '.assets/examples', '--trim-frame-end', '10', '--headless' ] 29 | run = subprocess.run(commands, stdout = subprocess.PIPE) 30 | 31 | assert run.returncode == 0 32 | assert wording.get('processing_video_succeed') in run.stdout.decode() 33 | -------------------------------------------------------------------------------- /tests/test_utilities.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import platform 3 | import subprocess 4 | import pytest 5 | 6 | import facefusion.globals 7 | from facefusion.utilities import conditional_download, extract_frames, create_temp, get_temp_directory_path, clear_temp, normalize_output_path, is_file, is_directory, is_image, is_video, get_download_size, is_download_done, encode_execution_providers, decode_execution_providers 8 | 9 | 10 | @pytest.fixture(scope = 'module', autouse = True) 11 | def before_all() -> None: 12 | facefusion.globals.temp_frame_quality = 100 13 | facefusion.globals.trim_frame_start = None 14 | facefusion.globals.trim_frame_end = None 15 | facefusion.globals.temp_frame_format = 'png' 16 | conditional_download('.assets/examples', 17 | [ 18 | 'https://github.com/facefusion/facefusion-assets/releases/download/examples/source.jpg', 19 | 'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-240p.mp4' 20 | ]) 21 | subprocess.run([ 'ffmpeg', '-i', '.assets/examples/target-240p.mp4', '-vf', 'fps=25', '.assets/examples/target-240p-25fps.mp4' ]) 22 | subprocess.run([ 'ffmpeg', '-i', '.assets/examples/target-240p.mp4', '-vf', 'fps=30', '.assets/examples/target-240p-30fps.mp4' ]) 23 | subprocess.run([ 'ffmpeg', '-i', '.assets/examples/target-240p.mp4', '-vf', 'fps=60', '.assets/examples/target-240p-60fps.mp4' ]) 24 | 25 | 26 | @pytest.fixture(scope = 'function', autouse = True) 27 | def before_each() -> None: 28 | facefusion.globals.trim_frame_start = None 29 | facefusion.globals.trim_frame_end = None 30 | facefusion.globals.temp_frame_quality = 90 31 | facefusion.globals.temp_frame_format = 'jpg' 32 | 33 | 34 | def test_extract_frames() -> None: 35 | target_paths =\ 36 | [ 37 | '.assets/examples/target-240p-25fps.mp4', 38 | '.assets/examples/target-240p-30fps.mp4', 39 | '.assets/examples/target-240p-60fps.mp4' 40 | ] 41 | for target_path in target_paths: 42 | temp_directory_path = get_temp_directory_path(target_path) 43 | create_temp(target_path) 44 | 45 | assert extract_frames(target_path, 30.0) is True 46 | assert len(glob.glob1(temp_directory_path, '*.jpg')) == 324 47 | 48 | clear_temp(target_path) 49 | 50 | 51 | def test_extract_frames_with_trim_start() -> None: 52 | facefusion.globals.trim_frame_start = 224 53 | data_provider =\ 54 | [ 55 | ('.assets/examples/target-240p-25fps.mp4', 55), 56 | ('.assets/examples/target-240p-30fps.mp4', 100), 57 | ('.assets/examples/target-240p-60fps.mp4', 212) 58 | ] 59 | for target_path, frame_total in data_provider: 60 | temp_directory_path = get_temp_directory_path(target_path) 61 | create_temp(target_path) 62 | 63 | assert extract_frames(target_path, 30.0) is True 64 | assert len(glob.glob1(temp_directory_path, '*.jpg')) == frame_total 65 | 66 | clear_temp(target_path) 67 | 68 | 69 | def test_extract_frames_with_trim_start_and_trim_end() -> None: 70 | facefusion.globals.trim_frame_start = 124 71 | facefusion.globals.trim_frame_end = 224 72 | data_provider =\ 73 | [ 74 | 
('.assets/examples/target-240p-25fps.mp4', 120), 75 | ('.assets/examples/target-240p-30fps.mp4', 100), 76 | ('.assets/examples/target-240p-60fps.mp4', 50) 77 | ] 78 | for target_path, frame_total in data_provider: 79 | temp_directory_path = get_temp_directory_path(target_path) 80 | create_temp(target_path) 81 | 82 | assert extract_frames(target_path, 30.0) is True 83 | assert len(glob.glob1(temp_directory_path, '*.jpg')) == frame_total 84 | 85 | clear_temp(target_path) 86 | 87 | 88 | def test_extract_frames_with_trim_end() -> None: 89 | facefusion.globals.trim_frame_end = 100 90 | data_provider =\ 91 | [ 92 | ('.assets/examples/target-240p-25fps.mp4', 120), 93 | ('.assets/examples/target-240p-30fps.mp4', 100), 94 | ('.assets/examples/target-240p-60fps.mp4', 50) 95 | ] 96 | for target_path, frame_total in data_provider: 97 | temp_directory_path = get_temp_directory_path(target_path) 98 | create_temp(target_path) 99 | 100 | assert extract_frames(target_path, 30.0) is True 101 | assert len(glob.glob1(temp_directory_path, '*.jpg')) == frame_total 102 | 103 | clear_temp(target_path) 104 | 105 | 106 | def test_normalize_output_path() -> None: 107 | if platform.system().lower() != 'windows': 108 | assert normalize_output_path('.assets/examples/source.jpg', None, '.assets/examples/target-240p.mp4') == '.assets/examples/target-240p.mp4' 109 | assert normalize_output_path(None, '.assets/examples/target-240p.mp4', '.assets/examples/target-240p.mp4') == '.assets/examples/target-240p.mp4' 110 | assert normalize_output_path('.assets/examples/source.jpg', '.assets/examples/target-240p.mp4', '.assets/examples') == '.assets/examples/source-target-240p.mp4' 111 | assert normalize_output_path(None, '.assets/examples/target-240p.mp4', '.assets/examples/output.mp4') == '.assets/examples/output.mp4' 112 | assert normalize_output_path(None, '.assets/examples/target-240p.mp4', '.assets/output.mov') == '.assets/output.mp4' 113 | assert normalize_output_path(None, '.assets/examples/target-240p.mp4', '.assets/examples/invalid') is None 114 | assert normalize_output_path(None, '.assets/examples/target-240p.mp4', '.assets/invalid/output.mp4') is None 115 | assert normalize_output_path(None, '.assets/examples/target-240p.mp4', 'invalid') is None 116 | assert normalize_output_path('.assets/examples/source.jpg', '.assets/examples/target-240p.mp4', None) is None 117 | 118 | 119 | def test_is_file() -> None: 120 | assert is_file('.assets/examples/source.jpg') is True 121 | assert is_file('.assets/examples') is False 122 | assert is_file('invalid') is False 123 | 124 | 125 | def test_is_directory() -> None: 126 | assert is_directory('.assets/examples') is True 127 | assert is_directory('.assets/examples/source.jpg') is False 128 | assert is_directory('invalid') is False 129 | 130 | 131 | def test_is_image() -> None: 132 | assert is_image('.assets/examples/source.jpg') is True 133 | assert is_image('.assets/examples/target-240p.mp4') is False 134 | assert is_image('invalid') is False 135 | 136 | 137 | def test_is_video() -> None: 138 | assert is_video('.assets/examples/target-240p.mp4') is True 139 | assert is_video('.assets/examples/source.jpg') is False 140 | assert is_video('invalid') is False 141 | 142 | 143 | def test_get_download_size() -> None: 144 | assert get_download_size('https://github.com/facefusion/facefusion-assets/releases/download/examples/target-240p.mp4') == 191675 145 | assert get_download_size('https://github.com/facefusion/facefusion-assets/releases/download/examples/target-360p.mp4') == 370732 146 | 
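# an unreachable url must degrade to 0 rather than raise, mirroring the
# except branch inside get_download_size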
assert get_download_size('invalid') == 0 147 | 148 | 149 | def test_is_download_done() -> None: 150 | assert is_download_done('https://github.com/facefusion/facefusion-assets/releases/download/examples/target-240p.mp4', '.assets/examples/target-240p.mp4') is True 151 | assert is_download_done('https://github.com/facefusion/facefusion-assets/releases/download/examples/target-240p.mp4','invalid') is False 152 | assert is_download_done('invalid', 'invalid') is False 153 | 154 | 155 | def test_encode_execution_providers() -> None: 156 | assert encode_execution_providers([ 'CPUExecutionProvider' ]) == [ 'cpu' ] 157 | 158 | 159 | def test_decode_execution_providers() -> None: 160 | assert decode_execution_providers([ 'cpu' ]) == [ 'CPUExecutionProvider' ] 161 | -------------------------------------------------------------------------------- /tests/test_vision.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | import pytest 3 | 4 | import facefusion.globals 5 | from facefusion.utilities import conditional_download 6 | from facefusion.vision import get_video_frame, detect_fps, count_video_frame_total 7 | 8 | 9 | @pytest.fixture(scope = 'module', autouse = True) 10 | def before_all() -> None: 11 | facefusion.globals.temp_frame_quality = 100 12 | facefusion.globals.trim_frame_start = None 13 | facefusion.globals.trim_frame_end = None 14 | facefusion.globals.temp_frame_format = 'png' 15 | conditional_download('.assets/examples', 16 | [ 17 | 'https://github.com/facefusion/facefusion-assets/releases/download/examples/source.jpg', 18 | 'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-240p.mp4' 19 | ]) 20 | subprocess.run([ 'ffmpeg', '-i', '.assets/examples/target-240p.mp4', '-vf', 'fps=25', '.assets/examples/target-240p-25fps.mp4' ]) 21 | subprocess.run([ 'ffmpeg', '-i', '.assets/examples/target-240p.mp4', '-vf', 'fps=30', '.assets/examples/target-240p-30fps.mp4' ]) 22 | subprocess.run([ 'ffmpeg', '-i', '.assets/examples/target-240p.mp4', '-vf', 'fps=60', '.assets/examples/target-240p-60fps.mp4' ]) 23 | 24 | 25 | @pytest.fixture(scope = 'function', autouse = True) 26 | def before_each() -> None: 27 | facefusion.globals.trim_frame_start = None 28 | facefusion.globals.trim_frame_end = None 29 | facefusion.globals.temp_frame_quality = 90 30 | facefusion.globals.temp_frame_format = 'jpg' 31 | 32 | 33 | def test_get_video_frame() -> None: 34 | assert get_video_frame('.assets/examples/target-240p-25fps.mp4') is not None 35 | assert get_video_frame('invalid') is None 36 | 37 | 38 | def test_detect_fps() -> None: 39 | assert detect_fps('.assets/examples/target-240p-25fps.mp4') == 25.0 40 | assert detect_fps('.assets/examples/target-240p-30fps.mp4') == 30.0 41 | assert detect_fps('.assets/examples/target-240p-60fps.mp4') == 60.0 42 | assert detect_fps('invalid') is None 43 | 44 | 45 | def test_count_video_frame_total() -> None: 46 | assert count_video_frame_total('.assets/examples/target-240p-25fps.mp4') == 270 47 | assert count_video_frame_total('.assets/examples/target-240p-30fps.mp4') == 324 48 | assert count_video_frame_total('.assets/examples/target-240p-60fps.mp4') == 648 49 | assert count_video_frame_total('invalid') == 0 50 | --------------------------------------------------------------------------------
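The execution provider helpers exercised by the last two tests translate between onnxruntime's provider class names and the short lowercase aliases accepted on the command line. A minimal round-trip sketch, assuming onnxruntime is installed and the repository root is on the import path; on a CPU-only machine the available list contains just CPUExecutionProvider:

import onnxruntime

from facefusion.utilities import decode_execution_providers, encode_execution_providers, get_device

available = onnxruntime.get_available_providers()
print(encode_execution_providers(available)) # e.g. ['cuda', 'cpu'] on a CUDA build
print(decode_execution_providers([ 'cpu' ])) # ['CPUExecutionProvider']
print(get_device(decode_execution_providers([ 'cpu' ]))) # 'cpu'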