├── .DS_Store
├── .gitattributes
├── Dockerfile.cpu
├── Dockerfile.gpu
├── INSTALL.md
├── LICENSE
├── README.md
├── USAGE.md
├── clip
│   └── .DS_Store
├── clip_faces
│   └── .DS_Store
├── faceswap.py
├── john
│   └── .DS_Store
├── john_faces
│   └── .DS_Store
├── lib
│   ├── .DS_Store
│   ├── FaceFilter.py
│   ├── PixelShuffler.py
│   ├── Serializer.py
│   ├── __init__.py
│   ├── __init__.pyc
│   ├── __pycache__
│   │   ├── FaceFilter.cpython-36.pyc
│   │   ├── FaceFilter.cpython-37.pyc
│   │   ├── PixelShuffler.cpython-36.pyc
│   │   ├── Serializer.cpython-36.pyc
│   │   ├── Serializer.cpython-37.pyc
│   │   ├── __init__.cpython-36.pyc
│   │   ├── __init__.cpython-37.pyc
│   │   ├── align_eyes.cpython-36.pyc
│   │   ├── align_eyes.cpython-37.pyc
│   │   ├── aligner.cpython-36.pyc
│   │   ├── aligner.cpython-37.pyc
│   │   ├── alignments.cpython-36.pyc
│   │   ├── alignments.cpython-37.pyc
│   │   ├── cli.cpython-36.pyc
│   │   ├── cli.cpython-37.pyc
│   │   ├── detect_blur.cpython-36.pyc
│   │   ├── detect_blur.cpython-37.pyc
│   │   ├── faces_detect.cpython-36.pyc
│   │   ├── faces_detect.cpython-37.pyc
│   │   ├── gpu_stats.cpython-36.pyc
│   │   ├── gpu_stats.cpython-37.pyc
│   │   ├── multithreading.cpython-36.pyc
│   │   ├── multithreading.cpython-37.pyc
│   │   ├── queue_manager.cpython-36.pyc
│   │   ├── queue_manager.cpython-37.pyc
│   │   ├── training_data.cpython-36.pyc
│   │   ├── training_data.cpython-37.pyc
│   │   ├── umeyama.cpython-36.pyc
│   │   ├── umeyama.cpython-37.pyc
│   │   ├── utils.cpython-36.pyc
│   │   └── utils.cpython-37.pyc
│   ├── align_eyes.py
│   ├── aligner.py
│   ├── alignments.py
│   ├── cli.py
│   ├── cli.pyc
│   ├── detect_blur.py
│   ├── faces_detect.py
│   ├── gpu_stats.py
│   ├── gui
│   │   ├── .cache
│   │   │   ├── icons
│   │   │   │   ├── clear.png
│   │   │   │   ├── graph.png
│   │   │   │   ├── move.png
│   │   │   │   ├── open_file.png
│   │   │   │   ├── open_folder.png
│   │   │   │   ├── reset.png
│   │   │   │   ├── save.png
│   │   │   │   └── zoom.png
│   │   │   └── preview
│   │   │       └── .keep
│   │   ├── __init__.py
│   │   ├── command.py
│   │   ├── display.py
│   │   ├── display_analysis.py
│   │   ├── display_command.py
│   │   ├── display_graph.py
│   │   ├── display_page.py
│   │   ├── options.py
│   │   ├── stats.py
│   │   ├── statusbar.py
│   │   ├── tooltip.py
│   │   ├── utils.py
│   │   └── wrapper.py
│   ├── multithreading.py
│   ├── queue_manager.py
│   ├── training_data.py
│   ├── umeyama.py
│   └── utils.py
├── model
│   └── .DS_Store
├── output
│   └── .DS_Store
├── plugins
│   ├── __init__.py
│   ├── __init__.pyc
│   ├── __pycache__
│   │   ├── __init__.cpython-36.pyc
│   │   ├── __init__.cpython-37.pyc
│   │   ├── plugin_loader.cpython-36.pyc
│   │   └── plugin_loader.cpython-37.pyc
│   ├── convert
│   │   ├── Convert_Adjust.py
│   │   ├── Convert_Masked.py
│   │   ├── __init__.py
│   │   └── __pycache__
│   │       ├── Convert_Masked.cpython-36.pyc
│   │       └── __init__.cpython-36.pyc
│   ├── extract
│   │   ├── __init__.py
│   │   ├── __pycache__
│   │   │   ├── __init__.cpython-36.pyc
│   │   │   └── __init__.cpython-37.pyc
│   │   ├── align
│   │   │   ├── .cache
│   │   │   │   └── 2DFAN-4.pb
│   │   │   ├── __init__.py
│   │   │   ├── __pycache__
│   │   │   │   ├── __init__.cpython-36.pyc
│   │   │   │   ├── __init__.cpython-37.pyc
│   │   │   │   ├── _base.cpython-36.pyc
│   │   │   │   ├── _base.cpython-37.pyc
│   │   │   │   ├── dlib.cpython-36.pyc
│   │   │   │   ├── fan.cpython-36.pyc
│   │   │   │   └── fan.cpython-37.pyc
│   │   │   ├── _base.py
│   │   │   ├── dlib.py
│   │   │   └── fan.py
│   │   └── detect
│   │       ├── .cache
│   │       │   ├── det1.npy
│   │       │   ├── det2.npy
│   │       │   └── det3.npy
│   │       ├── __init__.py
│   │       ├── __pycache__
│   │       │   ├── __init__.cpython-36.pyc
│   │       │   ├── __init__.cpython-37.pyc
│   │       │   ├── _base.cpython-36.pyc
│   │       │   ├── _base.cpython-37.pyc
│   │       │   ├── mtcnn.cpython-36.pyc
│   │       │   └── mtcnn.cpython-37.pyc
│   │       ├── _base.py
│   │       ├── dlib_cnn.py
│   │       ├── dlib_hog.py
│   │       ├── manual.py
│   │       └── mtcnn.py
│   ├── model
│   │   ├── Model_GAN
│   │   │   ├── Model.py
│   │   │   ├── Trainer.py
│   │   │   ├── __init__.py
│   │   │   └── instance_normalization.py
│   │   ├── Model_GAN128
│   │   │   ├── Model.py
│   │   │   ├── Trainer.py
│   │   │   ├── __init__.py
│   │   │   └── instance_normalization.py
│   │   ├── Model_IAE
│   │   │   ├── AutoEncoder.py
│   │   │   ├── Model.py
│   │   │   ├── Trainer.py
│   │   │   └── __init__.py
│   │   ├── Model_LowMem
│   │   │   ├── AutoEncoder.py
│   │   │   ├── Model.py
│   │   │   ├── Trainer.py
│   │   │   └── __init__.py
│   │   ├── Model_Original
│   │   │   ├── AutoEncoder.py
│   │   │   ├── Model.py
│   │   │   ├── Trainer.py
│   │   │   ├── __init__.py
│   │   │   └── __pycache__
│   │   │       ├── AutoEncoder.cpython-36.pyc
│   │   │       ├── Model.cpython-36.pyc
│   │   │       ├── Trainer.cpython-36.pyc
│   │   │       └── __init__.cpython-36.pyc
│   │   ├── Model_OriginalHighRes
│   │   │   ├── Model.py
│   │   │   ├── Trainer.py
│   │   │   ├── __init__.py
│   │   │   ├── _version.py
│   │   │   └── instance_normalization.py
│   │   ├── __init__.py
│   │   └── __pycache__
│   │       └── __init__.cpython-36.pyc
│   ├── plugin_loader.py
│   └── plugin_loader.pyc
├── requirements.txt
├── scripts
│   ├── .DS_Store
│   ├── __init__.py
│   ├── __pycache__
│   │   ├── __init__.cpython-36.pyc
│   │   ├── __init__.cpython-37.pyc
│   │   ├── convert.cpython-36.pyc
│   │   ├── convert.cpython-37.pyc
│   │   ├── extract.cpython-36.pyc
│   │   ├── extract.cpython-37.pyc
│   │   ├── fsmedia.cpython-36.pyc
│   │   ├── fsmedia.cpython-37.pyc
│   │   └── train.cpython-36.pyc
│   ├── convert.py
│   ├── extract.py
│   ├── fsmedia.py
│   ├── gui.py
│   └── train.py
├── setup.cfg
├── setup.py
├── tools.py
└── tools
    ├── __init__.py
    ├── alignments.py
    ├── cli.py
    ├── effmpeg.py
    ├── lib_alignments
    │   ├── __init__.py
    │   ├── annotate.py
    │   ├── jobs.py
    │   ├── jobs_manual.py
    │   └── media.py
    └── sort.py

/.gitattributes:
--------------------------------------------------------------------------------
1 | # Auto detect text files and perform LF normalization
2 | * text=auto
--------------------------------------------------------------------------------
/Dockerfile.cpu:
--------------------------------------------------------------------------------
1 | FROM tensorflow/tensorflow:latest-py3
2 |
3 | RUN apt-get update -qq -y \
4 |     && apt-get install -y libsm6 libxrender1 libxext-dev python3-tk \
5 |     && apt-get clean \
6 |     && rm -rf /var/lib/apt/lists/*
7 |
8 | COPY requirements.txt /opt/
9 | RUN pip3 install cmake
10 | RUN pip3 install dlib --install-option=--yes --install-option=USE_AVX_INSTRUCTIONS
11 | RUN pip3 --no-cache-dir install -r /opt/requirements.txt && rm /opt/requirements.txt
12 |
13 | WORKDIR "/notebooks"
14 | CMD ["/run_jupyter.sh", "--allow-root"]
--------------------------------------------------------------------------------
/Dockerfile.gpu:
--------------------------------------------------------------------------------
1 | FROM tensorflow/tensorflow:latest-gpu-py3
2 |
3 | RUN apt-get update -qq -y \
4 |     && apt-get install -y libsm6 libxrender1 libxext-dev python3-tk \
5 |     && apt-get clean \
6 |     && rm -rf /var/lib/apt/lists/*
7 |
8 | COPY requirements.txt /opt/
9 | RUN pip3 install cmake
10 | RUN pip3 install dlib --install-option=--yes --install-option=USE_AVX_INSTRUCTIONS
11 | RUN pip3 --no-cache-dir install -r /opt/requirements.txt && rm /opt/requirements.txt
12 |
13 | # patch for tensorflow:latest-gpu-py3 image
14 | RUN cd /usr/local/cuda/lib64 \
15 |     && mv stubs/libcuda.so ./ \
16 |     && ln -s libcuda.so libcuda.so.1 \
17 |     && ldconfig
18 |
19 | WORKDIR "/notebooks"
20 | CMD ["/run_jupyter.sh", "--allow-root"]
--------------------------------------------------------------------------------
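Neither Dockerfile is documented elsewhere in this dump, so here is a hedged sketch of how they might be built and run; the image tag and port mapping are illustrative assumptions, not names the project defines:

```bash
# Build the CPU image from the repository root (the tag name is arbitrary)
docker build -t faceswap-cpu -f Dockerfile.cpu .

# Run it, mounting the repo into the image's Jupyter working directory
docker run --rm -it -p 8888:8888 -v "$PWD:/notebooks" faceswap-cpu
```

The GPU image is built the same way from `Dockerfile.gpu`, but it needs the NVIDIA container runtime (e.g. `nvidia-docker`, or `docker run --runtime=nvidia`) to expose the GPU inside the container.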
/USAGE.md:
--------------------------------------------------------------------------------
1 | **Before attempting any of this, please make sure you have read, understood and completed the [installation instructions](../master/INSTALL.md). If you are experiencing issues, please raise them in the [faceswap-playground](https://github.com/deepfakes/faceswap-playground) repository instead of the main repo.**
2 |
3 | # Workflow
4 | So, you want to swap faces in pictures and videos? Hold up, because first you need to understand what this collection of scripts can do, how it does it, and what it currently can't do.
5 |
6 | The basic operation of this script is simple. It trains a machine learning model to recognize and transform two faces based on pictures. The machine learning model is our little "bot" that we're teaching to do the actual swapping, and the pictures are the "training data" that we use to train it. Note that the bot primarily processes faces; other objects might not work.
7 |
8 | So here's our plan. We want to create a reality where Donald Trump lost the presidency to Nic Cage; we have his inauguration video; let's replace Trump with Cage.
9 |
10 | ## Gathering raw data
11 | In order to accomplish this, the bot needs to learn to recognize both face A (Trump) and face B (Nic Cage). Out of the box, the bot doesn't know what Trump or Nic Cage looks like, so we need to show it some pictures and let it learn which is which. That means we need pictures of both of these faces first.
12 |
13 | A possible source is Google, DuckDuckGo or Bing image search. There are scripts to download large amounts of images. Alternatively, if you have a video of the person you're looking for (from interviews, public speeches, or movies), you can convert this video to still images and use those. See [Extracting video frames](#extracting-video-frames-with-ffmpeg) for more information.
14 |
15 | Feel free to list your image sets in the [faceswap-playground](https://github.com/deepfakes/faceswap-playground), or add more methods to this file.
16 |
17 | So now we have a folder full of pictures of Trump and a separate folder of Nic Cage. Let's save them in the directory where we put the faceswap project. Example: `~/faceswap/photo/trump` and `~/faceswap/photo/cage`
18 |
19 | ## EXTRACT
20 | So here's a problem. We have a ton of pictures of both our subjects, but they're just pictures of them doing stuff or in an environment with other people. Their bodies are in there, they're in there with other people... It's a mess. We can only train our bot if the data we have is consistent and focuses on the subject we want to swap. This is where faceswap first comes in.
21 |
22 | ```bash
23 | # To extract trump's faces:
24 | python faceswap.py extract -i ~/faceswap/photo/trump -o ~/faceswap/data/trump
25 | # To extract cage's faces:
26 | python faceswap.py extract -i ~/faceswap/photo/cage -o ~/faceswap/data/cage
27 | ```
28 |
29 | We specify our photo input directory and the output folder where our training data will be saved. The script will then try its best to recognize face landmarks, crop the image down to the face, and save it to the output folder. Note: this script will make grabbing training data much easier, but it is not perfect. It will (incorrectly) detect multiple faces in some photos, and it does not recognize whether the face belongs to the person we want to swap. Therefore: **Always check your training data before you start training.** The training data will influence how good your model will be at swapping.
30 |
31 | You can see the full list of arguments for extracting via the help flag, i.e.
32 |
33 | ```bash
34 | python faceswap.py extract -h
35 | ```
36 |
37 | ## TRAIN
38 | The training process will take the longest, especially on CPU. We specify the folders where the two faces are, and where we will save our training model. Training will start hammering away at the data once you run the command. I personally like to go by the preview and quit the process once I'm happy with the results.
39 |
40 | ```bash
41 | python faceswap.py train -A ~/faceswap/data/trump -B ~/faceswap/data/cage -m ~/faceswap/models/
42 | # or -p to show a preview
43 | python faceswap.py train -A ~/faceswap/data/trump -B ~/faceswap/data/cage -m ~/faceswap/models/ -p
44 | ```
45 |
46 | If you use the preview feature, select the preview window and press ENTER to save your processed data and quit gracefully. Without the preview enabled, you might have to forcefully quit by hitting Ctrl+C to cancel the command. Note that the model is only saved about every 100 iterations, which can take quite a while, so make sure it has saved before stopping the process.
47 |
48 | You can see the full list of arguments for training via the help flag, i.e.
49 |
50 | ```bash
51 | python faceswap.py train -h
52 | ```
53 |
54 | ## CONVERT
55 | Now that we're happy with our trained model, we can convert our video. How does it work? Similarly to the extraction script, actually! The conversion script basically detects a face in a picture using the same algorithm, quickly crops the image to the right size, runs our bot on this cropped image of the face it has found, and then (crudely) pastes the processed face back into the picture.
56 |
57 | Remember those initial pictures we had of Trump? Let's try swapping a face there. We will use that directory as our input directory, create a new folder where the output will be saved, and tell the script which model to use.
58 |
59 | ```bash
60 | python faceswap.py convert -i ~/faceswap/photo/trump/ -o ~/faceswap/output/ -m ~/faceswap/models/
61 | ```
62 |
63 | It should now start swapping the faces in all of these pictures.
64 |
65 | You can see the full list of arguments available for converting via the help flag, i.e.
66 |
67 | ```bash
68 | python faceswap.py convert -h
69 | ```
70 |
71 | ## GUI
72 | All of the above commands and options can be run from the GUI. This is launched with:
73 | ```bash
74 | python faceswap.py gui
75 | ```
76 |
77 |
78 |
79 | ## Videos
80 | A video is just a series of pictures in the form of frames. Therefore you can gather the raw images from a video for your dataset, or combine your results back into a video.
81 |
82 | ## EFFMPEG
83 | You can perform various video processes with the built-in effmpeg tool. You can see the full list of arguments available by running:
84 | ```bash
85 | python tools.py effmpeg -h
86 | ```
87 |
88 | ## Extracting video frames with FFMPEG
89 | Alternatively, you can split a video into separate frames using [ffmpeg](https://www.ffmpeg.org). Below is an example command that turns a video into separate frames.
90 |
91 | ```bash
92 | ffmpeg -i /path/to/my/video.mp4 /path/to/output/video-frame-%d.png
93 | ```
94 |
95 | ## Generating a video
96 | If you split a video into frames (using ffmpeg, for example) and used them as the target for swapping faces onto, you can combine the resulting frames again. The command below stitches the png frames back into a single video.
97 |
98 | ```bash
99 | ffmpeg -i video-frame-%d.png -c:v libx264 -vf "fps=25,format=yuv420p" out.mp4
100 | ```
101 |
102 | ## Notes
103 | This guide is far from complete. Functionality may change over time, and new dependencies are added and removed as time goes on.
104 |
105 | If you are experiencing issues, please raise them in the [faceswap-playground](https://github.com/deepfakes/faceswap-playground) repository instead of the main repo.
106 |
--------------------------------------------------------------------------------
/faceswap.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | """ The master faceswap.py script """
3 | import sys
4 |
5 | import lib.cli as cli
6 |
7 | if sys.version_info[0] < 3:
8 |     raise Exception("This program requires at least python3.2")
9 | if sys.version_info[0] == 3 and sys.version_info[1] < 2:
10 |     raise Exception("This program requires at least python3.2")
11 |
12 |
13 | def bad_args(args):
14 |     """ Print help on bad arguments """
15 |     PARSER.print_help()
16 |     exit(0)
17 |
18 |
19 | if __name__ == "__main__":
20 |     PARSER = cli.FullHelpArgumentParser()
21 |     SUBPARSER = PARSER.add_subparsers()
22 |     EXTRACT = cli.ExtractArgs(SUBPARSER,
23 |                               "extract",
24 |                               "Extract the faces from pictures")
25 |     TRAIN = cli.TrainArgs(SUBPARSER,
26 |                           "train",
27 |                           "This command trains the model for the two faces A and B")
28 |     CONVERT = cli.ConvertArgs(SUBPARSER,
29 |                               "convert",
30 |                               "Convert a source image to a new one with the face swapped")
31 |     GUI = cli.GuiArgs(SUBPARSER,
32 |                       "gui",
33 |                       "Launch the Faceswap Graphical User Interface")
34 |     PARSER.set_defaults(func=bad_args)
35 |     ARGUMENTS = PARSER.parse_args()
36 |     ARGUMENTS.func(ARGUMENTS)
--------------------------------------------------------------------------------
/lib/FaceFilter.py:
--------------------------------------------------------------------------------
1 | # import dlib
2 | # import numpy as np
3 | import face_recognition
4 | # import face_recognition_models
5 |
6 | def avg(arr):
7 |     return sum(arr)*1.0/len(arr)
8 |
9 | class FaceFilter():
10 |     def __init__(self, reference_file_paths, nreference_file_paths, threshold=0.6):
11 |         images = list(map(face_recognition.load_image_file, reference_file_paths))
12 |         nimages = list(map(face_recognition.load_image_file, nreference_file_paths))
13 |         # Note: we take only the first face, so each reference file should contain only one face.
14 |         self.encodings = list(map(lambda im: face_recognition.face_encodings(im)[0], images))
15 |         self.nencodings = list(map(lambda im: face_recognition.face_encodings(im)[0], nimages))
16 |         self.threshold = threshold
17 |
18 |     def check(self, detected_face):
19 |         # we could use detected landmarks, but I did not manage to do so. TODO The copy/paste below should help
20 |         encodings = face_recognition.face_encodings(detected_face.image)
21 |         if encodings is not None and len(encodings) > 0:
22 |             distances = list(face_recognition.face_distance(self.encodings, encodings[0]))
23 |             distance = avg(distances)
24 |             mindistance = min(distances)
25 |             maxdistance = max(distances)
26 |             if distance > self.threshold:
27 |                 print("Distance above threshold: %f > %f" % (distance, self.threshold))
28 |                 return False
29 |             if len(self.nencodings) > 0:
30 |                 ndistances = list(face_recognition.face_distance(self.nencodings, encodings[0]))
31 |                 ndistance = avg(ndistances)
32 |                 nmindistance = min(ndistances)
33 |                 nmaxdistance = max(ndistances)
34 |                 if (mindistance > nmindistance):
35 |                     print("Distance to negative sample is smaller")
36 |                     return False
37 |                 if (distance > ndistance):
38 |                     print("Average distance to negative sample is smaller")
39 |                     return False
40 |                 # k-nn classifier
41 |                 K = min(5, min(len(distances), len(ndistances)) + 1)
42 |                 N = sum(list(map(lambda x: x[0],
43 |                                  list(sorted([(1, d) for d in distances] + [(0, d) for d in ndistances],
44 |                                              key=lambda x: x[1]))[:K])))
45 |                 ratio = N/K
46 |                 if (ratio < 0.5):
47 |                     print("K-nn is %.2f" % ratio)
48 |                     return False
49 |             return True
50 |         else:
51 |             print("No face encodings found")
52 |             return False
53 |
54 | # # Copy/Paste (mostly) from private method in face_recognition
55 | # face_recognition_model = face_recognition_models.face_recognition_model_location()
56 | # face_encoder = dlib.face_recognition_model_v1(face_recognition_model)
57 |
58 | # def convert(detected_face):
59 | #     return np.array(face_encoder.compute_face_descriptor(detected_face.image, detected_face.landmarks, 1))
60 | # # end of Copy/Paste
61 |
--------------------------------------------------------------------------------
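Nothing in this file shows the filter in use, so here is a hypothetical snippet; the reference image paths are made up, and `check()` only needs an object exposing an `image` attribute (an RGB array), as the extract pipeline's detected-face objects do:

```python
from types import SimpleNamespace
import face_recognition
from lib.FaceFilter import FaceFilter

# Each reference image should contain exactly one face (paths are placeholders)
ffilter = FaceFilter(reference_file_paths=["ref/cage_1.jpg", "ref/cage_2.jpg"],
                     nreference_file_paths=["ref/not_cage.jpg"],
                     threshold=0.6)

candidate = SimpleNamespace(image=face_recognition.load_image_file("candidate.jpg"))
print(ffilter.check(candidate))  # True if the face matches the references
```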
/lib/PixelShuffler.py:
--------------------------------------------------------------------------------
1 | # PixelShuffler layer for Keras
2 | # by t-ae
3 | # https://gist.github.com/t-ae/6e1016cc188104d123676ccef3264981
4 |
5 | from keras.utils import conv_utils
6 | from keras.engine.topology import Layer
7 | import keras.backend as K
8 |
9 |
10 | class PixelShuffler(Layer):
11 |     def __init__(self, size=(2, 2), data_format=None, **kwargs):
12 |         super(PixelShuffler, self).__init__(**kwargs)
13 |         self.data_format = K.normalize_data_format(data_format)
14 |         self.size = conv_utils.normalize_tuple(size, 2, 'size')
15 |
16 |     def call(self, inputs):
17 |
18 |         input_shape = K.int_shape(inputs)
19 |         if len(input_shape) != 4:
20 |             raise ValueError('Inputs should have rank ' +
21 |                              str(4) +
22 |                              '; Received input shape:', str(input_shape))
23 |
24 |         if self.data_format == 'channels_first':
25 |             batch_size, c, h, w = input_shape
26 |             if batch_size is None:
27 |                 batch_size = -1
28 |             rh, rw = self.size
29 |             oh, ow = h * rh, w * rw
30 |             oc = c // (rh * rw)
31 |
32 |             out = K.reshape(inputs, (batch_size, rh, rw, oc, h, w))
33 |             out = K.permute_dimensions(out, (0, 3, 4, 1, 5, 2))
34 |             out = K.reshape(out, (batch_size, oc, oh, ow))
35 |             return out
36 |
37 |         elif self.data_format == 'channels_last':
38 |             batch_size, h, w, c = input_shape
39 |             if batch_size is None:
40 |                 batch_size = -1
41 |             rh, rw = self.size
42 |             oh, ow = h * rh, w * rw
43 |             oc = c // (rh * rw)
44 |
45 |             out = K.reshape(inputs, (batch_size, h, w, rh, rw, oc))
46 |             out = K.permute_dimensions(out, (0, 1, 3, 2, 4, 5))
47 |             out = K.reshape(out, (batch_size, oh, ow, oc))
48 |             return out
49 |
50 |     def compute_output_shape(self, input_shape):
51 |
52 |         if len(input_shape) != 4:
53 |             raise ValueError('Inputs should have rank ' +
54 |                              str(4) +
55 |                              '; Received input shape:', str(input_shape))
56 |
57 |         if self.data_format == 'channels_first':
58 |             height = input_shape[2] * self.size[0] if input_shape[2] is not None else None
59 |             width = input_shape[3] * self.size[1] if input_shape[3] is not None else None
60 |             channels = input_shape[1] // self.size[0] // self.size[1]
61 |
62 |             if channels * self.size[0] * self.size[1] != input_shape[1]:
63 |                 raise ValueError('channels of input and size are incompatible')
64 |
65 |             return (input_shape[0],
66 |                     channels,
67 |                     height,
68 |                     width)
69 |
70 |         elif self.data_format == 'channels_last':
71 |             height = input_shape[1] * self.size[0] if input_shape[1] is not None else None
72 |             width = input_shape[2] * self.size[1] if input_shape[2] is not None else None
73 |             channels = input_shape[3] // self.size[0] // self.size[1]
74 |
75 |             if channels * self.size[0] * self.size[1] != input_shape[3]:
76 |                 raise ValueError('channels of input and size are incompatible')
77 |
78 |             return (input_shape[0],
79 |                     height,
80 |                     width,
81 |                     channels)
82 |
83 |     def get_config(self):
84 |         config = {'size': self.size,
85 |                   'data_format': self.data_format}
86 |         base_config = super(PixelShuffler, self).get_config()
87 |
88 |         return dict(list(base_config.items()) + list(config.items()))
89 |
--------------------------------------------------------------------------------
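A minimal sketch of the layer in isolation, assuming the same Keras 2.x-era environment the imports above target; the only constraint is that the channel count must be divisible by the shuffle factor squared:

```python
from keras.layers import Input
from keras.models import Model
from lib.PixelShuffler import PixelShuffler

inp = Input(shape=(32, 32, 12))          # 12 channels = 3 * (2 * 2)
out = PixelShuffler(size=(2, 2))(inp)    # rearranges channels to (64, 64, 3)
Model(inp, out).summary()
```

This is the sub-pixel convolution trick: spatial resolution is bought by folding channel depth into height and width, which is why the decoders in the model plugins pair it with a convolution that first multiplies the channel count.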
/lib/Serializer.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | """
3 | Library providing convenient classes and methods for writing data to files.
4 | """
5 | import sys
6 | import json
7 | import pickle
8 |
9 | try:
10 |     import yaml
11 | except ImportError:
12 |     yaml = None
13 |
14 |
15 | class Serializer(object):
16 |     ext = ""
17 |     woptions = ""
18 |     roptions = ""
19 |
20 |     @classmethod
21 |     def marshal(cls, input_data):
22 |         raise NotImplementedError()
23 |
24 |     @classmethod
25 |     def unmarshal(cls, input_string):
26 |         raise NotImplementedError()
27 |
28 |
29 | class YAMLSerializer(Serializer):
30 |     ext = "yml"
31 |     woptions = "w"
32 |     roptions = "r"
33 |
34 |     @classmethod
35 |     def marshal(cls, input_data):
36 |         return yaml.dump(input_data, default_flow_style=False)
37 |
38 |     @classmethod
39 |     def unmarshal(cls, input_string):
40 |         return yaml.load(input_string)
41 |
42 |
43 | class JSONSerializer(Serializer):
44 |     ext = "json"
45 |     woptions = "w"
46 |     roptions = "r"
47 |
48 |     @classmethod
49 |     def marshal(cls, input_data):
50 |         return json.dumps(input_data, indent=2)
51 |
52 |     @classmethod
53 |     def unmarshal(cls, input_string):
54 |         return json.loads(input_string)
55 |
56 |
57 | class PickleSerializer(Serializer):
58 |     ext = "p"
59 |     woptions = "wb"
60 |     roptions = "rb"
61 |
62 |     @classmethod
63 |     def marshal(cls, input_data):
64 |         return pickle.dumps(input_data)
65 |
66 |     @classmethod
67 |     def unmarshal(cls, input_bytes):
68 |         return pickle.loads(input_bytes)
69 |
70 |
71 | def get_serializer(serializer):
72 |     if serializer == "json":
73 |         return JSONSerializer
74 |     elif serializer == "pickle":
75 |         return PickleSerializer
76 |     elif serializer == "yaml" and yaml is not None:
77 |         return YAMLSerializer
78 |     elif serializer == "yaml" and yaml is None:
79 |         print("You must have PyYAML installed to use YAML as the serializer.\n"
80 |               "Switching to JSON as the serializer.", file=sys.stderr)
81 |         return JSONSerializer
82 |
83 |
84 | def get_serializer_from_ext(ext):
85 |     if ext == ".json":
86 |         return JSONSerializer
87 |     elif ext == ".p":
88 |         return PickleSerializer
89 |     elif ext in (".yaml", ".yml") and yaml is not None:
90 |         return YAMLSerializer
91 |     elif ext in (".yaml", ".yml") and yaml is None:
92 |         print("You must have PyYAML installed to use YAML as the serializer.\n"
93 |               "Switching to JSON as the serializer.", file=sys.stderr)
94 |         return JSONSerializer
--------------------------------------------------------------------------------
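A small round-trip sketch using only the API defined above; the file name and payload are arbitrary:

```python
from lib.Serializer import get_serializer

serializer = get_serializer("json")
state = {"iterations": 100, "loss_a": 0.03, "loss_b": 0.028}

filename = "state." + serializer.ext           # ext/woptions/roptions come
with open(filename, serializer.woptions) as out_file:   # from the class
    out_file.write(serializer.marshal(state))

with open(filename, serializer.roptions) as in_file:
    assert serializer.unmarshal(in_file.read()) == state
```

Note that the pickle variant opens files in binary mode (`"wb"`/`"rb"`), which is why the open-mode strings live on the serializer classes rather than being hard-coded by callers.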
/lib/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/lib/__init__.py
--------------------------------------------------------------------------------
/lib/align_eyes.py:
--------------------------------------------------------------------------------
1 | # Code borrowed from https://github.com/jrosebr1/imutils/blob/d5cb29d02cf178c399210d5a139a821dfb0ae136/imutils/face_utils/helpers.py
2 | """
3 | The MIT License (MIT)
4 |
5 | Copyright (c) 2015-2016 Adrian Rosebrock, http://www.pyimagesearch.com
6 |
7 | Permission is hereby granted, free of charge, to any person obtaining a copy
8 | of this software and associated documentation files (the "Software"), to deal
9 | in the Software without restriction, including without limitation the rights
10 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 | copies of the Software, and to permit persons to whom the Software is
12 | furnished to do so, subject to the following conditions:
13 |
14 | The above copyright notice and this permission notice shall be included in
15 | all copies or substantial portions of the Software.
16 |
17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23 | THE SOFTWARE.
24 | """
25 |
26 | from collections import OrderedDict
27 | import numpy as np
28 | import cv2
29 |
30 | # define a dictionary that maps the indexes of the facial
31 | # landmarks to specific face regions
32 | FACIAL_LANDMARKS_IDXS = OrderedDict([
33 |     ("mouth", (48, 68)),
34 |     ("right_eyebrow", (17, 22)),
35 |     ("left_eyebrow", (22, 27)),
36 |     ("right_eye", (36, 42)),
37 |     ("left_eye", (42, 48)),
38 |     ("nose", (27, 36)),
39 |     ("jaw", (0, 17)),
40 |     ("chin", (8, 11))
41 | ])
42 |
43 | # Returns a rotation matrix that when applied to the 68 input facial landmarks
44 | # results in landmarks with eyes aligned horizontally
45 | def align_eyes(landmarks, size):
46 |     desiredLeftEye = (0.35, 0.35)  # (y, x) value
47 |     desiredFaceWidth = desiredFaceHeight = size
48 |
49 |     # extract the left and right eye (x, y)-coordinates
50 |     (lStart, lEnd) = FACIAL_LANDMARKS_IDXS["left_eye"]
51 |     (rStart, rEnd) = FACIAL_LANDMARKS_IDXS["right_eye"]
52 |     leftEyePts = landmarks[lStart:lEnd]
53 |     rightEyePts = landmarks[rStart:rEnd]
54 |
55 |     # compute the center of mass for each eye
56 |     leftEyeCenter = leftEyePts.mean(axis=0).astype("int")
57 |     rightEyeCenter = rightEyePts.mean(axis=0).astype("int")
58 |
59 |     # compute the angle between the eye centroids
60 |     dY = rightEyeCenter[0, 1] - leftEyeCenter[0, 1]
61 |     dX = rightEyeCenter[0, 0] - leftEyeCenter[0, 0]
62 |     angle = np.degrees(np.arctan2(dY, dX)) - 180
63 |
64 |     # compute center (x, y)-coordinates (i.e., the median point)
65 |     # between the two eyes in the input image
66 |     eyesCenter = ((leftEyeCenter[0, 0] + rightEyeCenter[0, 0]) // 2, (leftEyeCenter[0, 1] + rightEyeCenter[0, 1]) // 2)
67 |
68 |     # grab the rotation matrix for rotating and scaling the face
69 |     M = cv2.getRotationMatrix2D(eyesCenter, angle, 1.0)
70 |
71 |     return M
--------------------------------------------------------------------------------
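`align_eyes()` only returns the rotation matrix; applying it is left to the caller (lib/aligner.py composes it with the umeyama transform). A hypothetical standalone use, where `landmark_points` (68 (x, y) pairs) and `face_crop` are placeholder inputs; the function indexes matrix-style, so the landmarks are wrapped in `np.matrix`:

```python
import cv2
import numpy as np
from lib.align_eyes import align_eyes

size = 256
landmarks = np.matrix(landmark_points)   # 68x2 matrix of (x, y) points
rotation = align_eyes(landmarks, size)   # 2x3 affine rotation matrix
levelled = cv2.warpAffine(face_crop, rotation, (size, size))
```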
/lib/aligner.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | """ Aligner for faceswap.py """
3 |
4 | import cv2
5 | import numpy as np
6 |
7 | from lib.umeyama import umeyama
8 | from lib.align_eyes import align_eyes as func_align_eyes, FACIAL_LANDMARKS_IDXS
9 |
10 | MEAN_FACE_X = np.array([
11 |     0.000213256, 0.0752622, 0.18113, 0.29077, 0.393397, 0.586856, 0.689483,
12 |     0.799124, 0.904991, 0.98004, 0.490127, 0.490127, 0.490127, 0.490127,
13 |     0.36688, 0.426036, 0.490127, 0.554217, 0.613373, 0.121737, 0.187122,
14 |     0.265825, 0.334606, 0.260918, 0.182743, 0.645647, 0.714428, 0.793132,
15 |     0.858516, 0.79751, 0.719335, 0.254149, 0.340985, 0.428858, 0.490127,
16 |     0.551395, 0.639268, 0.726104, 0.642159, 0.556721, 0.490127, 0.423532,
17 |     0.338094, 0.290379, 0.428096, 0.490127, 0.552157, 0.689874, 0.553364,
18 |     0.490127, 0.42689])
19 |
20 | MEAN_FACE_Y = np.array([
21 |     0.106454, 0.038915, 0.0187482, 0.0344891, 0.0773906, 0.0773906, 0.0344891,
22 |     0.0187482, 0.038915, 0.106454, 0.203352, 0.307009, 0.409805, 0.515625,
23 |     0.587326, 0.609345, 0.628106, 0.609345, 0.587326, 0.216423, 0.178758,
24 |     0.179852, 0.231733, 0.245099, 0.244077, 0.231733, 0.179852, 0.178758,
25 |     0.216423, 0.244077, 0.245099, 0.780233, 0.745405, 0.727388, 0.742578,
26 |     0.727388, 0.745405, 0.780233, 0.864805, 0.902192, 0.909281, 0.902192,
27 |     0.864805, 0.784792, 0.778746, 0.785343, 0.778746, 0.784792, 0.824182,
28 |     0.831803, 0.824182])
29 |
30 | LANDMARKS_2D = np.stack([MEAN_FACE_X, MEAN_FACE_Y], axis=1)
31 |
32 |
33 | class Extract():
34 |     """ Based on the original https://www.reddit.com/r/deepfakes/
35 |         code sample + contribs """
36 |
37 |     def extract(self, image, face, size, align_eyes):
38 |         """ Extract a face from an image """
39 |         alignment = get_align_mat(face, size, align_eyes)
40 |         extracted = self.transform(image, alignment, size, 48)
41 |         return extracted, alignment
42 |
43 |     @staticmethod
44 |     def transform_matrix(mat, size, padding):
45 |         """ Transform the matrix for current size and padding """
46 |         matrix = mat * (size - 2 * padding)
47 |         matrix[:, 2] += padding
48 |         return matrix
49 |
50 |     def transform(self, image, mat, size, padding=0):
51 |         """ Transform Image """
52 |         matrix = self.transform_matrix(mat, size, padding)
53 |         return cv2.warpAffine(  # pylint: disable=no-member
54 |             image, matrix, (size, size))
55 |
56 |     def transform_points(self, points, mat, size, padding=0):
57 |         """ Transform points along matrix """
58 |         matrix = self.transform_matrix(mat, size, padding)
59 |         points = np.expand_dims(points, axis=1)
60 |         points = cv2.transform(  # pylint: disable=no-member
61 |             points, matrix, points.shape)
62 |         return np.squeeze(points)
63 |
64 |     def get_original_roi(self, mat, size, padding=0):
65 |         """ Return the square aligned box location on the original
66 |             image """
67 |         matrix = self.transform_matrix(mat, size, padding)
68 |         points = np.array([[0, 0],
69 |                            [0, size - 1],
70 |                            [size - 1, size - 1],
71 |                            [size - 1, 0]], np.int32)
72 |         points = points.reshape((-1, 1, 2))
73 |         matrix = cv2.invertAffineTransform(matrix)  # pylint: disable=no-member
74 |         return cv2.transform(points, matrix)  # pylint: disable=no-member
75 |
76 |     @staticmethod
77 |     def get_feature_mask(aligned_landmarks_68, size,
78 |                          padding=0, dilation=30):
79 |         """ Return the face feature mask """
80 |         # pylint: disable=no-member
81 |         scale = size - 2*padding
82 |         translation = padding
83 |         pad_mat = np.matrix([[scale, 0.0, translation],
84 |                              [0.0, scale, translation]])
85 |         aligned_landmarks_68 = np.expand_dims(aligned_landmarks_68, axis=1)
86 |         aligned_landmarks_68 = cv2.transform(aligned_landmarks_68,
87 |                                              pad_mat,
88 |                                              aligned_landmarks_68.shape)
89 |         aligned_landmarks_68 = np.squeeze(aligned_landmarks_68)
90 |
91 |         (l_start, l_end) = FACIAL_LANDMARKS_IDXS["left_eye"]
92 |         (r_start, r_end) = FACIAL_LANDMARKS_IDXS["right_eye"]
93 |         (m_start, m_end) = FACIAL_LANDMARKS_IDXS["mouth"]
94 |         (n_start, n_end) = FACIAL_LANDMARKS_IDXS["nose"]
95 |         (lb_start, lb_end) = FACIAL_LANDMARKS_IDXS["left_eyebrow"]
96 |         (rb_start, rb_end) = FACIAL_LANDMARKS_IDXS["right_eyebrow"]
97 |         (c_start, c_end) = FACIAL_LANDMARKS_IDXS["chin"]
98 |
99 |         l_eye_points = aligned_landmarks_68[l_start:l_end].tolist()
100 |         l_brow_points = aligned_landmarks_68[lb_start:lb_end].tolist()
101 |         r_eye_points = aligned_landmarks_68[r_start:r_end].tolist()
102 |         r_brow_points = aligned_landmarks_68[rb_start:rb_end].tolist()
103 |         nose_points = aligned_landmarks_68[n_start:n_end].tolist()
104 |         chin_points = aligned_landmarks_68[c_start:c_end].tolist()
105 |         mouth_points = aligned_landmarks_68[m_start:m_end].tolist()
106 |         l_eye_points = l_eye_points + l_brow_points
107 |         r_eye_points = r_eye_points + r_brow_points
108 |         mouth_points = mouth_points + nose_points + chin_points
109 |
110 |         l_eye_hull = cv2.convexHull(np.array(l_eye_points).reshape(
111 |             (-1, 2)).astype(int)).flatten().reshape((-1, 2))
112 |         r_eye_hull = cv2.convexHull(np.array(r_eye_points).reshape(
113 |             (-1, 2)).astype(int)).flatten().reshape((-1, 2))
114 |         mouth_hull = cv2.convexHull(np.array(mouth_points).reshape(
115 |             (-1, 2)).astype(int)).flatten().reshape((-1, 2))
116 |
117 |         mask = np.zeros((size, size, 3), dtype=float)
118 |         cv2.fillConvexPoly(mask, l_eye_hull, (1, 1, 1))
119 |         cv2.fillConvexPoly(mask, r_eye_hull, (1, 1, 1))
120 |         cv2.fillConvexPoly(mask, mouth_hull, (1, 1, 1))
121 |
122 |         if dilation > 0:
123 |             kernel = np.ones((dilation, dilation), np.uint8)
124 |             mask = cv2.dilate(mask, kernel, iterations=1)
125 |
126 |         return mask
127 |
128 |
129 | def get_align_mat(face, size, should_align_eyes):
130 |     """ Return the alignment Matrix """
131 |     mat_umeyama = umeyama(np.array(face.landmarks_as_xy()[17:]),
132 |                           LANDMARKS_2D,
133 |                           True)[0:2]
134 |
135 |     if should_align_eyes is False:
136 |         return mat_umeyama
137 |
138 |     mat_umeyama = mat_umeyama * size
139 |
140 |     # Convert to matrix
141 |     landmarks = np.matrix(face.landmarks_as_xy())
142 |
143 |     # cv2 expects points to be in the form
144 |     # np.array([ [[x1, y1]], [[x2, y2]], ... ]), we'll expand the dim
145 |     landmarks = np.expand_dims(landmarks, axis=1)
146 |
147 |     # Align the landmarks using umeyama
148 |     umeyama_landmarks = cv2.transform(  # pylint: disable=no-member
149 |         landmarks,
150 |         mat_umeyama,
151 |         landmarks.shape)
152 |
153 |     # Determine a rotation matrix to align eyes horizontally
154 |     mat_align_eyes = func_align_eyes(umeyama_landmarks, size)
155 |
156 |     # Extend the 2x3 transform matrices to 3x3 so we can multiply them
157 |     # and combine them as one
158 |     mat_umeyama = np.matrix(mat_umeyama)
159 |     mat_umeyama.resize((3, 3))
160 |     mat_align_eyes = np.matrix(mat_align_eyes)
161 |     mat_align_eyes.resize((3, 3))
162 |     mat_umeyama[2] = mat_align_eyes[2] = [0, 0, 1]
163 |
164 |     # Combine the umeyama transform with the extra rotation matrix
165 |     transform_mat = mat_align_eyes * mat_umeyama
166 |
167 |     # Remove the extra row added, shape needs to be 2x3
168 |     transform_mat = np.delete(transform_mat, 2, 0)
169 |     transform_mat = transform_mat / size
170 |     return transform_mat
--------------------------------------------------------------------------------
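A hedged sketch of the round trip through this module; `frame` (a BGR numpy array) and `face` (a DetectedFace-style object exposing `landmarks_as_xy()`, see lib/faces_detect.py) are assumed inputs:

```python
from lib.aligner import Extract

extractor = Extract()

# Warp the detected face to the aligned training square
aligned_face, matrix = extractor.extract(frame, face, size=256,
                                         align_eyes=False)

# Recover where that aligned square sits in the original frame
# (padding=48 matches the value extract() bakes into the warp)
roi = extractor.get_original_roi(matrix, 256, padding=48)
```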
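A hypothetical filter over extracted faces using the module below; the threshold of 100 is a commonly used starting point for the variance-of-Laplacian focus measure, not a value this repo prescribes:

```python
import cv2
from lib.detect_blur import is_blurry

image = cv2.imread("data/trump/face_001.png")   # made-up path
blurry, focus_measure = is_blurry(image, threshold=100.0)
if blurry:
    print("Rejecting blurry face (focus measure %.1f)" % focus_measure)
```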
/lib/detect_blur.py:
--------------------------------------------------------------------------------
1 | import cv2
2 |
3 | def variance_of_laplacian(image):
4 |     # compute the Laplacian of the image and then return the focus
5 |     # measure, which is simply the variance of the Laplacian
6 |     return cv2.Laplacian(image, cv2.CV_64F).var()
7 |
8 | def is_blurry(image, threshold):
9 |     # Convert to grayscale, and compute the
10 |     # focus measure of the image using the
11 |     # Variance of Laplacian method
12 |     gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
13 |     fm = variance_of_laplacian(gray)
14 |
15 |     # if the focus measure is less than the supplied threshold,
16 |     # then the image should be considered "blurry"
17 |     return (fm < threshold, fm)
18 |
--------------------------------------------------------------------------------
/lib/faces_detect.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin python3
2 | """ Face and landmarks detection for faceswap.py """
3 |
4 | from dlib import rectangle as d_rectangle  # pylint: disable=no-name-in-module
5 | from lib.aligner import Extract as AlignerExtract, get_align_mat
6 |
7 |
8 | class DetectedFace():
9 |     """ Detected face and landmark information """
10 |     def __init__(  # pylint: disable=invalid-name
11 |             self, image=None, x=None, w=None, y=None, h=None,
12 |             frame_dims=None, landmarksXY=None):
13 |         self.image = image
14 |         self.x = x
15 |         self.w = w
16 |         self.y = y
17 |         self.h = h
18 |         self.frame_dims = frame_dims
19 |         self.landmarksXY = landmarksXY
20 |
21 |         self.aligned = dict()
22 |
23 |     def landmarks_as_xy(self):
24 |         """ Landmarks as XY """
25 |         return self.landmarksXY
26 |
27 |     def to_dlib_rect(self):
28 |         """ Return Bounding Box as Dlib Rectangle """
29 |         left = self.x
30 |         top = self.y
31 |         right = self.x + self.w
32 |         bottom = self.y + self.h
33 |         return d_rectangle(left, top, right, bottom)
34 |
35 |     def from_dlib_rect(self, d_rect, image=None):
36 |         """ Set Bounding Box from a Dlib Rectangle """
37 |         if not isinstance(d_rect, d_rectangle):
38 |             raise ValueError("Supplied Bounding Box is not a dlib.rectangle.")
39 |         self.x = d_rect.left()
40 |         self.w = d_rect.right() - d_rect.left()
41 |         self.y = d_rect.top()
42 |         self.h = d_rect.bottom() - d_rect.top()
43 |         if image is not None and image.any():
44 |             self.image_to_face(image)
45 |
46 |     def image_to_face(self, image):
47 |         """ Crop an image around bounding box to the face
48 |             and capture its dimensions """
49 |         self.image = image[self.y: self.y + self.h,
50 |                            self.x: self.x + self.w]
51 |
52 |     def to_alignment(self):
53 |         """ Convert a detected face to alignment dict """
54 |         alignment = dict()
55 |         alignment["x"] = self.x
56 |         alignment["w"] = self.w
57 |         alignment["y"] = self.y
58 |         alignment["h"] = self.h
59 |         alignment["frame_dims"] = self.frame_dims
60 |         alignment["landmarksXY"] = self.landmarksXY
61 |         return alignment
62 |
63 |     def from_alignment(self, alignment, image=None):
64 |         """ Convert a face alignment to detected face object """
65 |         self.x = alignment["x"]
66 |         self.w = alignment["w"]
67 |         self.y = alignment["y"]
68 |         self.h = alignment["h"]
69 |         self.frame_dims = alignment["frame_dims"]
70 |         self.landmarksXY = alignment["landmarksXY"]
71 |         if image is not None and image.any():
72 |             self.image_to_face(image)
73 |
74 |     # <<< Aligned Face methods and properties >>> #
75 |     def load_aligned(self, image, size=256, padding=48, align_eyes=False):
76 |         """ No need to load aligned information for all uses of this
77 |             class, so only call this to load the information for easy
78 |             reference to aligned properties for this face """
79 |         self.aligned["size"] = size
80 |         self.aligned["padding"] = padding
81 |         self.aligned["align_eyes"] = align_eyes
82 |         self.aligned["matrix"] = get_align_mat(self, size, align_eyes)
83 |         self.aligned["face"] = AlignerExtract().transform(
84 |             image,
85 |             self.aligned["matrix"],
86 |             size,
87 |             padding)
88 |
89 |     @property
90 |     def original_roi(self):
91 |         """ Return the square aligned box location on the original
92 |             image """
93 |         return AlignerExtract().get_original_roi(self.aligned["matrix"],
94 |                                                  self.aligned["size"],
95 |                                                  self.aligned["padding"])
96 |
97 |     @property
98 |     def aligned_landmarks(self):
99 |         """ Return the landmarks location transposed to extracted face """
100 |         return AlignerExtract().transform_points(self.landmarksXY,
101 |                                                  self.aligned["matrix"],
102 |                                                  self.aligned["size"],
103 |                                                  self.aligned["padding"])
104 |
105 |     @property
106 |     def aligned_face(self):
107 |         """ Return aligned detected face """
108 |         return self.aligned["face"]
109 |
110 |     @property
111 |     def adjusted_matrix(self):
112 |         """ Return adjusted matrix for size/padding combination """
113 |         return AlignerExtract().transform_matrix(self.aligned["matrix"],
114 |                                                  self.aligned["size"],
115 |                                                  self.aligned["padding"])
--------------------------------------------------------------------------------
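A hedged sketch of the serialize/restore round trip this class supports; `frame` (a full BGR frame array) and `landmarks` (a list of 68 (x, y) points) are placeholder inputs:

```python
from lib.faces_detect import DetectedFace

face = DetectedFace(x=64, w=128, y=48, h=128,
                    frame_dims=(720, 1280), landmarksXY=landmarks)
alignment = face.to_alignment()          # plain dict, easy to persist

restored = DetectedFace()
restored.from_alignment(alignment, image=frame)
restored.load_aligned(frame, size=256, padding=48)
training_patch = restored.aligned_face   # 256x256 aligned crop
```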
/lib/gpu_stats.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin python3
2 | """ Information on available Nvidia GPUs """
3 |
4 | import platform
5 |
6 | if platform.system() == 'Darwin':
7 |     import pynvx
8 |     is_macos = True
9 | else:
10 |     import pynvml
11 |     is_macos = False
12 |
13 |
14 | class GPUStats():
15 |     """ Holds information about system GPU(s) """
16 |     def __init__(self):
17 |         self.verbose = False
18 |
19 |         self.initialized = False
20 |         self.device_count = 0
21 |         self.handles = None
22 |         self.driver = None
23 |         self.devices = None
24 |         self.vram = None
25 |
26 |         self.initialize()
27 |
28 |         if self.device_count == 0:
29 |             return
30 |
31 |         self.driver = self.get_driver()
32 |         self.devices = self.get_devices()
33 |         self.vram = self.get_vram()
34 |
35 |         self.shutdown()
36 |
37 |     def initialize(self):
38 |         """ Initialize pynvml """
39 |         if not self.initialized:
40 |             if is_macos:
41 |                 try:
42 |                     pynvx.cudaInit()
43 |                 except RuntimeError:
44 |                     self.initialized = True
45 |                     return
46 |             else:
47 |                 try:
48 |                     pynvml.nvmlInit()
49 |                 except pynvml.NVMLError_LibraryNotFound:
50 |                     self.initialized = True
51 |                     return
52 |             self.initialized = True
53 |             self.get_device_count()
54 |             self.get_handles()
55 |
56 |     def shutdown(self):
57 |         """ Shutdown pynvml """
58 |         if self.initialized:
59 |             self.handles = None
60 |             if not is_macos:
61 |                 pynvml.nvmlShutdown()
62 |             self.initialized = False
63 |
64 |     def get_device_count(self):
65 |         """ Return count of Nvidia devices """
66 |         if is_macos:
67 |             self.device_count = pynvx.cudaDeviceGetCount(ignore=True)
68 |         else:
69 |             try:
70 |                 self.device_count = pynvml.nvmlDeviceGetCount()
71 |             except pynvml.NVMLError:
72 |                 self.device_count = 0
73 |
74 |     def get_handles(self):
75 |         """ Return all listed Nvidia handles """
76 |         if is_macos:
77 |             self.handles = pynvx.cudaDeviceGetHandles(ignore=True)
78 |         else:
79 |             self.handles = [pynvml.nvmlDeviceGetHandleByIndex(i)
80 |                             for i in range(self.device_count)]
81 |
82 |     @staticmethod
83 |     def get_driver():
84 |         """ Get the driver version """
85 |         if is_macos:
86 |             driver = pynvx.cudaSystemGetDriverVersion(ignore=True)
87 |         else:
88 |             try:
89 |                 driver = pynvml.nvmlSystemGetDriverVersion().decode("utf-8")
90 |             except pynvml.NVMLError:
91 |                 driver = "No Nvidia driver found"
92 |         return driver
93 |
94 |     def get_devices(self):
95 |         """ Return name of devices """
96 |         self.initialize()
97 |         if is_macos:
98 |             names = [pynvx.cudaGetName(handle, ignore=True)
99 |                      for handle in self.handles]
100 |         else:
101 |             names = [pynvml.nvmlDeviceGetName(handle).decode("utf-8")
102 |                      for handle in self.handles]
103 |         return names
104 |
105 |     def get_vram(self):
106 |         """ Return total vram in megabytes per device """
107 |         self.initialize()
108 |         if is_macos:
109 |             vram = [pynvx.cudaGetMemTotal(handle, ignore=True) / (1024 * 1024)
110 |                     for handle in self.handles]
111 |         else:
112 |             vram = [pynvml.nvmlDeviceGetMemoryInfo(handle).total /
113 |                     (1024 * 1024)
114 |                     for handle in self.handles]
115 |         return vram
116 |
117 |     def get_used(self):
118 |         """ Return the vram in use """
119 |         self.initialize()
120 |         if is_macos:
121 |             vram = [pynvx.cudaGetMemUsed(handle, ignore=True) / (1024 * 1024)
122 |                     for handle in self.handles]
123 |         else:
124 |             vram = [pynvml.nvmlDeviceGetMemoryInfo(handle).used / (1024 * 1024)
125 |                     for handle in self.handles]
126 |         self.shutdown()
127 |
128 |         if self.verbose:
129 |             print("GPU VRAM used: {}".format(vram))
130 |
131 |         return vram
132 |
133 |     def get_free(self):
134 |         """ Return the vram available """
135 |         self.initialize()
136 |         if is_macos:
137 |             vram = [pynvx.cudaGetMemFree(handle, ignore=True) / (1024 * 1024)
138 |                     for handle in self.handles]
139 |         else:
140 |             vram = [pynvml.nvmlDeviceGetMemoryInfo(handle).free / (1024 * 1024)
141 |                     for handle in self.handles]
142 |         self.shutdown()
143 |         return vram
144 |
145 |     def get_card_most_free(self):
146 |         """ Return the card and available VRAM for card with
147 |             most VRAM free """
148 |         if self.device_count == 0:
149 |             return {"card_id": -1,
150 |                     "device": "No Nvidia devices found",
151 |                     "free": 2048,
152 |                     "total": 2048}
153 |         free_vram = self.get_free()
154 |         vram_free = max(free_vram)
155 |         card_id = free_vram.index(vram_free)
156 |         return {"card_id": card_id,
157 |                 "device": self.devices[card_id],
158 |                 "free": vram_free,
159 |                 "total": self.vram[card_id]}
160 |
161 |     def print_info(self):
162 |         """ Output GPU info in verbose mode """
163 |         print("GPU Driver: {}".format(self.driver))
164 |         print("GPU Device count: {}".format(self.device_count))
165 |         print("GPU Devices: {}".format(self.devices))
166 |         print("GPU VRAM: {}".format(self.vram))
167 |
--------------------------------------------------------------------------------
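A hypothetical query for picking a card, using only the API above; note that `get_card_most_free()` deliberately reports a 2048MB dummy card when no Nvidia device is present:

```python
from lib.gpu_stats import GPUStats

stats = GPUStats()
card = stats.get_card_most_free()
print("GPU %s (%s): %dMB free of %dMB"
      % (card["card_id"], card["device"], card["free"], card["total"]))
```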
self.devices[card_id], 158 | "free": vram_free, 159 | "total": self.vram[card_id]} 160 | 161 | def print_info(self): 162 | """ Output GPU info in verbose mode """ 163 | print("GPU Driver: {}".format(self.driver)) 164 | print("GPU Device count: {}".format(self.device_count)) 165 | print("GPU Devices: {}".format(self.devices)) 166 | print("GPU VRAM: {}".format(self.vram)) 167 | -------------------------------------------------------------------------------- /lib/gui/.cache/icons/clear.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/lib/gui/.cache/icons/clear.png -------------------------------------------------------------------------------- /lib/gui/.cache/icons/graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/lib/gui/.cache/icons/graph.png -------------------------------------------------------------------------------- /lib/gui/.cache/icons/move.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/lib/gui/.cache/icons/move.png -------------------------------------------------------------------------------- /lib/gui/.cache/icons/open_file.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/lib/gui/.cache/icons/open_file.png -------------------------------------------------------------------------------- /lib/gui/.cache/icons/open_folder.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/lib/gui/.cache/icons/open_folder.png -------------------------------------------------------------------------------- /lib/gui/.cache/icons/reset.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/lib/gui/.cache/icons/reset.png -------------------------------------------------------------------------------- /lib/gui/.cache/icons/save.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/lib/gui/.cache/icons/save.png -------------------------------------------------------------------------------- /lib/gui/.cache/icons/zoom.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/lib/gui/.cache/icons/zoom.png -------------------------------------------------------------------------------- /lib/gui/.cache/preview/.keep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/lib/gui/.cache/preview/.keep -------------------------------------------------------------------------------- /lib/gui/__init__.py: 
-------------------------------------------------------------------------------- 1 | from lib.gui.command import CommandNotebook 2 | from lib.gui.display import DisplayNotebook 3 | from lib.gui.options import CliOptions, Config 4 | from lib.gui.stats import CurrentSession 5 | from lib.gui.statusbar import StatusBar 6 | from lib.gui.utils import ConsoleOut, Images 7 | from lib.gui.wrapper import ProcessWrapper 8 | -------------------------------------------------------------------------------- /lib/gui/display.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin python3 2 | """ Display Frame of the Faceswap GUI 3 | 4 | What is displayed in the Display Frame varies 5 | depending on what tasked is being run """ 6 | 7 | import tkinter as tk 8 | from tkinter import ttk 9 | 10 | from .display_analysis import Analysis 11 | from .display_command import GraphDisplay, PreviewExtract, PreviewTrain 12 | 13 | 14 | class DisplayNotebook(ttk.Notebook): 15 | """ The display tabs """ 16 | 17 | def __init__(self, parent, session, tk_vars, scaling_factor): 18 | ttk.Notebook.__init__(self, parent, width=780) 19 | parent.add(self) 20 | 21 | self.wrapper_var = tk_vars["display"] 22 | self.runningtask = tk_vars["runningtask"] 23 | self.session = session 24 | 25 | self.set_wrapper_var_trace() 26 | self.add_static_tabs(scaling_factor) 27 | self.static_tabs = [child for child in self.tabs()] 28 | 29 | def set_wrapper_var_trace(self): 30 | """ Set the trigger actions for the display vars 31 | when they have been triggered in the Process Wrapper """ 32 | self.wrapper_var.trace("w", self.update_displaybook) 33 | 34 | def add_static_tabs(self, scaling_factor): 35 | """ Add tabs that are permanently available """ 36 | for tab in ("job queue", "analysis"): 37 | if tab == "job queue": 38 | continue # Not yet implemented 39 | if tab == "analysis": 40 | helptext = {"stats": 41 | "Summary statistics for each training session"} 42 | frame = Analysis(self, tab, helptext, scaling_factor) 43 | else: 44 | frame = self.add_frame() 45 | self.add(frame, text=tab.title()) 46 | 47 | def add_frame(self): 48 | """ Add a single frame for holding tab's contents """ 49 | frame = ttk.Frame(self) 50 | frame.pack(side=tk.LEFT, fill=tk.BOTH, expand=True, padx=5, pady=5) 51 | return frame 52 | 53 | def command_display(self, command): 54 | """ Select what to display based on incoming 55 | command """ 56 | build_tabs = getattr(self, "{}_tabs".format(command)) 57 | build_tabs() 58 | 59 | def extract_tabs(self): 60 | """ Build the extract tabs """ 61 | helptext = ("Updates preview from output every 5 " 62 | "seconds to limit disk contention") 63 | PreviewExtract(self, "preview", helptext, 5000) 64 | 65 | def train_tabs(self): 66 | """ Build the train tabs """ 67 | for tab in ("graph", "preview"): 68 | if tab == "graph": 69 | helptext = "Graph showing Loss vs Iterations" 70 | GraphDisplay(self, "graph", helptext, 5000) 71 | elif tab == "preview": 72 | helptext = "Training preview. 
Updated on every save iteration" 73 | PreviewTrain(self, "preview", helptext, 5000) 74 | 75 | def convert_tabs(self): 76 | """ Build the convert tabs 77 | Currently identical to Extract, so just call that """ 78 | self.extract_tabs() 79 | 80 | def remove_tabs(self): 81 | """ Remove all command specific tabs """ 82 | for child in self.tabs(): 83 | if child not in self.static_tabs: 84 | self.forget(child) 85 | 86 | def update_displaybook(self, *args): 87 | """ Set the display tabs based on executing task """ 88 | command = self.wrapper_var.get() 89 | self.remove_tabs() 90 | if not command or command not in ("extract", "train", "convert"): 91 | return 92 | self.command_display(command) 93 | -------------------------------------------------------------------------------- /lib/gui/display_command.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin python3 2 | """ Command specific tabs of Display Frame of the Faceswap GUI """ 3 | import datetime 4 | import os 5 | import tkinter as tk 6 | 7 | from tkinter import ttk 8 | 9 | 10 | from .display_graph import TrainingGraph 11 | from .display_page import DisplayOptionalPage 12 | from .tooltip import Tooltip 13 | from .stats import Calculations 14 | from .utils import Images, FileHandler 15 | 16 | 17 | class PreviewExtract(DisplayOptionalPage): 18 | """ Tab to display output preview images for extract and convert """ 19 | 20 | def display_item_set(self): 21 | """ Load the latest preview if available """ 22 | Images().load_latest_preview() 23 | self.display_item = Images().previewoutput 24 | 25 | def display_item_process(self): 26 | """ Display the preview """ 27 | if not self.subnotebook.children: 28 | self.add_child() 29 | else: 30 | self.update_child() 31 | 32 | def add_child(self): 33 | """ Add the preview label child """ 34 | preview = self.subnotebook_add_page(self.tabname, widget=None) 35 | lblpreview = ttk.Label(preview, image=Images().previewoutput[1]) 36 | lblpreview.pack(side=tk.TOP, anchor=tk.NW) 37 | Tooltip(lblpreview, text=self.helptext, wraplength=200) 38 | 39 | def update_child(self): 40 | """ Update the preview image on the label """ 41 | for widget in self.subnotebook_get_widgets(): 42 | widget.configure(image=Images().previewoutput[1]) 43 | 44 | def save_items(self): 45 | """ Open save dialogue and save preview """ 46 | location = FileHandler("dir", None).retfile 47 | if not location: 48 | return 49 | filename = "extract_convert_preview" 50 | now = datetime.datetime.now().strftime("%Y%m%d_%H%M%S") 51 | filename = os.path.join(location, 52 | "{}_{}.{}".format(filename, 53 | now, 54 | "png")) 55 | Images().previewoutput[0].save(filename) 56 | print("Saved preview to {}".format(filename)) 57 | 58 | 59 | class PreviewTrain(DisplayOptionalPage): 60 | """ Training preview image(s) """ 61 | 62 | def display_item_set(self): 63 | """ Load the latest preview if available """ 64 | Images().load_training_preview() 65 | self.display_item = Images().previewtrain 66 | 67 | def display_item_process(self): 68 | """ Display the preview(s) resized as appropriate """ 69 | sortednames = sorted([name for name in Images().previewtrain.keys()]) 70 | existing = self.subnotebook_get_titles_ids() 71 | 72 | for name in sortednames: 73 | if name not in existing.keys(): 74 | self.add_child(name) 75 | else: 76 | tab_id = existing[name] 77 | self.update_child(tab_id, name) 78 | 79 | def add_child(self, name): 80 | """ Add the preview canvas child """ 81 | preview = PreviewTrainCanvas(self.subnotebook, name) 82 | 
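# Note: subnotebook_add_page wraps the canvas in a new tab and returns the
# page widget, so the tooltip below attaches to that returned widget; the
# "modified" timestamp lets update_child skip previews that have not changed.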
preview = self.subnotebook_add_page(name, widget=preview) 83 | Tooltip(preview, text=self.helptext, wraplength=200) 84 | self.vars["modified"].set(Images().previewtrain[name][2]) 85 | 86 | def update_child(self, tab_id, name): 87 | """ Update the preview canvas """ 88 | if self.vars["modified"].get() != Images().previewtrain[name][2]: 89 | self.vars["modified"].set(Images().previewtrain[name][2]) 90 | widget = self.subnotebook_page_from_id(tab_id) 91 | widget.reload() 92 | 93 | def save_items(self): 94 | """ Open save dialogue and save preview """ 95 | location = FileHandler("dir", None).retfile 96 | if not location: 97 | return 98 | for preview in self.subnotebook.children.values(): 99 | preview.save_preview(location) 100 | 101 | 102 | class PreviewTrainCanvas(ttk.Frame): 103 | """ Canvas to hold a training preview image """ 104 | def __init__(self, parent, previewname): 105 | ttk.Frame.__init__(self, parent) 106 | 107 | self.name = previewname 108 | Images().resize_image(self.name, None) 109 | self.previewimage = Images().previewtrain[self.name][1] 110 | 111 | self.canvas = tk.Canvas(self, bd=0, highlightthickness=0) 112 | self.canvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True) 113 | self.imgcanvas = self.canvas.create_image(0, 114 | 0, 115 | image=self.previewimage, 116 | anchor=tk.NW) 117 | self.bind("<Configure>", self.resize) 118 | 119 | def resize(self, event): 120 | """ Resize the image to fit the frame, maintaining aspect ratio """ 121 | framesize = (event.width, event.height) 122 | # Sometimes image is resized before frame is drawn 123 | framesize = None if framesize == (1, 1) else framesize 124 | Images().resize_image(self.name, framesize) 125 | self.reload() 126 | 127 | def reload(self): 128 | """ Reload the preview image """ 129 | self.previewimage = Images().previewtrain[self.name][1] 130 | self.canvas.itemconfig(self.imgcanvas, image=self.previewimage) 131 | 132 | def save_preview(self, location): 133 | """ Save the figure to file """ 134 | filename = self.name 135 | now = datetime.datetime.now().strftime("%Y%m%d_%H%M%S") 136 | filename = os.path.join(location, 137 | "{}_{}.{}".format(filename, 138 | now, 139 | "png")) 140 | Images().previewtrain[self.name][0].save(filename) 141 | print("Saved preview to {}".format(filename)) 142 | 143 | 144 | class GraphDisplay(DisplayOptionalPage): 145 | """ The Graph Tab of the Display section """ 146 | 147 | def display_item_set(self): 148 | """ Load the graph(s) if available """ 149 | if self.session.stats["iterations"] == 0: 150 | self.display_item = None 151 | else: 152 | self.display_item = self.session.stats 153 | 154 | def display_item_process(self): 155 | """ Add a single graph to the graph window """ 156 | losskeys = self.display_item["losskeys"] 157 | loss = self.display_item["loss"] 158 | tabcount = int(len(losskeys) / 2) 159 | existing = self.subnotebook_get_titles_ids() 160 | for i in range(tabcount): 161 | selectedkeys = losskeys[i * 2:(i + 1) * 2] 162 | name = " - ".join(selectedkeys).title().replace("_", " ") 163 | if name not in existing.keys(): 164 | selectedloss = loss[i * 2:(i + 1) * 2] 165 | selection = {"loss": selectedloss, 166 | "losskeys": selectedkeys} 167 | data = Calculations(session=selection, 168 | display="loss", 169 | selections=["raw", "trend"]) 170 | self.add_child(name, data) 171 | 172 | def add_child(self, name, data): 173 | """ Add the graph for the selected keys """ 174 | graph = TrainingGraph(self.subnotebook, data, "Loss") 175 | graph.build() 176 | graph = self.subnotebook_add_page(name, widget=graph) 177 |
Tooltip(graph, text=self.helptext, wraplength=200) 178 | 179 | def save_items(self): 180 | """ Open save dialogue and save graphs """ 181 | graphlocation = FileHandler("dir", None).retfile 182 | if not graphlocation: 183 | return 184 | for graph in self.subnotebook.children.values(): 185 | graph.save_fig(graphlocation) 186 | -------------------------------------------------------------------------------- /lib/gui/statusbar.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin python3 2 | """ Status bar for the GUI """ 3 | 4 | import tkinter as tk 5 | from tkinter import ttk 6 | 7 | 8 | class StatusBar(ttk.Frame): 9 | """ Status Bar for displaying the Status Message and 10 | Progress Bar """ 11 | 12 | def __init__(self, parent): 13 | ttk.Frame.__init__(self, parent) 14 | self.pack(side=tk.BOTTOM, padx=10, pady=2, fill=tk.X, expand=False) 15 | 16 | self.status_message = tk.StringVar() 17 | self.pbar_message = tk.StringVar() 18 | self.pbar_position = tk.IntVar() 19 | 20 | self.status_message.set("Ready") 21 | 22 | self.status() 23 | self.pbar = self.progress_bar() 24 | 25 | def status(self): 26 | """ Place Status into bottom bar """ 27 | statusframe = ttk.Frame(self) 28 | statusframe.pack(side=tk.LEFT, anchor=tk.W, fill=tk.X, expand=False) 29 | 30 | lbltitle = ttk.Label(statusframe, text="Status:", width=6, anchor=tk.W) 31 | lbltitle.pack(side=tk.LEFT, expand=False) 32 | 33 | lblstatus = ttk.Label(statusframe, 34 | width=20, 35 | textvariable=self.status_message, 36 | anchor=tk.W) 37 | lblstatus.pack(side=tk.LEFT, anchor=tk.W, fill=tk.X, expand=True) 38 | 39 | def progress_bar(self): 40 | """ Place progress bar into bottom bar """ 41 | progressframe = ttk.Frame(self) 42 | progressframe.pack(side=tk.RIGHT, anchor=tk.E, fill=tk.X) 43 | 44 | lblmessage = ttk.Label(progressframe, textvariable=self.pbar_message) 45 | lblmessage.pack(side=tk.LEFT, padx=3, fill=tk.X, expand=True) 46 | 47 | pbar = ttk.Progressbar(progressframe, 48 | length=200, 49 | variable=self.pbar_position, 50 | maximum=1000, 51 | mode="determinate") 52 | pbar.pack(side=tk.LEFT, padx=2, fill=tk.X, expand=True) 53 | pbar.pack_forget() 54 | return pbar 55 | 56 | def progress_start(self, mode): 57 | """ Set progress bar mode and display """ 58 | self.progress_set_mode(mode) 59 | self.pbar.pack() 60 | 61 | def progress_stop(self): 62 | """ Reset progress bar and hide """ 63 | self.pbar_message.set("") 64 | self.pbar_position.set(0) 65 | self.progress_set_mode("determinate") 66 | self.pbar.pack_forget() 67 | 68 | def progress_set_mode(self, mode): 69 | """ Set the progress bar mode """ 70 | self.pbar.config(mode=mode) 71 | if mode == "indeterminate": 72 | self.pbar.config(maximum=100) 73 | self.pbar.start() 74 | else: 75 | self.pbar.stop() 76 | self.pbar.config(maximum=1000) 77 | 78 | def progress_update(self, message, position, update_position=True): 79 | """ Update the GUIs progress bar and position """ 80 | self.pbar_message.set(message) 81 | if update_position: 82 | self.pbar_position.set(position) 83 | -------------------------------------------------------------------------------- /lib/gui/tooltip.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin python3 2 | """ Tooltip. Pops up help messages for the GUI """ 3 | import platform 4 | import tkinter as tk 5 | 6 | 7 | class Tooltip: 8 | """ 9 | Create a tooltip for a given widget as the mouse goes on it. 
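A hypothetical usage sketch (any Tk widget will do; btn and root are
illustrative names):

        btn = ttk.Button(root, text="Save")
        Tooltip(btn, text="Save the current session", wraplength=200)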
10 | 11 | Adapted from StackOverflow: 12 | 13 | http://stackoverflow.com/questions/3221956/ 14 | what-is-the-simplest-way-to-make-tooltips- 15 | in-tkinter/36221216#36221216 16 | 17 | http://www.daniweb.com/programming/software-development/ 18 | code/484591/a-tooltip-class-for-tkinter 19 | 20 | - Originally written by vegaseat on 2014.09.09. 21 | 22 | - Modified to include a delay time by Victor Zaccardo on 2016.03.25. 23 | 24 | - Modified 25 | - to correct extreme right and extreme bottom behavior, 26 | - to stay inside the screen whenever the tooltip might go out on 27 | the top but still the screen is higher than the tooltip, 28 | - to use the more flexible mouse positioning, 29 | - to add customizable background color, padding, waittime and 30 | wraplength on creation 31 | by Alberto Vassena on 2016.11.05. 32 | 33 | Tested on Ubuntu 16.04/16.10, running Python 3.5.2 34 | 35 | """ 36 | 37 | def __init__(self, widget, 38 | *, 39 | background="#FFFFEA", 40 | pad=(5, 3, 5, 3), 41 | text="widget info", 42 | waittime=400, 43 | wraplength=250): 44 | 45 | self.waittime = waittime # in milliseconds, originally 500 46 | self.wraplength = wraplength # in pixels, originally 180 47 | self.widget = widget 48 | self.text = text 49 | self.widget.bind("<Enter>", self.on_enter) 50 | self.widget.bind("<Leave>", self.on_leave) 51 | self.widget.bind("<ButtonPress>", self.on_leave) 52 | self.background = background 53 | self.pad = pad 54 | self.ident = None 55 | self.topwidget = None 56 | 57 | def on_enter(self, event=None): 58 | """ Schedule on an enter event """ 59 | self.schedule() 60 | 61 | def on_leave(self, event=None): 62 | """ Unschedule on a leave event """ 63 | self.unschedule() 64 | self.hide() 65 | 66 | def schedule(self): 67 | """ Show the tooltip after wait period """ 68 | self.unschedule() 69 | self.ident = self.widget.after(self.waittime, self.show) 70 | 71 | def unschedule(self): 72 | """ Cancel a scheduled tooltip """ 73 | id_ = self.ident 74 | self.ident = None 75 | if id_: 76 | self.widget.after_cancel(id_) 77 | 78 | def show(self): 79 | """ Show the tooltip """ 80 | def tip_pos_calculator(widget, label, 81 | *, 82 | tip_delta=(10, 5), pad=(5, 3, 5, 3)): 83 | """ Calculate the tooltip position """ 84 | 85 | s_width, s_height = widget.winfo_screenwidth(), widget.winfo_screenheight() 86 | 87 | width, height = (pad[0] + label.winfo_reqwidth() + pad[2], 88 | pad[1] + label.winfo_reqheight() + pad[3]) 89 | 90 | mouse_x, mouse_y = widget.winfo_pointerxy() 91 | 92 | x_1, y_1 = mouse_x + tip_delta[0], mouse_y + tip_delta[1] 93 | x_2, y_2 = x_1 + width, y_1 + height 94 | 95 | x_delta = x_2 - s_width 96 | if x_delta < 0: 97 | x_delta = 0 98 | y_delta = y_2 - s_height 99 | if y_delta < 0: 100 | y_delta = 0 101 | 102 | offscreen = (x_delta, y_delta) != (0, 0) 103 | 104 | if offscreen: 105 | 106 | if x_delta: 107 | x_1 = mouse_x - tip_delta[0] - width 108 | 109 | if y_delta: 110 | y_1 = mouse_y - tip_delta[1] - height 111 | 112 | offscreen_again = y_1 < 0 # out on the top 113 | 114 | if offscreen_again: 115 | # No further checks will be done. 116 | 117 | # TIP: 118 | # A further mod might automagically augment the 119 | # wraplength when the tooltip is too high to be 120 | # kept inside the screen.
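# One possible sketch of that mod (an assumption, not implemented here):
#     if height > s_height:
#         new_wrap = int(label.cget("wraplength")) * height // s_height
#         label.configure(wraplength=new_wrap)
# i.e. widen the wrap so the text reflows into fewer, longer lines.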
121 | y_1 = 0 122 | 123 | return x_1, y_1 124 | 125 | background = self.background 126 | pad = self.pad 127 | widget = self.widget 128 | 129 | # creates a toplevel window 130 | self.topwidget = tk.Toplevel(widget) 131 | if platform.system() == "Darwin": 132 | # For Mac OS 133 | self.topwidget.tk.call("::tk::unsupported::MacWindowStyle", 134 | "style", self.topwidget._w, 135 | "help", "none") 136 | 137 | # Leaves only the label and removes the app window 138 | self.topwidget.wm_overrideredirect(True) 139 | 140 | win = tk.Frame(self.topwidget, 141 | background=background, 142 | borderwidth=0) 143 | label = tk.Label(win, 144 | text=self.text, 145 | justify=tk.LEFT, 146 | background=background, 147 | relief=tk.SOLID, 148 | borderwidth=0, 149 | wraplength=self.wraplength) 150 | 151 | label.grid(padx=(pad[0], pad[2]), 152 | pady=(pad[1], pad[3]), 153 | sticky=tk.NSEW) 154 | win.grid() 155 | 156 | xpos, ypos = tip_pos_calculator(widget, label) 157 | 158 | self.topwidget.wm_geometry("+%d+%d" % (xpos, ypos)) 159 | 160 | def hide(self): 161 | """ Hide the tooltip """ 162 | topwidget = self.topwidget 163 | if topwidget: 164 | topwidget.destroy() 165 | self.topwidget = None 166 | -------------------------------------------------------------------------------- /lib/multithreading.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ Multithreading/processing utils for faceswap """ 3 | 4 | import multiprocessing as mp 5 | import queue as Queue 6 | import threading 7 | 8 | 9 | class PoolProcess(): 10 | """ Pool multiple processes """ 11 | def __init__(self, method, processes=None, verbose=False): 12 | self.verbose = verbose 13 | self.method = method 14 | self.procs = self.set_procs(processes) 15 | self.pool = None 16 | 17 | def set_procs(self, processes): 18 | """ Set the number of processes to use """ 19 | if processes is None: 20 | running_processes = len(mp.active_children()) 21 | processes = max(mp.cpu_count() - running_processes, 1) 22 | if self.verbose: 23 | print("Processing in {} processes".format(processes)) 24 | return processes 25 | 26 | def in_process(self, *args, **kwargs): 27 | """ Run the processing pool """ 28 | self.pool = mp.Pool(processes=self.procs) 29 | for _ in range(self.procs): 30 | self.pool.apply_async(self.method, args=args, kwds=kwargs) 31 | 32 | def join(self): 33 | """ Join the process """ 34 | self.pool.close() 35 | self.pool.join() 36 | 37 | 38 | class SpawnProcess(): 39 | """ Process in spawnable context 40 | Must be spawnable to share CUDA across processes """ 41 | def __init__(self): 42 | self.context = mp.get_context("spawn") 43 | self.daemonize = True 44 | self.process = None 45 | self.event = self.context.Event() 46 | 47 | def in_process(self, target, *args, **kwargs): 48 | """ Start a process in the spawn context """ 49 | kwargs["event"] = self.event 50 | self.process = self.context.Process(target=target, 51 | args=args, 52 | kwargs=kwargs) 53 | self.process.daemon = self.daemonize 54 | self.process.start() 55 | 56 | def join(self): 57 | """ Join the process """ 58 | self.process.join() 59 | 60 | 61 | class MultiThread(): 62 | """ Threading for IO heavy ops """ 63 | def __init__(self, thread_count=1): 64 | self.thread_count = thread_count 65 | self.threads = list() 66 | 67 | def in_thread(self, target, *args, **kwargs): 68 | """ Start a thread with the given method and args """ 69 | for _ in range(self.thread_count): 70 | thread = threading.Thread(target=target, args=args, kwargs=kwargs) 71 | 
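# Daemonising the workers means an interrupted main process will not hang
# waiting on IO threads; callers that need a clean handover should still
# call join_threads() below.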
thread.daemon = True 72 | thread.start() 73 | self.threads.append(thread) 74 | 75 | def join_threads(self): 76 | """ Join the running threads """ 77 | for thread in self.threads: 78 | thread.join() 79 | 80 | 81 | class BackgroundGenerator(threading.Thread): 82 | """ Run a queue in the background. From: 83 | https://stackoverflow.com/questions/7323664/ """ 84 | # See below why prefetch count is flawed 85 | def __init__(self, generator, prefetch=1): 86 | threading.Thread.__init__(self) 87 | self.queue = Queue.Queue(maxsize=prefetch) 88 | self.generator = generator 89 | self.daemon = True 90 | self.start() 91 | 92 | def run(self): 93 | """ Put until queue size is reached. 94 | Note: put blocks only if put is called while queue has already 95 | reached max size => this makes 2 prefetched items! One in the 96 | queue, one waiting for insertion! """ 97 | for item in self.generator: 98 | self.queue.put(item) 99 | self.queue.put(None) 100 | 101 | def iterator(self): 102 | """ Iterate items out of the queue """ 103 | while True: 104 | next_item = self.queue.get() 105 | if next_item is None: 106 | break 107 | yield next_item 108 | -------------------------------------------------------------------------------- /lib/queue_manager.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ Queue Manager for faceswap 3 | 4 | NB: Keep this in it's own module! If it gets loaded from 5 | a multiprocess on a Windows System it will break Faceswap""" 6 | 7 | import multiprocessing as mp 8 | import threading 9 | 10 | from queue import Empty as QueueEmpty # Used for imports 11 | from time import sleep 12 | 13 | 14 | class QueueManager(): 15 | """ Manage queues for availabilty across processes 16 | Don't import this class directly, instead 17 | import the variable: queue_manager """ 18 | def __init__(self): 19 | self.manager = mp.Manager() 20 | self.queues = dict() 21 | 22 | def add_queue(self, name, maxsize=0): 23 | """ Add a queue to the manager """ 24 | if name in self.queues.keys(): 25 | raise ValueError("Queue '{}' already exists.".format(name)) 26 | queue = self.manager.Queue(maxsize=maxsize) 27 | self.queues[name] = queue 28 | 29 | def del_queue(self, name): 30 | """ remove a queue from the manager """ 31 | del self.queues[name] 32 | 33 | def get_queue(self, name, maxsize=0): 34 | """ Return a queue from the manager 35 | If it doesn't exist, create it """ 36 | queue = self.queues.get(name, None) 37 | if queue: 38 | return queue 39 | self.add_queue(name, maxsize) 40 | return self.queues[name] 41 | 42 | def terminate_queues(self): 43 | """ Clear all queues and send EOF 44 | To be called if there is an error """ 45 | for queue in self.queues.values(): 46 | while not queue.empty(): 47 | queue.get() 48 | queue.put("EOF") 49 | 50 | def debug_monitor(self, update_secs=2): 51 | """ Debug tool for monitoring queues """ 52 | thread = threading.Thread(target=self.debug_queue_sizes, 53 | args=(update_secs, )) 54 | thread.daemon = True 55 | thread.start() 56 | 57 | def debug_queue_sizes(self, update_secs): 58 | """ Output the queue sizes """ 59 | while True: 60 | print("=== QUEUE SIZES ===") 61 | for name in sorted(self.queues.keys()): 62 | print(name, self.queues[name].qsize()) 63 | print("====================\n") 64 | sleep(update_secs) 65 | 66 | 67 | queue_manager = QueueManager() 68 | -------------------------------------------------------------------------------- /lib/training_data.py: 
-------------------------------------------------------------------------------- 1 | from random import shuffle 2 | import cv2 3 | import numpy 4 | 5 | from .multithreading import BackgroundGenerator 6 | from .umeyama import umeyama 7 | 8 | class TrainingDataGenerator(): 9 | def __init__(self, random_transform_args, coverage, scale=5, zoom=1): #TODO thos default should stay in the warp function 10 | self.random_transform_args = random_transform_args 11 | self.coverage = coverage 12 | self.scale = scale 13 | self.zoom = zoom 14 | 15 | def minibatchAB(self, images, batchsize, doShuffle=True): 16 | batch = BackgroundGenerator(self.minibatch(images, batchsize, doShuffle), 1) 17 | for ep1, warped_img, target_img in batch.iterator(): 18 | yield ep1, warped_img, target_img 19 | 20 | # A generator function that yields epoch, batchsize of warped_img and batchsize of target_img 21 | def minibatch(self, data, batchsize, doShuffle=True): 22 | length = len(data) 23 | assert length >= batchsize, "Number of images is lower than batch-size (Note that too few images may lead to bad training). # images: {}, batch-size: {}".format(length, batchsize) 24 | epoch = i = 0 25 | if doShuffle: 26 | shuffle(data) 27 | while True: 28 | size = batchsize 29 | if i+size > length: 30 | if doShuffle: 31 | shuffle(data) 32 | i = 0 33 | epoch+=1 34 | rtn = numpy.float32([self.read_image(img) for img in data[i:i+size]]) 35 | i+=size 36 | yield epoch, rtn[:,0,:,:,:], rtn[:,1,:,:,:] 37 | 38 | def color_adjust(self, img): 39 | return img / 255.0 40 | 41 | def read_image(self, fn): 42 | try: 43 | image = self.color_adjust(cv2.imread(fn)) 44 | except TypeError: 45 | raise Exception("Error while reading image", fn) 46 | 47 | image = cv2.resize(image, (256,256)) 48 | image = self.random_transform( image, **self.random_transform_args ) 49 | warped_img, target_img = self.random_warp( image, self.coverage, self.scale, self.zoom ) 50 | 51 | return warped_img, target_img 52 | 53 | def random_transform(self, image, rotation_range, zoom_range, shift_range, random_flip): 54 | h, w = image.shape[0:2] 55 | rotation = numpy.random.uniform(-rotation_range, rotation_range) 56 | scale = numpy.random.uniform(1 - zoom_range, 1 + zoom_range) 57 | tx = numpy.random.uniform(-shift_range, shift_range) * w 58 | ty = numpy.random.uniform(-shift_range, shift_range) * h 59 | mat = cv2.getRotationMatrix2D((w // 2, h // 2), rotation, scale) 60 | mat[:, 2] += (tx, ty) 61 | result = cv2.warpAffine( 62 | image, mat, (w, h), borderMode=cv2.BORDER_REPLICATE) 63 | if numpy.random.random() < random_flip: 64 | result = result[:, ::-1] 65 | return result 66 | 67 | # get pair of random warped images from aligned face image 68 | def random_warp(self, image, coverage, scale = 5, zoom = 1): 69 | assert image.shape == (256, 256, 3) 70 | range_ = numpy.linspace(128 - coverage//2, 128 + coverage//2, 5) 71 | mapx = numpy.broadcast_to(range_, (5, 5)) 72 | mapy = mapx.T 73 | 74 | mapx = mapx + numpy.random.normal(size=(5,5), scale=scale) 75 | mapy = mapy + numpy.random.normal(size=(5,5), scale=scale) 76 | 77 | interp_mapx = cv2.resize(mapx, (80*zoom,80*zoom))[8*zoom:72*zoom,8*zoom:72*zoom].astype('float32') 78 | interp_mapy = cv2.resize(mapy, (80*zoom,80*zoom))[8*zoom:72*zoom,8*zoom:72*zoom].astype('float32') 79 | 80 | warped_image = cv2.remap(image, interp_mapx, interp_mapy, cv2.INTER_LINEAR) 81 | 82 | src_points = numpy.stack([mapx.ravel(), mapy.ravel() ], axis=-1) 83 | dst_points = numpy.mgrid[0:65*zoom:16*zoom,0:65*zoom:16*zoom].T.reshape(-1,2) 84 | mat = 
umeyama(src_points, dst_points, True)[0:2] 85 | 86 | target_image = cv2.warpAffine(image, mat, (64*zoom,64*zoom)) 87 | 88 | return warped_image, target_image 89 | 90 | def stack_images(images): 91 | def get_transpose_axes(n): 92 | if n % 2 == 0: 93 | y_axes = list(range(1, n - 1, 2)) 94 | x_axes = list(range(0, n - 1, 2)) 95 | else: 96 | y_axes = list(range(0, n - 1, 2)) 97 | x_axes = list(range(1, n - 1, 2)) 98 | return y_axes, x_axes, [n - 1] 99 | 100 | images_shape = numpy.array(images.shape) 101 | new_axes = get_transpose_axes(len(images_shape)) 102 | new_shape = [numpy.prod(images_shape[x]) for x in new_axes] 103 | return numpy.transpose( 104 | images, 105 | axes=numpy.concatenate(new_axes) 106 | ).reshape(new_shape) 107 | -------------------------------------------------------------------------------- /lib/umeyama.py: -------------------------------------------------------------------------------- 1 | ## License (Modified BSD) 2 | ## Copyright (C) 2011, the scikit-image team All rights reserved. 3 | ## 4 | ## Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 5 | ## 6 | ## Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 7 | ## Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 8 | ## Neither the name of skimage nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 9 | ## THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 10 | 11 | # umeyama function from scikit-image/skimage/transform/_geometric.py 12 | 13 | import numpy as np 14 | 15 | 16 | def umeyama(src, dst, estimate_scale): 17 | """Estimate N-D similarity transformation with or without scaling. 18 | Parameters 19 | ---------- 20 | src : (M, N) array 21 | Source coordinates. 22 | dst : (M, N) array 23 | Destination coordinates. 24 | estimate_scale : bool 25 | Whether to estimate scaling factor. 26 | Returns 27 | ------- 28 | T : (N + 1, N + 1) 29 | The homogeneous similarity transformation matrix. The matrix contains 30 | NaN values only if the problem is not well-conditioned. 31 | References 32 | ---------- 33 | .. [1] "Least-squares estimation of transformation parameters between two 34 | point patterns", Shinji Umeyama, PAMI 1991, DOI: 10.1109/34.88573 35 | """ 36 | 37 | num = src.shape[0] 38 | dim = src.shape[1] 39 | 40 | # Compute mean of src and dst. 41 | src_mean = src.mean(axis=0) 42 | dst_mean = dst.mean(axis=0) 43 | 44 | # Subtract mean from src and dst. 45 | src_demean = src - src_mean 46 | dst_demean = dst - dst_mean 47 | 48 | # Eq. (38). 
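# A is the dim x dim cross-covariance of the demeaned point sets; the SVD
# below extracts the rotation from it, and d (Eq. 39) flips the last axis
# when needed so the result is a proper rotation rather than a reflection.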
49 | A = np.dot(dst_demean.T, src_demean) / num 50 | 51 | # Eq. (39). 52 | d = np.ones((dim,), dtype=np.double) 53 | if np.linalg.det(A) < 0: 54 | d[dim - 1] = -1 55 | 56 | T = np.eye(dim + 1, dtype=np.double) 57 | 58 | U, S, V = np.linalg.svd(A) 59 | 60 | # Eq. (40) and (43). 61 | rank = np.linalg.matrix_rank(A) 62 | if rank == 0: 63 | return np.nan * T 64 | elif rank == dim - 1: 65 | if np.linalg.det(U) * np.linalg.det(V) > 0: 66 | T[:dim, :dim] = np.dot(U, V) 67 | else: 68 | s = d[dim - 1] 69 | d[dim - 1] = -1 70 | T[:dim, :dim] = np.dot(U, np.dot(np.diag(d), V)) 71 | d[dim - 1] = s 72 | else: 73 | T[:dim, :dim] = np.dot(U, np.dot(np.diag(d), V.T)) 74 | 75 | if estimate_scale: 76 | # Eq. (41) and (42). 77 | scale = 1.0 / src_demean.var(axis=0).sum() * np.dot(S, d) 78 | else: 79 | scale = 1.0 80 | 81 | T[:dim, dim] = dst_mean - scale * np.dot(T[:dim, :dim], src_mean.T) 82 | T[:dim, :dim] *= scale 83 | 84 | return T 85 | -------------------------------------------------------------------------------- /model/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/model/.DS_Store -------------------------------------------------------------------------------- /output/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/output/.DS_Store -------------------------------------------------------------------------------- /plugins/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/plugins/__init__.py -------------------------------------------------------------------------------- /plugins/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/plugins/__init__.pyc -------------------------------------------------------------------------------- /plugins/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/plugins/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /plugins/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/plugins/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /plugins/__pycache__/plugin_loader.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/plugins/__pycache__/plugin_loader.cpython-36.pyc -------------------------------------------------------------------------------- /plugins/__pycache__/plugin_loader.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/plugins/__pycache__/plugin_loader.cpython-37.pyc -------------------------------------------------------------------------------- /plugins/convert/Convert_Adjust.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ Adjust converter for faceswap.py 3 | 4 | Based on the original https://www.reddit.com/r/deepfakes/ code sample 5 | Adjust code made by https://github.com/yangchen8710 """ 6 | 7 | import cv2 8 | import numpy as np 9 | 10 | from lib.utils import add_alpha_channel 11 | 12 | 13 | class Convert(): 14 | """ Adjust Converter """ 15 | def __init__(self, encoder, smooth_mask=True, avg_color_adjust=True, 16 | draw_transparent=False, **kwargs): 17 | self.encoder = encoder 18 | 19 | self.use_smooth_mask = smooth_mask 20 | self.use_avg_color_adjust = avg_color_adjust 21 | self.draw_transparent = draw_transparent 22 | 23 | def patch_image(self, frame, detected_face, size): 24 | """ Patch swapped face onto original image """ 25 | # pylint: disable=no-member 26 | # assert image.shape == (256, 256, 3) 27 | padding = 48 28 | face_size = 256 29 | detected_face.load_aligned(frame, face_size, padding, 30 | align_eyes=False) 31 | src_face = detected_face.aligned_face 32 | 33 | crop = slice(padding, face_size - padding) 34 | process_face = src_face[crop, crop] 35 | old_face = process_face.copy() 36 | 37 | process_face = cv2.resize(process_face, 38 | (size, size), 39 | interpolation=cv2.INTER_AREA) 40 | process_face = np.expand_dims(process_face, 0) 41 | 42 | new_face = self.encoder(process_face / 255.0)[0] 43 | new_face = np.clip(new_face * 255, 0, 255).astype(src_face.dtype) 44 | new_face = cv2.resize( 45 | new_face, 46 | (face_size - padding * 2, face_size - padding * 2), 47 | interpolation=cv2.INTER_CUBIC) 48 | 49 | if self.use_avg_color_adjust: 50 | self.adjust_avg_color(old_face, new_face) 51 | if self.use_smooth_mask: 52 | self.smooth_mask(old_face, new_face) 53 | 54 | new_face = self.superpose(src_face, new_face, crop) 55 | new_image = frame.copy() 56 | 57 | if self.draw_transparent: 58 | new_image, new_face = self.convert_transparent(new_image, 59 | new_face) 60 | 61 | cv2.warpAffine( 62 | new_face, 63 | detected_face.adjusted_matrix, 64 | (detected_face.frame_dims[1], detected_face.frame_dims[0]), 65 | new_image, 66 | flags=cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, 67 | borderMode=cv2.BORDER_TRANSPARENT) 68 | return new_image 69 | 70 | @staticmethod 71 | def adjust_avg_color(old_face, new_face): 72 | """ Perform average color adjustment """ 73 | for i in range(new_face.shape[-1]): 74 | old_avg = old_face[:, :, i].mean() 75 | new_avg = new_face[:, :, i].mean() 76 | diff_int = (int)(old_avg - new_avg) 77 | for int_h in range(new_face.shape[0]): 78 | for int_w in range(new_face.shape[1]): 79 | temp = (new_face[int_h, int_w, i] + diff_int) 80 | if temp < 0: 81 | new_face[int_h, int_w, i] = 0 82 | elif temp > 255: 83 | new_face[int_h, int_w, i] = 255 84 | else: 85 | new_face[int_h, int_w, i] = temp 86 | 87 | @staticmethod 88 | def smooth_mask(old_face, new_face): 89 | """ Smooth the mask """ 90 | width, height, _ = new_face.shape 91 | crop = slice(0, width) 92 | mask = np.zeros_like(new_face) 93 | mask[height // 15:-height // 15, width // 15:-width // 15, :] = 255 94 | mask = cv2.GaussianBlur(mask, # pylint: disable=no-member 95 | (15, 15), 96 | 10) 97 | new_face[crop, crop] = (mask / 255 * new_face + 98 | (1 - mask / 255) * 
old_face) 99 | 100 | @staticmethod 101 | def superpose(src_face, new_face, crop): 102 | """ Crop Face """ 103 | new_image = src_face.copy() 104 | new_image[crop, crop] = new_face 105 | return new_image 106 | 107 | @staticmethod 108 | def convert_transparent(image, new_face): 109 | """ Add alpha channels to images and change to 110 | transparent background """ 111 | height, width = image.shape[:2] 112 | image = np.zeros((height, width, 4), dtype=np.uint8) 113 | new_face = add_alpha_channel(new_face, 100) 114 | return image, new_face 115 | -------------------------------------------------------------------------------- /plugins/convert/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/plugins/convert/__init__.py -------------------------------------------------------------------------------- /plugins/convert/__pycache__/Convert_Masked.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/plugins/convert/__pycache__/Convert_Masked.cpython-36.pyc -------------------------------------------------------------------------------- /plugins/convert/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/plugins/convert/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /plugins/extract/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/plugins/extract/__init__.py -------------------------------------------------------------------------------- /plugins/extract/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/plugins/extract/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /plugins/extract/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/plugins/extract/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /plugins/extract/align/.cache/2DFAN-4.pb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/plugins/extract/align/.cache/2DFAN-4.pb -------------------------------------------------------------------------------- /plugins/extract/align/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/plugins/extract/align/__init__.py -------------------------------------------------------------------------------- 
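A side note on adjust_avg_color in Convert_Adjust.py above: it shifts every
pixel in nested Python loops. A minimal vectorized sketch of the same
per-channel mean shift (a hypothetical helper, not part of the plugin)
could be:

    import numpy as np

    def adjust_avg_color_fast(old_face, new_face):
        """ Shift each channel of new_face towards old_face's mean, in place """
        # per-channel mean difference over the height/width axes
        diff = old_face.mean(axis=(0, 1)) - new_face.mean(axis=(0, 1))
        # widen to int16 first so uint8 arithmetic cannot wrap around
        shifted = new_face.astype(np.int16) + diff.astype(np.int16)
        new_face[:] = np.clip(shifted, 0, 255).astype(new_face.dtype)

Up to integer truncation of the per-channel difference, this matches the
loop version's behaviour.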
/plugins/extract/align/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/plugins/extract/align/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /plugins/extract/align/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/plugins/extract/align/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /plugins/extract/align/__pycache__/_base.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/plugins/extract/align/__pycache__/_base.cpython-36.pyc -------------------------------------------------------------------------------- /plugins/extract/align/__pycache__/_base.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/plugins/extract/align/__pycache__/_base.cpython-37.pyc -------------------------------------------------------------------------------- /plugins/extract/align/__pycache__/dlib.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/plugins/extract/align/__pycache__/dlib.cpython-36.pyc -------------------------------------------------------------------------------- /plugins/extract/align/__pycache__/fan.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/plugins/extract/align/__pycache__/fan.cpython-36.pyc -------------------------------------------------------------------------------- /plugins/extract/align/__pycache__/fan.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/plugins/extract/align/__pycache__/fan.cpython-37.pyc -------------------------------------------------------------------------------- /plugins/extract/align/_base.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ Base class for Face Aligner plugins 3 | Plugins should inherit from this class 4 | 5 | See the override methods for which methods are 6 | required. 
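A minimal plugin shell, as a hypothetical sketch, would be:

    from ._base import Aligner

    class Align(Aligner):
        @staticmethod
        def set_model_path():
            return None  # or the path to the plugin's model file

with align() overridden to pull items from the "in" queue and pass each
result to finalize().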
7 | 8 | The plugin will receive a dict containing: 9 | {"filename": , 10 | "image": , 11 | "detected_faces": } 12 | 13 | For each source item, the plugin must pass a dict to finalize containing: 14 | {"filename": , 15 | "image": , 16 | "detected_faces": , 17 | "landmarks": } 18 | """ 19 | 20 | import os 21 | 22 | from lib.aligner import Extract 23 | from lib.gpu_stats import GPUStats 24 | 25 | 26 | class Aligner(): 27 | """ Landmarks Aligner Object """ 28 | def __init__(self, verbose=False): 29 | self.verbose = verbose 30 | self.cachepath = os.path.join(os.path.dirname(__file__), ".cache") 31 | self.extract = Extract() 32 | self.init = None 33 | 34 | # The input and output queues for the plugin. 35 | # See lib.multithreading.QueueManager for getting queues 36 | self.queues = {"in": None, "out": None} 37 | 38 | # Path to model if required 39 | self.model_path = self.set_model_path() 40 | 41 | # Approximate VRAM required for aligner. Used to calculate 42 | # how many parallel processes / batches can be run. 43 | # Be conservative to avoid OOM. 44 | self.vram = None 45 | 46 | # <<< OVERRIDE METHODS >>> # 47 | # These methods must be overriden when creating a plugin 48 | @staticmethod 49 | def set_model_path(): 50 | """ path to data file/models 51 | override for specific detector """ 52 | raise NotImplementedError() 53 | 54 | def initialize(self, *args, **kwargs): 55 | """ Inititalize the aligner 56 | Tasks to be run before any alignments are performed. 57 | Override for specific detector """ 58 | self.init = kwargs["event"] 59 | self.queues["in"] = kwargs["in_queue"] 60 | self.queues["out"] = kwargs["out_queue"] 61 | 62 | def align(self, *args, **kwargs): 63 | """ Process landmarks 64 | Override for specific detector 65 | Must return a list of dlib rects""" 66 | try: 67 | if not self.init: 68 | self.initialize(*args, **kwargs) 69 | except ValueError as err: 70 | print("ERROR: {}".format(err)) 71 | exit(1) 72 | 73 | # <<< FINALIZE METHODS>>> # 74 | def finalize(self, output): 75 | """ This should be called as the final task of each plugin 76 | aligns faces and puts to the out queue """ 77 | if output == "EOF": 78 | self.queues["out"].put("EOF") 79 | return 80 | self.queues["out"].put((output)) 81 | 82 | # <<< MISC METHODS >>> # 83 | def get_vram_free(self): 84 | """ Return free and total VRAM on card with most VRAM free""" 85 | stats = GPUStats() 86 | vram = stats.get_card_most_free() 87 | if self.verbose: 88 | print("Using device {} with {}MB free of {}MB".format( 89 | vram["device"], 90 | int(vram["free"]), 91 | int(vram["total"]))) 92 | return int(vram["card_id"]), int(vram["free"]), int(vram["total"]) 93 | -------------------------------------------------------------------------------- /plugins/extract/align/dlib.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ DLib landmarks extractor for faceswap.py 3 | """ 4 | import face_recognition_models 5 | import dlib 6 | 7 | from ._base import Aligner 8 | 9 | 10 | class Align(Aligner): 11 | """ Perform transformation to align and get landmarks """ 12 | def __init__(self, **kwargs): 13 | super().__init__(**kwargs) 14 | self.vram = 0 # Doesn't use GPU 15 | self.model = None 16 | 17 | def set_model_path(self): 18 | """ Model path handled by face_recognition_models """ 19 | return face_recognition_models.pose_predictor_model_location() 20 | 21 | def initialize(self, *args, **kwargs): 22 | """ Initialization tasks to run prior to alignments """ 23 | super().initialize(*args, 
**kwargs) 24 | print("Initializing Dlib Pose Predictor...") 25 | self.model = dlib.shape_predictor(self.model_path) # pylint: disable=c-extension-no-member 26 | self.init.set() 27 | print("Initialized Dlib Pose Predictor.") 28 | 29 | def align(self, *args, **kwargs): 30 | """ Perform alignments on detected faces """ 31 | super().align(*args, **kwargs) 32 | while True: 33 | item = self.queues["in"].get() 34 | if item == "EOF": 35 | break 36 | if item.get("exception", False): 37 | self.queues["out"].put(item) 38 | break 39 | image = item["image"][:, :, ::-1].copy() 40 | item["landmarks"] = self.process_landmarks(image, item["detected_faces"]) 41 | self.finalize(item) 42 | self.finalize("EOF") 43 | 44 | def process_landmarks(self, image, detected_faces): 45 | """ Align image and process landmarks """ 46 | retval = list() 47 | for detected_face in detected_faces: 48 | pts = self.model(image, detected_face).parts() 49 | landmarks = [(point.x, point.y) for point in pts] 50 | retval.append(landmarks) 51 | return retval 52 | -------------------------------------------------------------------------------- /plugins/extract/detect/.cache/det1.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/plugins/extract/detect/.cache/det1.npy -------------------------------------------------------------------------------- /plugins/extract/detect/.cache/det2.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/plugins/extract/detect/.cache/det2.npy -------------------------------------------------------------------------------- /plugins/extract/detect/.cache/det3.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/plugins/extract/detect/.cache/det3.npy -------------------------------------------------------------------------------- /plugins/extract/detect/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/plugins/extract/detect/__init__.py -------------------------------------------------------------------------------- /plugins/extract/detect/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/plugins/extract/detect/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /plugins/extract/detect/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/plugins/extract/detect/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /plugins/extract/detect/__pycache__/_base.cpython-36.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/plugins/extract/detect/__pycache__/_base.cpython-36.pyc -------------------------------------------------------------------------------- /plugins/extract/detect/__pycache__/_base.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/plugins/extract/detect/__pycache__/_base.cpython-37.pyc -------------------------------------------------------------------------------- /plugins/extract/detect/__pycache__/mtcnn.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/plugins/extract/detect/__pycache__/mtcnn.cpython-36.pyc -------------------------------------------------------------------------------- /plugins/extract/detect/__pycache__/mtcnn.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/plugins/extract/detect/__pycache__/mtcnn.cpython-37.pyc -------------------------------------------------------------------------------- /plugins/extract/detect/dlib_cnn.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ DLIB CNN Face detection plugin """ 3 | 4 | import numpy as np 5 | import face_recognition_models 6 | from lib.utils import rotate_image_by_angle 7 | 8 | from ._base import Detector, dlib 9 | 10 | 11 | class Detect(Detector): 12 | """ Dlib detector for face recognition """ 13 | def __init__(self, **kwargs): 14 | super().__init__(**kwargs) 15 | self.target = (1792, 1792) # Uses approx 1805MB of VRAM 16 | self.vram = 1600 # Lower as batch size of 2 gives wiggle room 17 | self.detector = None 18 | 19 | def compiled_for_cuda(self): 20 | """ Return a message on DLIB Cuda Compilation status """ 21 | cuda = dlib.DLIB_USE_CUDA # pylint: disable=c-extension-no-member 22 | msg = "DLib is " 23 | if not cuda: 24 | msg += "NOT " 25 | msg += "compiled to use CUDA" 26 | if self.verbose: 27 | print(msg) 28 | return cuda 29 | 30 | def set_model_path(self): 31 | """ Model path handled by face_recognition_models """ 32 | return face_recognition_models.cnn_face_detector_model_location() 33 | 34 | def initialize(self, *args, **kwargs): 35 | """ Calculate batch size """ 36 | print("Initializing Dlib-CNN Detector...") 37 | super().initialize(*args, **kwargs) 38 | self.detector = dlib.cnn_face_detection_model_v1( # pylint: disable=c-extension-no-member 39 | self.model_path) 40 | is_cuda = self.compiled_for_cuda() 41 | if is_cuda: 42 | vram_free = self.get_vram_free() 43 | else: 44 | vram_free = 2048 45 | if self.verbose: 46 | print("Using CPU. Limiting RAM useage to " 47 | "{}MB".format(vram_free)) 48 | 49 | # Batch size of 2 actually uses about 338MB less than a single image?? 
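# Worked example of the formula below: a hypothetical card with 8192MB
# free and self.vram == 1600 gives int(((8192 - 1600) / 680) + 2) == 11,
# so eleven images would be batched per detector call.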
50 | # From there batches increase at ~680MB per item in the batch 51 | 52 | self.batch_size = int(((vram_free - self.vram) / 680) + 2) 53 | 54 | if self.batch_size < 1: 55 | raise ValueError("Insufficient VRAM available to continue " 56 | "({}MB)".format(int(vram_free))) 57 | 58 | if self.verbose: 59 | print("Processing in batches of {}".format(self.batch_size)) 60 | 61 | self.init.set() 62 | print("Initialized Dlib-CNN Detector.") 63 | 64 | def detect_faces(self, *args, **kwargs): 65 | """ Detect faces in rgb image """ 66 | super().detect_faces(*args, **kwargs) 67 | try: 68 | while True: 69 | exhausted, batch = self.get_batch() 70 | if not batch: 71 | break 72 | filenames, images = map(list, zip(*batch)) 73 | detect_images = self.compile_detection_images(images) 74 | batch_detected = self.detect_batch(detect_images) 75 | processed = self.process_output(batch_detected, 76 | indexes=None, 77 | rotation_matrix=None, 78 | output=None) 79 | if not all(faces 80 | for faces in processed) and self.rotation != [0]: 81 | processed = self.process_rotations(detect_images, 82 | processed) 83 | for idx, faces in enumerate(processed): 84 | retval = {"filename": filenames[idx], 85 | "image": images[idx], 86 | "detected_faces": faces} 87 | self.finalize(retval) 88 | if exhausted: 89 | break 90 | except: 91 | retval = {"exception": True} 92 | self.queues["out"].put(retval) 93 | del self.detector # Free up VRAM 94 | raise 95 | 96 | self.queues["out"].put("EOF") 97 | del self.detector # Free up VRAM 98 | 99 | def compile_detection_images(self, images): 100 | """ Compile the detection images into batches """ 101 | detect_images = list() 102 | for image in images: 103 | self.set_scale(image, is_square=True, scale_up=True) 104 | detect_images.append(self.set_detect_image(image)) 105 | return detect_images 106 | 107 | def detect_batch(self, detect_images, disable_message=False): 108 | """ Pass the batch through the detector for consistently sized images 109 | or each image separately for inconsistently sized images """ 110 | can_batch = self.check_batch_dims(detect_images) 111 | if can_batch: 112 | batch_detected = self.detector(detect_images, 0) 113 | else: 114 | if self.verbose and not disable_message: 115 | print("Batch has inconsistently sized images.
Processing one " 116 | "image at a time") 117 | batch_detected = dlib.mmod_rectangless( # pylint: disable=c-extension-no-member 118 | [self.detector(detect_image, 0) for detect_image in detect_images]) 119 | return batch_detected 120 | 121 | @staticmethod 122 | def check_batch_dims(images): 123 | """ Check all images are the same size for batching """ 124 | dims = set(frame.shape[:2] for frame in images) 125 | return len(dims) == 1 126 | 127 | def process_output(self, batch_detected, 128 | indexes=None, rotation_matrix=None, output=None): 129 | """ Process the output images """ 130 | output = output if output else list() 131 | for idx, faces in enumerate(batch_detected): 132 | detected_faces = list() 133 | 134 | if isinstance(rotation_matrix, np.ndarray): 135 | faces = [self.rotate_rect(face.rect, rotation_matrix) 136 | for face in faces] 137 | 138 | for face in faces: 139 | face = self.convert_to_dlib_rectangle(face) 140 | face = dlib.rectangle( # pylint: disable=c-extension-no-member 141 | int(face.left() / self.scale), 142 | int(face.top() / self.scale), 143 | int(face.right() / self.scale), 144 | int(face.bottom() / self.scale)) 145 | detected_faces.append(face) 146 | if indexes: 147 | target = indexes[idx] 148 | output[target] = detected_faces 149 | else: 150 | output.append(detected_faces) 151 | return output 152 | 153 | def process_rotations(self, detect_images, processed): 154 | """ Rotate frames missing faces until a face is found """ 155 | for angle in self.rotation: 156 | if all(faces for faces in processed): 157 | break 158 | if angle == 0: 159 | continue 160 | reprocess, indexes, rotmat = self.compile_reprocess( 161 | processed, 162 | detect_images, 163 | angle) 164 | 165 | batch_detected = self.detect_batch(reprocess, disable_message=True) 166 | if self.verbose and any(item for item in batch_detected): 167 | print("found face(s) by rotating image {} degrees".format( 168 | angle)) 169 | processed = self.process_output(batch_detected, 170 | indexes=indexes, 171 | rotation_matrix=rotmat, 172 | output=processed) 173 | return processed 174 | 175 | @staticmethod 176 | def compile_reprocess(processed, detect_images, angle): 177 | """ Rotate images which did not find a face for reprocessing """ 178 | indexes = list() 179 | to_detect = list() 180 | for idx, faces in enumerate(processed): 181 | if faces: 182 | continue 183 | image = detect_images[idx] 184 | rot_image, rot_matrix = rotate_image_by_angle(image, angle) 185 | to_detect.append(rot_image) 186 | indexes.append(idx) 187 | return to_detect, indexes, rot_matrix 188 | -------------------------------------------------------------------------------- /plugins/extract/detect/dlib_hog.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ DLIB HOG Face detection plugin """ 3 | from time import sleep 4 | 5 | import numpy as np 6 | 7 | from ._base import Detector, dlib 8 | 9 | 10 | class Detect(Detector): 11 | """ Dlib detector for face recognition """ 12 | def __init__(self, **kwargs): 13 | super().__init__(**kwargs) 14 | self.parent_is_pool = True 15 | self.target = (2048, 2048) # Doesn't use VRAM 16 | self.vram = 0 17 | self.detector = dlib.get_frontal_face_detector() 18 | self.iterator = None 19 | 20 | def set_model_path(self): 21 | """ No model for dlib HOG """ 22 | pass 23 | 24 | def initialize(self, *args, **kwargs): 25 | """ Initialize the HOG detector """ 26 | print("Initializing Dlib-HOG Detector...") 27 | super().initialize(*args, **kwargs) 28 | if self.verbose:
print("Using CPU for detection") 30 | self.init = True 31 | print("Initialized Dlib-HOG Detector...") 32 | 33 | def detect_faces(self, *args, **kwargs): 34 | """ Detect faces in rgb image """ 35 | super().detect_faces(*args, **kwargs) 36 | try: 37 | while True: 38 | item = self.queues["in"].get() 39 | if item in ("EOF", "END"): 40 | self.queues["in"].put("END") 41 | break 42 | 43 | filename, image = item 44 | detect_image = self.compile_detection_image(image, True, True) 45 | 46 | for angle in self.rotation: 47 | current_image, rotmat = self.rotate_image(detect_image, 48 | angle) 49 | 50 | faces = self.detector(current_image, 0) 51 | 52 | if self.verbose and angle != 0 and faces.any(): 53 | print("found face(s) by rotating image {} " 54 | "degrees".format(angle)) 55 | 56 | if faces: 57 | break 58 | 59 | detected_faces = self.process_output(faces, rotmat) 60 | retval = {"filename": filename, 61 | "image": image, 62 | "detected_faces": detected_faces} 63 | self.finalize(retval) 64 | except: 65 | retval = {"exception": True} 66 | self.queues["out"].put(retval) 67 | raise 68 | 69 | if item == "EOF": 70 | sleep(3) # Wait for all processes to finish before EOF (hacky!) 71 | self.queues["out"].put("EOF") 72 | 73 | def process_output(self, faces, rotation_matrix): 74 | """ Compile found faces for output """ 75 | if isinstance(rotation_matrix, np.ndarray): 76 | faces = [self.rotate_rect(face, rotation_matrix) 77 | for face in faces] 78 | detected = [dlib.rectangle(int(face.left() / self.scale), 79 | int(face.top() / self.scale), 80 | int(face.right() / self.scale), 81 | int(face.bottom() / self.scale)) 82 | for face in faces] 83 | return detected 84 | -------------------------------------------------------------------------------- /plugins/extract/detect/manual.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ Manual face detection plugin """ 3 | 4 | from ._base import Detector, dlib 5 | 6 | 7 | class Detect(Detector): 8 | """ Manual Detector """ 9 | def __init__(self, **kwargs): 10 | super().__init__(**kwargs) 11 | 12 | def set_model_path(self): 13 | """ No model required for Manual Detector """ 14 | return None 15 | 16 | def initialize(self, *args, **kwargs): 17 | """ Create the mtcnn detector """ 18 | print("Initializing Manual Detector...") 19 | super().initialize(*args, **kwargs) 20 | self.init.set() 21 | print("Initialized Manual Detector.") 22 | 23 | def detect_faces(self, *args, **kwargs): 24 | """ Return the given bounding box in a dlib rectangle """ 25 | super().detect_faces(*args, **kwargs) 26 | while True: 27 | item = self.queues["in"].get() 28 | if item == "EOF": 29 | break 30 | image, face = item 31 | 32 | bounding_box = [dlib.rectangle(int(face[0]), int(face[1]), 33 | int(face[2]), int(face[3]))] 34 | retval = {"image": image, 35 | "detected_faces": bounding_box} 36 | self.finalize(retval) 37 | 38 | self.queues["out"].put("EOF") 39 | -------------------------------------------------------------------------------- /plugins/model/Model_GAN/Model.py: -------------------------------------------------------------------------------- 1 | # Based on the https://github.com/shaoanlu/faceswap-GAN repo (master/temp/faceswap_GAN_keras.ipynb) 2 | 3 | from keras.models import Model 4 | from keras.layers import * 5 | from keras.layers.advanced_activations import LeakyReLU 6 | from keras.activations import relu 7 | from keras.initializers import RandomNormal 8 | from keras.applications import * 9 | from keras.optimizers import Adam 
10 | 11 | from lib.PixelShuffler import PixelShuffler 12 | from .instance_normalization import InstanceNormalization 13 | from lib.utils import backup_file 14 | 15 | from keras.utils import multi_gpu_model 16 | 17 | hdf = {'netGAH5': 'netGA_GAN.h5', 18 | 'netGBH5': 'netGB_GAN.h5', 19 | 'netDAH5': 'netDA_GAN.h5', 20 | 'netDBH5': 'netDB_GAN.h5'} 21 | 22 | def __conv_init(a): 23 | print("conv_init", a) 24 | k = RandomNormal(0, 0.02)(a) # for convolution kernel 25 | k.conv_weight = True 26 | return k 27 | 28 | #def batchnorm(): 29 | # return BatchNormalization(momentum=0.9, axis=channel_axis, epsilon=1.01e-5, gamma_initializer = gamma_init) 30 | 31 | def inst_norm(): 32 | return InstanceNormalization() 33 | 34 | conv_init = RandomNormal(0, 0.02) 35 | gamma_init = RandomNormal(1., 0.02) # for batch normalization 36 | 37 | class GANModel(): 38 | img_size = 64 39 | channels = 3 40 | img_shape = (img_size, img_size, channels) 41 | encoded_dim = 1024 42 | nc_in = 3 # number of input channels of generators 43 | nc_D_inp = 6 # number of input channels of discriminators 44 | 45 | def __init__(self, model_dir, gpus): 46 | self.model_dir = model_dir 47 | self.gpus = gpus 48 | 49 | optimizer = Adam(1e-4, 0.5) 50 | 51 | # Build and compile the discriminator 52 | self.netDA, self.netDB = self.build_discriminator() 53 | 54 | # Build and compile the generator 55 | self.netGA, self.netGB = self.build_generator() 56 | 57 | def converter(self, swap): 58 | predictor = self.netGB if not swap else self.netGA 59 | return lambda img: predictor.predict(img) 60 | 61 | def build_generator(self): 62 | 63 | def conv_block(input_tensor, f): 64 | x = input_tensor 65 | x = Conv2D(f, kernel_size=3, strides=2, kernel_initializer=conv_init, use_bias=False, padding="same")(x) 66 | x = Activation("relu")(x) 67 | return x 68 | 69 | def res_block(input_tensor, f): 70 | x = input_tensor 71 | x = Conv2D(f, kernel_size=3, kernel_initializer=conv_init, use_bias=False, padding="same")(x) 72 | x = LeakyReLU(alpha=0.2)(x) 73 | x = Conv2D(f, kernel_size=3, kernel_initializer=conv_init, use_bias=False, padding="same")(x) 74 | x = add([x, input_tensor]) 75 | x = LeakyReLU(alpha=0.2)(x) 76 | return x 77 | 78 | def upscale_ps(filters, use_instance_norm=True): 79 | def block(x): 80 | x = Conv2D(filters*4, kernel_size=3, use_bias=False, kernel_initializer=RandomNormal(0, 0.02), padding='same')(x) 81 | x = LeakyReLU(0.1)(x) 82 | x = PixelShuffler()(x) 83 | return x 84 | return block 85 | 86 | def Encoder(nc_in=3, input_size=64): 87 | inp = Input(shape=(input_size, input_size, nc_in)) 88 | x = Conv2D(64, kernel_size=5, kernel_initializer=conv_init, use_bias=False, padding="same")(inp) 89 | x = conv_block(x,128) 90 | x = conv_block(x,256) 91 | x = conv_block(x,512) 92 | x = conv_block(x,1024) 93 | x = Dense(1024)(Flatten()(x)) 94 | x = Dense(4*4*1024)(x) 95 | x = Reshape((4, 4, 1024))(x) 96 | out = upscale_ps(512)(x) 97 | return Model(inputs=inp, outputs=out) 98 | 99 | def Decoder_ps(nc_in=512, input_size=8): 100 | input_ = Input(shape=(input_size, input_size, nc_in)) 101 | x = input_ 102 | x = upscale_ps(256)(x) 103 | x = upscale_ps(128)(x) 104 | x = upscale_ps(64)(x) 105 | x = res_block(x, 64) 106 | x = res_block(x, 64) 107 | #x = Conv2D(4, kernel_size=5, padding='same')(x) 108 | alpha = Conv2D(1, kernel_size=5, padding='same', activation="sigmoid")(x) 109 | rgb = Conv2D(3, kernel_size=5, padding='same', activation="tanh")(x) 110 | out = concatenate([alpha, rgb]) 111 | return Model(input_, out ) 112 | 113 | encoder = Encoder() 114 | decoder_A = 
Decoder_ps() 115 | decoder_B = Decoder_ps() 116 | x = Input(shape=self.img_shape) 117 | netGA = Model(x, decoder_A(encoder(x))) 118 | netGB = Model(x, decoder_B(encoder(x))) 119 | 120 | self.netGA_sm = netGA 121 | self.netGB_sm = netGB 122 | 123 | try: 124 | netGA.load_weights(str(self.model_dir / hdf['netGAH5'])) 125 | netGB.load_weights(str(self.model_dir / hdf['netGBH5'])) 126 | print ("Generator models loaded.") 127 | except: 128 | print ("Generator weights files not found.") 129 | pass 130 | 131 | if self.gpus > 1: 132 | netGA = multi_gpu_model( self.netGA_sm , self.gpus) 133 | netGB = multi_gpu_model( self.netGB_sm , self.gpus) 134 | 135 | return netGA, netGB 136 | 137 | def build_discriminator(self): 138 | def conv_block_d(input_tensor, f, use_instance_norm=True): 139 | x = input_tensor 140 | x = Conv2D(f, kernel_size=4, strides=2, kernel_initializer=conv_init, use_bias=False, padding="same")(x) 141 | x = LeakyReLU(alpha=0.2)(x) 142 | return x 143 | 144 | def Discriminator(nc_in, input_size=64): 145 | inp = Input(shape=(input_size, input_size, nc_in)) 146 | #x = GaussianNoise(0.05)(inp) 147 | x = conv_block_d(inp, 64, False) 148 | x = conv_block_d(x, 128, False) 149 | x = conv_block_d(x, 256, False) 150 | out = Conv2D(1, kernel_size=4, kernel_initializer=conv_init, use_bias=False, padding="same", activation="sigmoid")(x) 151 | return Model(inputs=[inp], outputs=out) 152 | 153 | netDA = Discriminator(self.nc_D_inp) 154 | netDB = Discriminator(self.nc_D_inp) 155 | try: 156 | netDA.load_weights(str(self.model_dir / hdf['netDAH5'])) 157 | netDB.load_weights(str(self.model_dir / hdf['netDBH5'])) 158 | print ("Discriminator models loaded.") 159 | except: 160 | print ("Discriminator weights files not found.") 161 | pass 162 | return netDA, netDB 163 | 164 | def load(self, swapped): 165 | if swapped: 166 | print("swapping not supported on GAN") 167 | # TODO load is done in __init__ => look how to swap if possible 168 | return True 169 | 170 | def save_weights(self): 171 | model_dir = str(self.model_dir) 172 | for model in hdf.values(): 173 | backup_file(model_dir, model) 174 | if self.gpus > 1: 175 | self.netGA_sm.save_weights(str(self.model_dir / hdf['netGAH5'])) 176 | self.netGB_sm.save_weights(str(self.model_dir / hdf['netGBH5'])) 177 | else: 178 | self.netGA.save_weights(str(self.model_dir / hdf['netGAH5'])) 179 | self.netGB.save_weights(str(self.model_dir / hdf['netGBH5'])) 180 | self.netDA.save_weights(str(self.model_dir / hdf['netDAH5'])) 181 | self.netDB.save_weights(str(self.model_dir / hdf['netDBH5'])) 182 | print ("Models saved.") 183 | -------------------------------------------------------------------------------- /plugins/model/Model_GAN/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | __author__ = """Based on https://github.com/shaoanlu/""" 4 | __version__ = '0.1.0' 5 | 6 | from .Model import GANModel as Model 7 | from .Trainer import Trainer -------------------------------------------------------------------------------- /plugins/model/Model_GAN/instance_normalization.py: -------------------------------------------------------------------------------- 1 | from keras.engine import Layer, InputSpec 2 | from keras import initializers, regularizers, constraints 3 | from keras import backend as K 4 | from keras.utils.generic_utils import get_custom_objects 5 | 6 | import numpy as np 7 | 8 | 9 | class InstanceNormalization(Layer): 10 | """Instance normalization layer (Lei Ba et al, 2016, Ulyanov et 
al., 2016). 11 | Normalize the activations of the previous layer at each step, 12 | i.e. applies a transformation that maintains the mean activation 13 | close to 0 and the activation standard deviation close to 1. 14 | # Arguments 15 | axis: Integer, the axis that should be normalized 16 | (typically the features axis). 17 | For instance, after a `Conv2D` layer with 18 | `data_format="channels_first"`, 19 | set `axis=1` in `InstanceNormalization`. 20 | Setting `axis=None` will normalize all values in each instance of the batch. 21 | Axis 0 is the batch dimension. `axis` cannot be set to 0 to avoid errors. 22 | epsilon: Small float added to variance to avoid dividing by zero. 23 | center: If True, add offset of `beta` to normalized tensor. 24 | If False, `beta` is ignored. 25 | scale: If True, multiply by `gamma`. 26 | If False, `gamma` is not used. 27 | When the next layer is linear (also e.g. `nn.relu`), 28 | this can be disabled since the scaling 29 | will be done by the next layer. 30 | beta_initializer: Initializer for the beta weight. 31 | gamma_initializer: Initializer for the gamma weight. 32 | beta_regularizer: Optional regularizer for the beta weight. 33 | gamma_regularizer: Optional regularizer for the gamma weight. 34 | beta_constraint: Optional constraint for the beta weight. 35 | gamma_constraint: Optional constraint for the gamma weight. 36 | # Input shape 37 | Arbitrary. Use the keyword argument `input_shape` 38 | (tuple of integers, does not include the samples axis) 39 | when using this layer as the first layer in a model. 40 | # Output shape 41 | Same shape as input. 42 | # References 43 | - [Layer Normalization](https://arxiv.org/abs/1607.06450) 44 | - [Instance Normalization: The Missing Ingredient for Fast Stylization](https://arxiv.org/abs/1607.08022) 45 | """ 46 | def __init__(self, 47 | axis=None, 48 | epsilon=1e-3, 49 | center=True, 50 | scale=True, 51 | beta_initializer='zeros', 52 | gamma_initializer='ones', 53 | beta_regularizer=None, 54 | gamma_regularizer=None, 55 | beta_constraint=None, 56 | gamma_constraint=None, 57 | **kwargs): 58 | super(InstanceNormalization, self).__init__(**kwargs) 59 | self.supports_masking = True 60 | self.axis = axis 61 | self.epsilon = epsilon 62 | self.center = center 63 | self.scale = scale 64 | self.beta_initializer = initializers.get(beta_initializer) 65 | self.gamma_initializer = initializers.get(gamma_initializer) 66 | self.beta_regularizer = regularizers.get(beta_regularizer) 67 | self.gamma_regularizer = regularizers.get(gamma_regularizer) 68 | self.beta_constraint = constraints.get(beta_constraint) 69 | self.gamma_constraint = constraints.get(gamma_constraint) 70 | 71 | def build(self, input_shape): 72 | ndim = len(input_shape) 73 | if self.axis == 0: 74 | raise ValueError('Axis cannot be zero') 75 | 76 | if (self.axis is not None) and (ndim == 2): 77 | raise ValueError('Cannot specify axis for rank 1 tensor') 78 | 79 | self.input_spec = InputSpec(ndim=ndim) 80 | 81 | if self.axis is None: 82 | shape = (1,) 83 | else: 84 | shape = (input_shape[self.axis],) 85 | 86 | if self.scale: 87 | self.gamma = self.add_weight(shape=shape, 88 | name='gamma', 89 | initializer=self.gamma_initializer, 90 | regularizer=self.gamma_regularizer, 91 | constraint=self.gamma_constraint) 92 | else: 93 | self.gamma = None 94 | if self.center: 95 | self.beta = self.add_weight(shape=shape, 96 | name='beta', 97 | initializer=self.beta_initializer, 98 | regularizer=self.beta_regularizer, 99 | constraint=self.beta_constraint) 100 | else: 101 | 
self.beta = None 102 | self.built = True 103 | 104 | def call(self, inputs, training=None): 105 | input_shape = K.int_shape(inputs) 106 | reduction_axes = list(range(0, len(input_shape))) 107 | 108 | if (self.axis is not None): 109 | del reduction_axes[self.axis] 110 | 111 | del reduction_axes[0] 112 | 113 | mean = K.mean(inputs, reduction_axes, keepdims=True) 114 | stddev = K.std(inputs, reduction_axes, keepdims=True) + self.epsilon 115 | normed = (inputs - mean) / stddev 116 | 117 | broadcast_shape = [1] * len(input_shape) 118 | if self.axis is not None: 119 | broadcast_shape[self.axis] = input_shape[self.axis] 120 | 121 | if self.scale: 122 | broadcast_gamma = K.reshape(self.gamma, broadcast_shape) 123 | normed = normed * broadcast_gamma 124 | if self.center: 125 | broadcast_beta = K.reshape(self.beta, broadcast_shape) 126 | normed = normed + broadcast_beta 127 | return normed 128 | 129 | def get_config(self): 130 | config = { 131 | 'axis': self.axis, 132 | 'epsilon': self.epsilon, 133 | 'center': self.center, 134 | 'scale': self.scale, 135 | 'beta_initializer': initializers.serialize(self.beta_initializer), 136 | 'gamma_initializer': initializers.serialize(self.gamma_initializer), 137 | 'beta_regularizer': regularizers.serialize(self.beta_regularizer), 138 | 'gamma_regularizer': regularizers.serialize(self.gamma_regularizer), 139 | 'beta_constraint': constraints.serialize(self.beta_constraint), 140 | 'gamma_constraint': constraints.serialize(self.gamma_constraint) 141 | } 142 | base_config = super(InstanceNormalization, self).get_config() 143 | return dict(list(base_config.items()) + list(config.items())) 144 | 145 | get_custom_objects().update({'InstanceNormalization': InstanceNormalization}) 146 | -------------------------------------------------------------------------------- /plugins/model/Model_GAN128/Model.py: -------------------------------------------------------------------------------- 1 | # Based on the https://github.com/shaoanlu/faceswap-GAN repo 2 | # source : https://github.com/shaoanlu/faceswap-GAN/blob/master/FaceSwap_GAN_v2_sz128_train.ipynbtemp/faceswap_GAN_keras.ipynb 3 | 4 | from keras.models import Model 5 | from keras.layers import * 6 | from keras.layers.advanced_activations import LeakyReLU 7 | from keras.activations import relu 8 | from keras.initializers import RandomNormal 9 | from keras.applications import * 10 | from keras.optimizers import Adam 11 | 12 | from lib.PixelShuffler import PixelShuffler 13 | from .instance_normalization import InstanceNormalization 14 | from lib.utils import backup_file 15 | 16 | from keras.utils import multi_gpu_model 17 | 18 | hdf = {'netGAH5':'netGA_GAN128.h5', 19 | 'netGBH5': 'netGB_GAN128.h5', 20 | 'netDAH5': 'netDA_GAN128.h5', 21 | 'netDBH5': 'netDB_GAN128.h5'} 22 | 23 | def __conv_init(a): 24 | print("conv_init", a) 25 | k = RandomNormal(0, 0.02)(a) # for convolution kernel 26 | k.conv_weight = True 27 | return k 28 | 29 | #def batchnorm(): 30 | # return BatchNormalization(momentum=0.9, axis=channel_axis, epsilon=1.01e-5, gamma_initializer = gamma_init) 31 | 32 | def inst_norm(): 33 | return InstanceNormalization() 34 | 35 | conv_init = RandomNormal(0, 0.02) 36 | gamma_init = RandomNormal(1., 0.02) # for batch normalization 37 | 38 | class GANModel(): 39 | img_size = 128 40 | channels = 3 41 | img_shape = (img_size, img_size, channels) 42 | encoded_dim = 1024 43 | nc_in = 3 # number of input channels of generators 44 | nc_D_inp = 6 # number of input channels of discriminators 45 | 46 | def __init__(self, model_dir, 
gpus): 47 | self.model_dir = model_dir 48 | self.gpus = gpus 49 | 50 | optimizer = Adam(1e-4, 0.5) 51 | 52 | # Build and compile the discriminator 53 | self.netDA, self.netDB = self.build_discriminator() 54 | 55 | # Build and compile the generator 56 | self.netGA, self.netGB = self.build_generator() 57 | 58 | def converter(self, swap): 59 | predictor = self.netGB if not swap else self.netGA 60 | return lambda img: predictor.predict(img) 61 | 62 | def build_generator(self): 63 | 64 | def conv_block(input_tensor, f, use_instance_norm=True): 65 | x = input_tensor 66 | x = SeparableConv2D(f, kernel_size=3, strides=2, kernel_initializer=conv_init, use_bias=False, padding="same")(x) 67 | if use_instance_norm: 68 | x = inst_norm()(x) 69 | x = Activation("relu")(x) 70 | return x 71 | 72 | def res_block(input_tensor, f, dilation=1): 73 | x = input_tensor 74 | x = Conv2D(f, kernel_size=3, kernel_initializer=conv_init, use_bias=False, padding="same", dilation_rate=dilation)(x) 75 | x = LeakyReLU(alpha=0.2)(x) 76 | x = Conv2D(f, kernel_size=3, kernel_initializer=conv_init, use_bias=False, padding="same", dilation_rate=dilation)(x) 77 | x = add([x, input_tensor]) 78 | #x = LeakyReLU(alpha=0.2)(x) 79 | return x 80 | 81 | def upscale_ps(filters, use_instance_norm=True): 82 | def block(x, use_instance_norm=use_instance_norm): 83 | x = Conv2D(filters*4, kernel_size=3, use_bias=False, kernel_initializer=RandomNormal(0, 0.02), padding='same')(x) 84 | if use_instance_norm: 85 | x = inst_norm()(x) 86 | x = LeakyReLU(0.1)(x) 87 | x = PixelShuffler()(x) 88 | return x 89 | return block 90 | 91 | def Encoder(nc_in=3, input_size=128): 92 | inp = Input(shape=(input_size, input_size, nc_in)) 93 | x = Conv2D(32, kernel_size=5, kernel_initializer=conv_init, use_bias=False, padding="same")(inp) 94 | x = conv_block(x,64, use_instance_norm=False) 95 | x = conv_block(x,128) 96 | x = conv_block(x,256) 97 | x = conv_block(x,512) 98 | x = conv_block(x,1024) 99 | x = Dense(1024)(Flatten()(x)) 100 | x = Dense(4*4*1024)(x) 101 | x = Reshape((4, 4, 1024))(x) 102 | out = upscale_ps(512)(x) 103 | return Model(inputs=inp, outputs=out) 104 | 105 | def Decoder_ps(nc_in=512, input_size=8): 106 | input_ = Input(shape=(input_size, input_size, nc_in)) 107 | x = input_ 108 | x = upscale_ps(256)(x) 109 | x = upscale_ps(128)(x) 110 | x = upscale_ps(64)(x) 111 | x = res_block(x, 64, dilation=2) 112 | 113 | out64 = Conv2D(64, kernel_size=3, padding='same')(x) 114 | out64 = LeakyReLU(alpha=0.1)(out64) 115 | out64 = Conv2D(3, kernel_size=5, padding='same', activation="tanh")(out64) 116 | 117 | x = upscale_ps(32)(x) 118 | x = res_block(x, 32) 119 | x = res_block(x, 32) 120 | alpha = Conv2D(1, kernel_size=5, padding='same', activation="sigmoid")(x) 121 | rgb = Conv2D(3, kernel_size=5, padding='same', activation="tanh")(x) 122 | out = concatenate([alpha, rgb]) 123 | return Model(input_, [out, out64] ) 124 | 125 | encoder = Encoder() 126 | decoder_A = Decoder_ps() 127 | decoder_B = Decoder_ps() 128 | x = Input(shape=self.img_shape) 129 | netGA = Model(x, decoder_A(encoder(x))) 130 | netGB = Model(x, decoder_B(encoder(x))) 131 | netGA.output_names = ["netGA_out_1", "netGA_out_2"] # Workarounds till https://github.com/keras-team/keras/issues/8962 is fixed. 
132 | netGB.output_names = ["netGB_out_1", "netGB_out_2"] # 133 | 134 | self.netGA_sm = netGA 135 | self.netGB_sm = netGB 136 | 137 | try: 138 | netGA.load_weights(str(self.model_dir / hdf['netGAH5'])) 139 | netGB.load_weights(str(self.model_dir / hdf['netGBH5'])) 140 | print ("Generator models loaded.") 141 | except: 142 | print ("Generator weights files not found.") 143 | pass 144 | 145 | if self.gpus > 1: 146 | netGA = multi_gpu_model( self.netGA_sm , self.gpus) 147 | netGB = multi_gpu_model( self.netGB_sm , self.gpus) 148 | 149 | return netGA, netGB 150 | 151 | def build_discriminator(self): 152 | def conv_block_d(input_tensor, f, use_instance_norm=True): 153 | x = input_tensor 154 | x = Conv2D(f, kernel_size=4, strides=2, kernel_initializer=conv_init, use_bias=False, padding="same")(x) 155 | if use_instance_norm: 156 | x = inst_norm()(x) 157 | x = LeakyReLU(alpha=0.2)(x) 158 | return x 159 | 160 | def Discriminator(nc_in, input_size=128): 161 | inp = Input(shape=(input_size, input_size, nc_in)) 162 | #x = GaussianNoise(0.05)(inp) 163 | x = conv_block_d(inp, 64, False) 164 | x = conv_block_d(x, 128, True) 165 | x = conv_block_d(x, 256, True) 166 | x = conv_block_d(x, 512, True) 167 | out = Conv2D(1, kernel_size=4, kernel_initializer=conv_init, use_bias=False, padding="same", activation="sigmoid")(x) 168 | return Model(inputs=[inp], outputs=out) 169 | 170 | netDA = Discriminator(self.nc_D_inp) 171 | netDB = Discriminator(self.nc_D_inp) 172 | 173 | try: 174 | netDA.load_weights(str(self.model_dir / hdf['netDAH5'])) 175 | netDB.load_weights(str(self.model_dir / hdf['netDBH5'])) 176 | print ("Discriminator models loaded.") 177 | except: 178 | print ("Discriminator weights files not found.") 179 | pass 180 | return netDA, netDB 181 | 182 | def load(self, swapped): 183 | if swapped: 184 | print("swapping not supported on GAN") 185 | # TODO load is done in __init__ => look how to swap if possible 186 | return True 187 | 188 | def save_weights(self): 189 | model_dir = str(self.model_dir) 190 | for model in hdf.values(): 191 | backup_file(model_dir, model) 192 | if self.gpus > 1: 193 | self.netGA_sm.save_weights(str(self.model_dir / hdf['netGAH5'])) 194 | self.netGB_sm.save_weights(str(self.model_dir / hdf['netGBH5'])) 195 | else: 196 | self.netGA.save_weights(str(self.model_dir / hdf['netGAH5'])) 197 | self.netGB.save_weights(str(self.model_dir / hdf['netGBH5'])) 198 | self.netDA.save_weights(str(self.model_dir / hdf['netDAH5'])) 199 | self.netDB.save_weights(str(self.model_dir / hdf['netDBH5'])) 200 | print ("Models saved.") 201 | -------------------------------------------------------------------------------- /plugins/model/Model_GAN128/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | __author__ = """Based on https://github.com/shaoanlu/""" 4 | __version__ = '0.1.0' 5 | 6 | from .Model import GANModel as Model 7 | from .Trainer import Trainer -------------------------------------------------------------------------------- /plugins/model/Model_GAN128/instance_normalization.py: -------------------------------------------------------------------------------- 1 | from keras.engine import Layer, InputSpec 2 | from keras import initializers, regularizers, constraints 3 | from keras import backend as K 4 | from keras.utils.generic_utils import get_custom_objects 5 | 6 | import numpy as np 7 | 8 | 9 | class InstanceNormalization(Layer): 10 | """Instance normalization layer (Lei Ba et al, 2016, Ulyanov et al., 2016). 
11 | Normalize the activations of the previous layer at each step, 12 | i.e. applies a transformation that maintains the mean activation 13 | close to 0 and the activation standard deviation close to 1. 14 | # Arguments 15 | axis: Integer, the axis that should be normalized 16 | (typically the features axis). 17 | For instance, after a `Conv2D` layer with 18 | `data_format="channels_first"`, 19 | set `axis=1` in `InstanceNormalization`. 20 | Setting `axis=None` will normalize all values in each instance of the batch. 21 | Axis 0 is the batch dimension. `axis` cannot be set to 0 to avoid errors. 22 | epsilon: Small float added to variance to avoid dividing by zero. 23 | center: If True, add offset of `beta` to normalized tensor. 24 | If False, `beta` is ignored. 25 | scale: If True, multiply by `gamma`. 26 | If False, `gamma` is not used. 27 | When the next layer is linear (also e.g. `nn.relu`), 28 | this can be disabled since the scaling 29 | will be done by the next layer. 30 | beta_initializer: Initializer for the beta weight. 31 | gamma_initializer: Initializer for the gamma weight. 32 | beta_regularizer: Optional regularizer for the beta weight. 33 | gamma_regularizer: Optional regularizer for the gamma weight. 34 | beta_constraint: Optional constraint for the beta weight. 35 | gamma_constraint: Optional constraint for the gamma weight. 36 | # Input shape 37 | Arbitrary. Use the keyword argument `input_shape` 38 | (tuple of integers, does not include the samples axis) 39 | when using this layer as the first layer in a model. 40 | # Output shape 41 | Same shape as input. 42 | # References 43 | - [Layer Normalization](https://arxiv.org/abs/1607.06450) 44 | - [Instance Normalization: The Missing Ingredient for Fast Stylization](https://arxiv.org/abs/1607.08022) 45 | """ 46 | def __init__(self, 47 | axis=None, 48 | epsilon=1e-3, 49 | center=True, 50 | scale=True, 51 | beta_initializer='zeros', 52 | gamma_initializer='ones', 53 | beta_regularizer=None, 54 | gamma_regularizer=None, 55 | beta_constraint=None, 56 | gamma_constraint=None, 57 | **kwargs): 58 | super(InstanceNormalization, self).__init__(**kwargs) 59 | self.supports_masking = True 60 | self.axis = axis 61 | self.epsilon = epsilon 62 | self.center = center 63 | self.scale = scale 64 | self.beta_initializer = initializers.get(beta_initializer) 65 | self.gamma_initializer = initializers.get(gamma_initializer) 66 | self.beta_regularizer = regularizers.get(beta_regularizer) 67 | self.gamma_regularizer = regularizers.get(gamma_regularizer) 68 | self.beta_constraint = constraints.get(beta_constraint) 69 | self.gamma_constraint = constraints.get(gamma_constraint) 70 | 71 | def build(self, input_shape): 72 | ndim = len(input_shape) 73 | if self.axis == 0: 74 | raise ValueError('Axis cannot be zero') 75 | 76 | if (self.axis is not None) and (ndim == 2): 77 | raise ValueError('Cannot specify axis for rank 1 tensor') 78 | 79 | self.input_spec = InputSpec(ndim=ndim) 80 | 81 | if self.axis is None: 82 | shape = (1,) 83 | else: 84 | shape = (input_shape[self.axis],) 85 | 86 | if self.scale: 87 | self.gamma = self.add_weight(shape=shape, 88 | name='gamma', 89 | initializer=self.gamma_initializer, 90 | regularizer=self.gamma_regularizer, 91 | constraint=self.gamma_constraint) 92 | else: 93 | self.gamma = None 94 | if self.center: 95 | self.beta = self.add_weight(shape=shape, 96 | name='beta', 97 | initializer=self.beta_initializer, 98 | regularizer=self.beta_regularizer, 99 | constraint=self.beta_constraint) 100 | else: 101 | self.beta = None 102 
| self.built = True 103 | 104 | def call(self, inputs, training=None): 105 | input_shape = K.int_shape(inputs) 106 | reduction_axes = list(range(0, len(input_shape))) 107 | 108 | if (self.axis is not None): 109 | del reduction_axes[self.axis] 110 | 111 | del reduction_axes[0] 112 | 113 | mean = K.mean(inputs, reduction_axes, keepdims=True) 114 | stddev = K.std(inputs, reduction_axes, keepdims=True) + self.epsilon 115 | normed = (inputs - mean) / stddev 116 | 117 | broadcast_shape = [1] * len(input_shape) 118 | if self.axis is not None: 119 | broadcast_shape[self.axis] = input_shape[self.axis] 120 | 121 | if self.scale: 122 | broadcast_gamma = K.reshape(self.gamma, broadcast_shape) 123 | normed = normed * broadcast_gamma 124 | if self.center: 125 | broadcast_beta = K.reshape(self.beta, broadcast_shape) 126 | normed = normed + broadcast_beta 127 | return normed 128 | 129 | def get_config(self): 130 | config = { 131 | 'axis': self.axis, 132 | 'epsilon': self.epsilon, 133 | 'center': self.center, 134 | 'scale': self.scale, 135 | 'beta_initializer': initializers.serialize(self.beta_initializer), 136 | 'gamma_initializer': initializers.serialize(self.gamma_initializer), 137 | 'beta_regularizer': regularizers.serialize(self.beta_regularizer), 138 | 'gamma_regularizer': regularizers.serialize(self.gamma_regularizer), 139 | 'beta_constraint': constraints.serialize(self.beta_constraint), 140 | 'gamma_constraint': constraints.serialize(self.gamma_constraint) 141 | } 142 | base_config = super(InstanceNormalization, self).get_config() 143 | return dict(list(base_config.items()) + list(config.items())) 144 | 145 | get_custom_objects().update({'InstanceNormalization': InstanceNormalization}) 146 | -------------------------------------------------------------------------------- /plugins/model/Model_IAE/AutoEncoder.py: -------------------------------------------------------------------------------- 1 | # Improved-AutoEncoder base classes 2 | 3 | from lib.utils import backup_file 4 | 5 | hdf = {'encoderH5': 'IAE_encoder.h5', 6 | 'decoderH5': 'IAE_decoder.h5', 7 | 'inter_AH5': 'IAE_inter_A.h5', 8 | 'inter_BH5': 'IAE_inter_B.h5', 9 | 'inter_bothH5': 'IAE_inter_both.h5'} 10 | 11 | 12 | class AutoEncoder: 13 | def __init__(self, model_dir, gpus): 14 | self.model_dir = model_dir 15 | self.gpus = gpus 16 | 17 | self.encoder = self.Encoder() 18 | self.decoder = self.Decoder() 19 | self.inter_A = self.Intermidiate() 20 | self.inter_B = self.Intermidiate() 21 | self.inter_both = self.Intermidiate() 22 | 23 | self.initModel() 24 | 25 | def load(self, swapped): 26 | (face_A,face_B) = (hdf['inter_AH5'], hdf['inter_BH5']) if not swapped else (hdf['inter_BH5'], hdf['inter_AH5']) 27 | 28 | try: 29 | self.encoder.load_weights(str(self.model_dir / hdf['encoderH5'])) 30 | self.decoder.load_weights(str(self.model_dir / hdf['decoderH5'])) 31 | self.inter_both.load_weights(str(self.model_dir / hdf['inter_bothH5'])) 32 | self.inter_A.load_weights(str(self.model_dir / face_A)) 33 | self.inter_B.load_weights(str(self.model_dir / face_B)) 34 | print('loaded model weights') 35 | return True 36 | except Exception as e: 37 | print('Failed loading existing training data.') 38 | print(e) 39 | return False 40 | 41 | def save_weights(self): 42 | model_dir = str(self.model_dir) 43 | for model in hdf.values(): 44 | backup_file(model_dir, model) 45 | self.encoder.save_weights(str(self.model_dir / hdf['encoderH5'])) 46 | self.decoder.save_weights(str(self.model_dir / hdf['decoderH5'])) 47 | self.inter_both.save_weights(str(self.model_dir / 
hdf['inter_bothH5'])) 48 | self.inter_A.save_weights(str(self.model_dir / hdf['inter_AH5'])) 49 | self.inter_B.save_weights(str(self.model_dir / hdf['inter_BH5'])) 50 | print('saved model weights') 51 | -------------------------------------------------------------------------------- /plugins/model/Model_IAE/Model.py: -------------------------------------------------------------------------------- 1 | # Improved autoencoder for faceswap. 2 | 3 | from keras.models import Model as KerasModel 4 | from keras.layers import Input, Dense, Flatten, Reshape, Concatenate 5 | from keras.layers.advanced_activations import LeakyReLU 6 | from keras.layers.convolutional import Conv2D 7 | from keras.optimizers import Adam 8 | 9 | from .AutoEncoder import AutoEncoder 10 | from lib.PixelShuffler import PixelShuffler 11 | 12 | from keras.utils import multi_gpu_model 13 | 14 | IMAGE_SHAPE = (64, 64, 3) 15 | ENCODER_DIM = 1024 16 | 17 | class Model(AutoEncoder): 18 | def initModel(self): 19 | optimizer = Adam(lr=5e-5, beta_1=0.5, beta_2=0.999) 20 | x = Input(shape=IMAGE_SHAPE) 21 | 22 | self.autoencoder_A = KerasModel(x, self.decoder(Concatenate()([self.inter_A(self.encoder(x)), self.inter_both(self.encoder(x))]))) 23 | self.autoencoder_B = KerasModel(x, self.decoder(Concatenate()([self.inter_B(self.encoder(x)), self.inter_both(self.encoder(x))]))) 24 | 25 | if self.gpus > 1: 26 | self.autoencoder_A = multi_gpu_model( self.autoencoder_A , self.gpus) 27 | self.autoencoder_B = multi_gpu_model( self.autoencoder_B , self.gpus) 28 | 29 | self.autoencoder_A.compile(optimizer=optimizer, loss='mean_absolute_error') 30 | self.autoencoder_B.compile(optimizer=optimizer, loss='mean_absolute_error') 31 | 32 | def converter(self, swap): 33 | autoencoder = self.autoencoder_B if not swap else self.autoencoder_A 34 | return lambda img: autoencoder.predict(img) 35 | 36 | def conv(self, filters): 37 | def block(x): 38 | x = Conv2D(filters, kernel_size=5, strides=2, padding='same')(x) 39 | x = LeakyReLU(0.1)(x) 40 | return x 41 | return block 42 | 43 | def upscale(self, filters): 44 | def block(x): 45 | x = Conv2D(filters * 4, kernel_size=3, padding='same')(x) 46 | x = LeakyReLU(0.1)(x) 47 | x = PixelShuffler()(x) 48 | return x 49 | return block 50 | 51 | def Encoder(self): 52 | input_ = Input(shape=IMAGE_SHAPE) 53 | x = input_ 54 | x = self.conv(128)(x) 55 | x = self.conv(256)(x) 56 | x = self.conv(512)(x) 57 | x = self.conv(1024)(x) 58 | x = Flatten()(x) 59 | return KerasModel(input_, x) 60 | 61 | def Intermidiate(self): 62 | input_ = Input(shape=(None, 4 * 4 * 1024)) 63 | x = input_ 64 | x = Dense(ENCODER_DIM)(x) 65 | x = Dense(4 * 4 * int(ENCODER_DIM/2))(x) 66 | x = Reshape((4, 4, int(ENCODER_DIM/2)))(x) 67 | return KerasModel(input_, x) 68 | 69 | def Decoder(self): 70 | input_ = Input(shape=(4, 4, ENCODER_DIM)) 71 | x = input_ 72 | x = self.upscale(512)(x) 73 | x = self.upscale(256)(x) 74 | x = self.upscale(128)(x) 75 | x = self.upscale(64)(x) 76 | x = Conv2D(3, kernel_size=5, padding='same', activation='sigmoid')(x) 77 | return KerasModel(input_, x) 78 | -------------------------------------------------------------------------------- /plugins/model/Model_IAE/Trainer.py: -------------------------------------------------------------------------------- 1 | 2 | import time 3 | import numpy 4 | from lib.training_data import TrainingDataGenerator, stack_images 5 | 6 | 7 | class Trainer(): 8 | random_transform_args = { 9 | 'rotation_range': 10, 10 | 'zoom_range': 0.05, 11 | 'shift_range': 0.05, 12 | 'random_flip': 0.4, 13 | } 14 | 
15 | def __init__(self, model, fn_A, fn_B, batch_size, *args): 16 | self.batch_size = batch_size 17 | self.model = model 18 | 19 | generator = TrainingDataGenerator(self.random_transform_args, 160) 20 | self.images_A = generator.minibatchAB(fn_A, self.batch_size) 21 | self.images_B = generator.minibatchAB(fn_B, self.batch_size) 22 | 23 | def train_one_step(self, iter, viewer): 24 | epoch, warped_A, target_A = next(self.images_A) 25 | epoch, warped_B, target_B = next(self.images_B) 26 | 27 | loss_A = self.model.autoencoder_A.train_on_batch(warped_A, target_A) 28 | loss_B = self.model.autoencoder_B.train_on_batch(warped_B, target_B) 29 | print("[{0}] [#{1:05d}] loss_A: {2:.5f}, loss_B: {3:.5f}".format(time.strftime("%H:%M:%S"), iter, loss_A, loss_B), 30 | end='\r') 31 | 32 | if viewer is not None: 33 | viewer(self.show_sample(target_A[0:14], target_B[0:14]), "training") 34 | 35 | def show_sample(self, test_A, test_B): 36 | figure_A = numpy.stack([ 37 | test_A, 38 | self.model.autoencoder_A.predict(test_A), 39 | self.model.autoencoder_B.predict(test_A), 40 | ], axis=1) 41 | figure_B = numpy.stack([ 42 | test_B, 43 | self.model.autoencoder_B.predict(test_B), 44 | self.model.autoencoder_A.predict(test_B), 45 | ], axis=1) 46 | 47 | figure = numpy.concatenate([figure_A, figure_B], axis=0) 48 | figure = figure.reshape((4, 7) + figure.shape[1:]) 49 | figure = stack_images(figure) 50 | 51 | return numpy.clip(figure * 255, 0, 255).astype('uint8') 52 | -------------------------------------------------------------------------------- /plugins/model/Model_IAE/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | __author__ = """acsaga""" 4 | __version__ = '0.1.0' 5 | 6 | from .Model import Model 7 | from .Trainer import Trainer 8 | from .AutoEncoder import AutoEncoder 9 | -------------------------------------------------------------------------------- /plugins/model/Model_LowMem/AutoEncoder.py: -------------------------------------------------------------------------------- 1 | # AutoEncoder base classes 2 | 3 | from lib.utils import backup_file 4 | 5 | hdf = {'encoderH5': 'lowmem_encoder.h5', 6 | 'decoder_AH5': 'lowmem_decoder_A.h5', 7 | 'decoder_BH5': 'lowmem_decoder_B.h5'} 8 | 9 | # Part of filename migration; should be removed a reasonable time after it was first added 10 | import os.path 11 | old_encoderH5 = 'encoder.h5' 12 | old_decoder_AH5 = 'decoder_A.h5' 13 | old_decoder_BH5 = 'decoder_B.h5' 14 | # End filename migration 15 | 16 | class AutoEncoder: 17 | def __init__(self, model_dir, gpus): 18 | self.model_dir = model_dir 19 | self.gpus = gpus 20 | 21 | self.encoder = self.Encoder() 22 | self.decoder_A = self.Decoder() 23 | self.decoder_B = self.Decoder() 24 | 25 | self.initModel() 26 | 27 | def load(self, swapped): 28 | (face_A,face_B) = (hdf['decoder_AH5'], hdf['decoder_BH5']) if not swapped else (hdf['decoder_BH5'], hdf['decoder_AH5']) 29 | 30 | try: 31 | # Part of filename migration; should be removed a reasonable time after it was first added 32 | if os.path.isfile(str(self.model_dir / old_encoderH5)): 33 | print('Migrating to new filenames: ', end='') 34 | if os.path.isfile(str(self.model_dir / hdf['encoderH5'])) is not True: 35 | os.rename(str(self.model_dir / old_decoder_AH5), str(self.model_dir / hdf['decoder_AH5'])) 36 | os.rename(str(self.model_dir / old_decoder_BH5), str(self.model_dir / hdf['decoder_BH5'])) 37 | os.rename(str(self.model_dir / old_encoderH5), str(self.model_dir / hdf['encoderH5'])) 38 |
print('Complete') 39 | else: 40 | print('Failed due to existing files in folder. Loading already migrated files') 41 | #End filename migration 42 | self.encoder.load_weights(str(self.model_dir / hdf['encoderH5'])) 43 | self.decoder_A.load_weights(str(self.model_dir / face_A)) 44 | self.decoder_B.load_weights(str(self.model_dir / face_B)) 45 | print('loaded model weights') 46 | return True 47 | except Exception as e: 48 | print('Failed loading existing training data.') 49 | print(e) 50 | return False 51 | 52 | def save_weights(self): 53 | model_dir = str(self.model_dir) 54 | for model in hdf.values(): 55 | backup_file(model_dir, model) 56 | self.encoder.save_weights(str(self.model_dir / hdf['encoderH5'])) 57 | self.decoder_A.save_weights(str(self.model_dir / hdf['decoder_AH5'])) 58 | self.decoder_B.save_weights(str(self.model_dir / hdf['decoder_BH5'])) 59 | print('saved model weights') 60 | -------------------------------------------------------------------------------- /plugins/model/Model_LowMem/Model.py: -------------------------------------------------------------------------------- 1 | # Based on the original https://www.reddit.com/r/deepfakes/ code sample + contribs 2 | 3 | from keras.models import Model as KerasModel 4 | from keras.layers import Input, Dense, Flatten, Reshape 5 | from keras.layers.advanced_activations import LeakyReLU 6 | from keras.layers.convolutional import Conv2D 7 | from keras.optimizers import Adam 8 | 9 | from .AutoEncoder import AutoEncoder 10 | from lib.PixelShuffler import PixelShuffler 11 | 12 | from keras.utils import multi_gpu_model 13 | 14 | IMAGE_SHAPE = (64, 64, 3) 15 | ENCODER_DIM = 512 16 | 17 | class Model(AutoEncoder): 18 | def initModel(self): 19 | optimizer = Adam(lr=5e-5, beta_1=0.5, beta_2=0.999) 20 | x = Input(shape=IMAGE_SHAPE) 21 | 22 | self.autoencoder_A = KerasModel(x, self.decoder_A(self.encoder(x))) 23 | self.autoencoder_B = KerasModel(x, self.decoder_B(self.encoder(x))) 24 | 25 | if self.gpus > 1: 26 | self.autoencoder_A = multi_gpu_model( self.autoencoder_A , self.gpus) 27 | self.autoencoder_B = multi_gpu_model( self.autoencoder_B , self.gpus) 28 | 29 | self.autoencoder_A.compile(optimizer=optimizer, loss='mean_absolute_error') 30 | self.autoencoder_B.compile(optimizer=optimizer, loss='mean_absolute_error') 31 | 32 | def converter(self, swap): 33 | autoencoder = self.autoencoder_B if not swap else self.autoencoder_A 34 | return lambda img: autoencoder.predict(img) 35 | 36 | def conv(self, filters): 37 | def block(x): 38 | x = Conv2D(filters, kernel_size=5, strides=2, padding='same')(x) 39 | x = LeakyReLU(0.1)(x) 40 | return x 41 | return block 42 | 43 | def upscale(self, filters): 44 | def block(x): 45 | x = Conv2D(filters * 4, kernel_size=3, padding='same')(x) 46 | x = LeakyReLU(0.1)(x) 47 | x = PixelShuffler()(x) 48 | return x 49 | return block 50 | 51 | def Encoder(self): 52 | input_ = Input(shape=IMAGE_SHAPE) 53 | x = input_ 54 | x = self.conv(128)(x) 55 | x = self.conv(256)(x) 56 | x = self.conv(512)(x) 57 | x = Dense(ENCODER_DIM)(Flatten()(x)) 58 | x = Dense(4 * 4 * 1024)(x) 59 | x = Reshape((4, 4, 1024))(x) 60 | x = self.upscale(512)(x) 61 | return KerasModel(input_, x) 62 | 63 | def Decoder(self): 64 | input_ = Input(shape=(8, 8, 512)) 65 | x = input_ 66 | x = self.upscale(256)(x) 67 | x = self.upscale(128)(x) 68 | x = self.upscale(64)(x) 69 | x = Conv2D(3, kernel_size=5, padding='same', activation='sigmoid')(x) 70 | return KerasModel(input_, x) 71 | 
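For orientation, a minimal sketch (not part of the repository) of how a Model/Trainer plugin pair such as the one above can be driven by hand. In practice scripts/train.py does this wiring through the plugin loader; the folder names, file pattern, batch size and iteration counts below are placeholder assumptions.

# Hypothetical driver for the LowMem plugin; assumes fn_a/fn_b are lists of
# aligned 64x64 face image paths, e.g. from the john_faces/ and clip_faces/ folders.
from glob import glob
from pathlib import Path

from plugins.model.Model_LowMem import Model, Trainer

fn_a = glob("john_faces/*.png")        # placeholder: extract output for face A
fn_b = glob("clip_faces/*.png")        # placeholder: extract output for face B

model = Model(Path("model"), gpus=1)   # builds encoder + decoder_A/decoder_B
model.load(swapped=False)              # restores saved weights when present
trainer = Trainer(model, fn_a, fn_b, 64)

for iteration in range(1, 10001):
    trainer.train_one_step(iteration, viewer=None)
    if iteration % 100 == 0:
        model.save_weights()           # also rotates backups via backup_file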
-------------------------------------------------------------------------------- /plugins/model/Model_LowMem/Trainer.py: -------------------------------------------------------------------------------- 1 | 2 | import time 3 | import numpy 4 | from lib.training_data import TrainingDataGenerator, stack_images 5 | 6 | class Trainer(): 7 | random_transform_args = { 8 | 'rotation_range': 10, 9 | 'zoom_range': 0.05, 10 | 'shift_range': 0.05, 11 | 'random_flip': 0.4, 12 | } 13 | 14 | def __init__(self, model, fn_A, fn_B, batch_size, *args): 15 | self.batch_size = batch_size 16 | self.model = model 17 | 18 | generator = TrainingDataGenerator(self.random_transform_args, 160) 19 | self.images_A = generator.minibatchAB(fn_A, self.batch_size) 20 | self.images_B = generator.minibatchAB(fn_B, self.batch_size) 21 | 22 | def train_one_step(self, iter, viewer): 23 | epoch, warped_A, target_A = next(self.images_A) 24 | epoch, warped_B, target_B = next(self.images_B) 25 | 26 | loss_A = self.model.autoencoder_A.train_on_batch(warped_A, target_A) 27 | loss_B = self.model.autoencoder_B.train_on_batch(warped_B, target_B) 28 | print("[{0}] [#{1:05d}] loss_A: {2:.5f}, loss_B: {3:.5f}".format(time.strftime("%H:%M:%S"), iter, loss_A, loss_B), 29 | end='\r') 30 | 31 | if viewer is not None: 32 | viewer(self.show_sample(target_A[0:14], target_B[0:14]), "training") 33 | 34 | def show_sample(self, test_A, test_B): 35 | figure_A = numpy.stack([ 36 | test_A, 37 | self.model.autoencoder_A.predict(test_A), 38 | self.model.autoencoder_B.predict(test_A), 39 | ], axis=1) 40 | figure_B = numpy.stack([ 41 | test_B, 42 | self.model.autoencoder_B.predict(test_B), 43 | self.model.autoencoder_A.predict(test_B), 44 | ], axis=1) 45 | 46 | if test_A.shape[0] % 2 == 1: 47 | figure_A = numpy.concatenate ([figure_A, numpy.expand_dims(figure_A[0],0) ]) 48 | figure_B = numpy.concatenate ([figure_B, numpy.expand_dims(figure_B[0],0) ]) 49 | 50 | figure = numpy.concatenate([figure_A, figure_B], axis=0) 51 | w = 4 52 | h = int( figure.shape[0] / w) 53 | figure = figure.reshape((w, h) + figure.shape[1:]) 54 | figure = stack_images(figure) 55 | 56 | return numpy.clip(figure * 255, 0, 255).astype('uint8') 57 | -------------------------------------------------------------------------------- /plugins/model/Model_LowMem/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | __author__ = """Based on https://reddit.com/u/deepfakes/""" 4 | __version__ = '0.1.0' 5 | 6 | from .Model import Model 7 | from .Trainer import Trainer 8 | from .AutoEncoder import AutoEncoder -------------------------------------------------------------------------------- /plugins/model/Model_Original/AutoEncoder.py: -------------------------------------------------------------------------------- 1 | # AutoEncoder base classes 2 | 3 | from lib.utils import backup_file 4 | from lib import Serializer 5 | from json import JSONDecodeError 6 | 7 | hdf = {'encoderH5': 'encoder.h5', 8 | 'decoder_AH5': 'decoder_A.h5', 9 | 'decoder_BH5': 'decoder_B.h5', 10 | 'state': 'state'} 11 | 12 | class AutoEncoder: 13 | def __init__(self, model_dir, gpus): 14 | self.model_dir = model_dir 15 | self.gpus = gpus 16 | 17 | self.encoder = self.Encoder() 18 | self.decoder_A = self.Decoder() 19 | self.decoder_B = self.Decoder() 20 | 21 | self.initModel() 22 | 23 | def load(self, swapped): 24 | serializer = Serializer.get_serializer('json') 25 | state_fn = ".".join([hdf['state'], serializer.ext]) 26 | try: 27 | with 
open(str(self.model_dir / state_fn), 'rb') as fp: 28 | state = serializer.unmarshal(fp.read().decode('utf-8')) 29 | self._epoch_no = state['epoch_no'] 30 | except IOError as e: 31 | print('Error loading training info:', e.strerror) 32 | self._epoch_no = 0 33 | except JSONDecodeError as e: 34 | print('Error loading training info:', e.msg) 35 | self._epoch_no = 0 36 | 37 | (face_A,face_B) = (hdf['decoder_AH5'], hdf['decoder_BH5']) if not swapped else (hdf['decoder_BH5'], hdf['decoder_AH5']) 38 | 39 | try: 40 | self.encoder.load_weights(str(self.model_dir / hdf['encoderH5'])) 41 | self.decoder_A.load_weights(str(self.model_dir / face_A)) 42 | self.decoder_B.load_weights(str(self.model_dir / face_B)) 43 | print('loaded model weights') 44 | return True 45 | except Exception as e: 46 | print('Failed loading existing training data.') 47 | print(e) 48 | return False 49 | 50 | def save_weights(self): 51 | model_dir = str(self.model_dir) 52 | for model in hdf.values(): 53 | backup_file(model_dir, model) 54 | self.encoder.save_weights(str(self.model_dir / hdf['encoderH5'])) 55 | self.decoder_A.save_weights(str(self.model_dir / hdf['decoder_AH5'])) 56 | self.decoder_B.save_weights(str(self.model_dir / hdf['decoder_BH5'])) 57 | 58 | print('saved model weights') 59 | 60 | serializer = Serializer.get_serializer('json') 61 | state_fn = ".".join([hdf['state'], serializer.ext]) 62 | state_dir = str(self.model_dir / state_fn) 63 | try: 64 | with open(state_dir, 'wb') as fp: 65 | state_json = serializer.marshal({ 66 | 'epoch_no' : self.epoch_no 67 | }) 68 | fp.write(state_json.encode('utf-8')) 69 | except IOError as e: 70 | print(e.strerror) 71 | 72 | @property 73 | def epoch_no(self): 74 | "Get current training epoch number" 75 | return self._epoch_no 76 | 77 | 78 | -------------------------------------------------------------------------------- /plugins/model/Model_Original/Model.py: -------------------------------------------------------------------------------- 1 | # Based on the original https://www.reddit.com/r/deepfakes/ code sample + contribs 2 | 3 | from keras.models import Model as KerasModel 4 | from keras.layers import Input, Dense, Flatten, Reshape 5 | from keras.layers.advanced_activations import LeakyReLU 6 | from keras.layers.convolutional import Conv2D 7 | from keras.optimizers import Adam 8 | 9 | from .AutoEncoder import AutoEncoder 10 | from lib.PixelShuffler import PixelShuffler 11 | 12 | from keras.utils import multi_gpu_model 13 | 14 | IMAGE_SHAPE = (64, 64, 3) 15 | ENCODER_DIM = 1024 16 | 17 | class Model(AutoEncoder): 18 | def initModel(self): 19 | optimizer = Adam(lr=5e-5, beta_1=0.5, beta_2=0.999) 20 | x = Input(shape=IMAGE_SHAPE) 21 | 22 | self.autoencoder_A = KerasModel(x, self.decoder_A(self.encoder(x))) 23 | self.autoencoder_B = KerasModel(x, self.decoder_B(self.encoder(x))) 24 | 25 | if self.gpus > 1: 26 | self.autoencoder_A = multi_gpu_model( self.autoencoder_A , self.gpus) 27 | self.autoencoder_B = multi_gpu_model( self.autoencoder_B , self.gpus) 28 | 29 | self.autoencoder_A.compile(optimizer=optimizer, loss='mean_absolute_error') 30 | self.autoencoder_B.compile(optimizer=optimizer, loss='mean_absolute_error') 31 | 32 | def converter(self, swap): 33 | autoencoder = self.autoencoder_B if not swap else self.autoencoder_A 34 | return lambda img: autoencoder.predict(img) 35 | 36 | def conv(self, filters): 37 | def block(x): 38 | x = Conv2D(filters, kernel_size=5, strides=2, padding='same')(x) 39 | x = LeakyReLU(0.1)(x) 40 | return x 41 | return block 42 | 43 | def upscale(self, 
filters): 44 | def block(x): 45 | x = Conv2D(filters * 4, kernel_size=3, padding='same')(x) 46 | x = LeakyReLU(0.1)(x) 47 | x = PixelShuffler()(x) 48 | return x 49 | return block 50 | 51 | def Encoder(self): 52 | input_ = Input(shape=IMAGE_SHAPE) 53 | x = input_ 54 | x = self.conv(128)(x) 55 | x = self.conv(256)(x) 56 | x = self.conv(512)(x) 57 | x = self.conv(1024)(x) 58 | x = Dense(ENCODER_DIM)(Flatten()(x)) 59 | x = Dense(4 * 4 * 1024)(x) 60 | x = Reshape((4, 4, 1024))(x) 61 | x = self.upscale(512)(x) 62 | return KerasModel(input_, x) 63 | 64 | def Decoder(self): 65 | input_ = Input(shape=(8, 8, 512)) 66 | x = input_ 67 | x = self.upscale(256)(x) 68 | x = self.upscale(128)(x) 69 | x = self.upscale(64)(x) 70 | x = Conv2D(3, kernel_size=5, padding='same', activation='sigmoid')(x) 71 | return KerasModel(input_, x) 72 | -------------------------------------------------------------------------------- /plugins/model/Model_Original/Trainer.py: -------------------------------------------------------------------------------- 1 | 2 | import time 3 | import numpy 4 | from lib.training_data import TrainingDataGenerator, stack_images 5 | 6 | class Trainer(): 7 | random_transform_args = { 8 | 'rotation_range': 10, 9 | 'zoom_range': 0.05, 10 | 'shift_range': 0.05, 11 | 'random_flip': 0.4, 12 | } 13 | 14 | def __init__(self, model, fn_A, fn_B, batch_size, *args): 15 | self.batch_size = batch_size 16 | self.model = model 17 | 18 | generator = TrainingDataGenerator(self.random_transform_args, 160) 19 | self.images_A = generator.minibatchAB(fn_A, self.batch_size) 20 | self.images_B = generator.minibatchAB(fn_B, self.batch_size) 21 | 22 | def train_one_step(self, iter, viewer): 23 | epoch, warped_A, target_A = next(self.images_A) 24 | epoch, warped_B, target_B = next(self.images_B) 25 | 26 | loss_A = self.model.autoencoder_A.train_on_batch(warped_A, target_A) 27 | loss_B = self.model.autoencoder_B.train_on_batch(warped_B, target_B) 28 | 29 | self.model._epoch_no += 1 30 | 31 | print("[{0}] [#{1:05d}] loss_A: {2:.5f}, loss_B: {3:.5f}".format(time.strftime("%H:%M:%S"), self.model.epoch_no, loss_A, loss_B), 32 | end='\r') 33 | 34 | if viewer is not None: 35 | viewer(self.show_sample(target_A[0:14], target_B[0:14]), "training") 36 | 37 | def show_sample(self, test_A, test_B): 38 | figure_A = numpy.stack([ 39 | test_A, 40 | self.model.autoencoder_A.predict(test_A), 41 | self.model.autoencoder_B.predict(test_A), 42 | ], axis=1) 43 | figure_B = numpy.stack([ 44 | test_B, 45 | self.model.autoencoder_B.predict(test_B), 46 | self.model.autoencoder_A.predict(test_B), 47 | ], axis=1) 48 | 49 | if test_A.shape[0] % 2 == 1: 50 | figure_A = numpy.concatenate ([figure_A, numpy.expand_dims(figure_A[0],0) ]) 51 | figure_B = numpy.concatenate ([figure_B, numpy.expand_dims(figure_B[0],0) ]) 52 | 53 | figure = numpy.concatenate([figure_A, figure_B], axis=0) 54 | w = 4 55 | h = int( figure.shape[0] / w) 56 | figure = figure.reshape((w, h) + figure.shape[1:]) 57 | figure = stack_images(figure) 58 | 59 | return numpy.clip(figure * 255, 0, 255).astype('uint8') 60 | -------------------------------------------------------------------------------- /plugins/model/Model_Original/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | __author__ = """Based on https://reddit.com/u/deepfakes/""" 4 | __version__ = '0.1.0' 5 | 6 | from .Model import Model 7 | from .Trainer import Trainer 8 | from .AutoEncoder import AutoEncoder 
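As a cross-check on the state handling above, a small sketch (not part of the repository) of the round trip that AutoEncoder.save_weights and load perform on the epoch counter. It assumes the json serializer's extension is 'json'; the directory and epoch value are illustrative.

# Illustrative only: mimics the 'state.json' file written by save_weights
# and read back by load to resume the epoch counter.
import json
from pathlib import Path

model_dir = Path("model")                   # placeholder model folder
model_dir.mkdir(parents=True, exist_ok=True)
state_file = model_dir / "state.json"       # hdf['state'] + '.' + serializer.ext

state_file.write_text(json.dumps({"epoch_no": 12345}))     # save side
epoch_no = json.loads(state_file.read_text())["epoch_no"]  # load side
print(epoch_no)  # 12345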
-------------------------------------------------------------------------------- /plugins/model/Model_Original/__pycache__/AutoEncoder.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/plugins/model/Model_Original/__pycache__/AutoEncoder.cpython-36.pyc -------------------------------------------------------------------------------- /plugins/model/Model_Original/__pycache__/Model.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/plugins/model/Model_Original/__pycache__/Model.cpython-36.pyc -------------------------------------------------------------------------------- /plugins/model/Model_Original/__pycache__/Trainer.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/plugins/model/Model_Original/__pycache__/Trainer.cpython-36.pyc -------------------------------------------------------------------------------- /plugins/model/Model_Original/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/plugins/model/Model_Original/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /plugins/model/Model_OriginalHighRes/Trainer.py: -------------------------------------------------------------------------------- 1 | import time 2 | import numpy 3 | 4 | from lib.training_data import TrainingDataGenerator, stack_images 5 | 6 | 7 | TRANSFORM_PRC = 115. 
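# TRANSFORM_PRC scales the Original trainer's base augmentation values up to
# 115%: rotation_range below works out to 10 * 1.15 = 11.5 degrees, zoom_range
# and shift_range to 0.0575, and random_flip to 0.46.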
8 | 9 | 10 | class Trainer(): 11 | 12 | _random_transform_args = { 13 | 'rotation_range': 10 * (TRANSFORM_PRC * .01), 14 | 'zoom_range': 0.05 * (TRANSFORM_PRC * .01), 15 | 'shift_range': 0.05 * (TRANSFORM_PRC * .01), 16 | 'random_flip': 0.4 * (TRANSFORM_PRC * .01), 17 | } 18 | 19 | def __init__(self, model, fn_A, fn_B, batch_size, *args): 20 | self.batch_size = batch_size 21 | self.model = model 22 | from timeit import default_timer as clock 23 | self._clock = clock 24 | 25 | generator = TrainingDataGenerator(self.random_transform_args, 160, 5, zoom=self.model.IMAGE_SHAPE[0]//64) 26 | 27 | self.images_A = generator.minibatchAB(fn_A, self.batch_size) 28 | self.images_B = generator.minibatchAB(fn_B, self.batch_size) 29 | 30 | self.generator = generator 31 | 32 | 33 | def train_one_step(self, iter_no, viewer): 34 | when = self._clock() 35 | _, warped_A, target_A = next(self.images_A) 36 | _, warped_B, target_B = next(self.images_B) 37 | 38 | loss_A = self.model.autoencoder_A.train_on_batch(warped_A, target_A) 39 | loss_B = self.model.autoencoder_B.train_on_batch(warped_B, target_B) 40 | 41 | self.model._epoch_no += 1 42 | 43 | if isinstance(loss_A, (list, tuple)): 44 | print("[{0}] [#{1:05d}] [{2:.3f}s] loss_A: {3:.5f}, loss_B: {4:.5f}".format( 45 | time.strftime("%H:%M:%S"), self.model._epoch_no, self._clock()-when, loss_A[1], loss_B[1]), 46 | end='\r') 47 | else: 48 | print("[{0}] [#{1:05d}] [{2:.3f}s] loss_A: {3:.5f}, loss_B: {4:.5f}".format( 49 | time.strftime("%H:%M:%S"), self.model._epoch_no, self._clock()-when, loss_A, loss_B), 50 | end='\r') 51 | 52 | if viewer is not None: 53 | viewer(self.show_sample(target_A[0:8], target_B[0:8]), "training using {}, bs={}".format(self.model, self.batch_size)) 54 | 55 | 56 | def show_sample(self, test_A, test_B): 57 | figure_A = numpy.stack([ 58 | test_A, 59 | self.model.autoencoder_A.predict(test_A), 60 | self.model.autoencoder_B.predict(test_A), 61 | ], axis=1) 62 | 63 | figure_B = numpy.stack([ 64 | test_B, 65 | self.model.autoencoder_B.predict(test_B), 66 | self.model.autoencoder_A.predict(test_B), 67 | ], axis=1) 68 | 69 | if (test_A.shape[0] % 2)!=0: 70 | figure_A = numpy.concatenate ([figure_A, numpy.expand_dims(figure_A[0],0) ]) 71 | figure_B = numpy.concatenate ([figure_B, numpy.expand_dims(figure_B[0],0) ]) 72 | 73 | figure = numpy.concatenate([figure_A, figure_B], axis=0) 74 | w = 4 75 | h = int( figure.shape[0] / w) 76 | figure = figure.reshape((w, h) + figure.shape[1:]) 77 | figure = stack_images(figure) 78 | 79 | return numpy.clip(figure * 255, 0, 255).astype('uint8') 80 | 81 | 82 | @property 83 | def random_transform_args(self): 84 | return self._random_transform_args 85 | -------------------------------------------------------------------------------- /plugins/model/Model_OriginalHighRes/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | __author__ = """Based on https://reddit.com/u/deepfakes/""" 4 | 5 | from ._version import __version__ 6 | from .Model import Model 7 | from .Trainer import Trainer 8 | 9 | -------------------------------------------------------------------------------- /plugins/model/Model_OriginalHighRes/_version.py: -------------------------------------------------------------------------------- 1 | __version__ = 0, 2, 7 -------------------------------------------------------------------------------- /plugins/model/Model_OriginalHighRes/instance_normalization.py: 
-------------------------------------------------------------------------------- 1 | from keras.engine import Layer, InputSpec 2 | from keras import initializers, regularizers, constraints 3 | from keras import backend as K 4 | from keras.utils.generic_utils import get_custom_objects 5 | 6 | import numpy as np 7 | 8 | 9 | class InstanceNormalization(Layer): 10 | """Instance normalization layer (Lei Ba et al, 2016, Ulyanov et al., 2016). 11 | Normalize the activations of the previous layer at each step, 12 | i.e. applies a transformation that maintains the mean activation 13 | close to 0 and the activation standard deviation close to 1. 14 | # Arguments 15 | axis: Integer, the axis that should be normalized 16 | (typically the features axis). 17 | For instance, after a `Conv2D` layer with 18 | `data_format="channels_first"`, 19 | set `axis=1` in `InstanceNormalization`. 20 | Setting `axis=None` will normalize all values in each instance of the batch. 21 | Axis 0 is the batch dimension. `axis` cannot be set to 0 to avoid errors. 22 | epsilon: Small float added to variance to avoid dividing by zero. 23 | center: If True, add offset of `beta` to normalized tensor. 24 | If False, `beta` is ignored. 25 | scale: If True, multiply by `gamma`. 26 | If False, `gamma` is not used. 27 | When the next layer is linear (also e.g. `nn.relu`), 28 | this can be disabled since the scaling 29 | will be done by the next layer. 30 | beta_initializer: Initializer for the beta weight. 31 | gamma_initializer: Initializer for the gamma weight. 32 | beta_regularizer: Optional regularizer for the beta weight. 33 | gamma_regularizer: Optional regularizer for the gamma weight. 34 | beta_constraint: Optional constraint for the beta weight. 35 | gamma_constraint: Optional constraint for the gamma weight. 36 | # Input shape 37 | Arbitrary. Use the keyword argument `input_shape` 38 | (tuple of integers, does not include the samples axis) 39 | when using this layer as the first layer in a model. 40 | # Output shape 41 | Same shape as input. 
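    # Example
        A minimal usage sketch (the input shape here is illustrative, not a
        requirement of the layer):
            from keras.models import Sequential
            model = Sequential()
            model.add(InstanceNormalization(axis=-1, input_shape=(64, 64, 3)))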
42 | # References 43 | - [Layer Normalization](https://arxiv.org/abs/1607.06450) 44 | - [Instance Normalization: The Missing Ingredient for Fast Stylization](https://arxiv.org/abs/1607.08022) 45 | """ 46 | def __init__(self, 47 | axis=None, 48 | epsilon=1e-3, 49 | center=True, 50 | scale=True, 51 | beta_initializer='zeros', 52 | gamma_initializer='ones', 53 | beta_regularizer=None, 54 | gamma_regularizer=None, 55 | beta_constraint=None, 56 | gamma_constraint=None, 57 | **kwargs): 58 | super(InstanceNormalization, self).__init__(**kwargs) 59 | self.supports_masking = True 60 | self.axis = axis 61 | self.epsilon = epsilon 62 | self.center = center 63 | self.scale = scale 64 | self.beta_initializer = initializers.get(beta_initializer) 65 | self.gamma_initializer = initializers.get(gamma_initializer) 66 | self.beta_regularizer = regularizers.get(beta_regularizer) 67 | self.gamma_regularizer = regularizers.get(gamma_regularizer) 68 | self.beta_constraint = constraints.get(beta_constraint) 69 | self.gamma_constraint = constraints.get(gamma_constraint) 70 | 71 | def build(self, input_shape): 72 | ndim = len(input_shape) 73 | if self.axis == 0: 74 | raise ValueError('Axis cannot be zero') 75 | 76 | if (self.axis is not None) and (ndim == 2): 77 | raise ValueError('Cannot specify axis for rank 1 tensor') 78 | 79 | self.input_spec = InputSpec(ndim=ndim) 80 | 81 | if self.axis is None: 82 | shape = (1,) 83 | else: 84 | shape = (input_shape[self.axis],) 85 | 86 | if self.scale: 87 | self.gamma = self.add_weight(shape=shape, 88 | name='gamma', 89 | initializer=self.gamma_initializer, 90 | regularizer=self.gamma_regularizer, 91 | constraint=self.gamma_constraint) 92 | else: 93 | self.gamma = None 94 | if self.center: 95 | self.beta = self.add_weight(shape=shape, 96 | name='beta', 97 | initializer=self.beta_initializer, 98 | regularizer=self.beta_regularizer, 99 | constraint=self.beta_constraint) 100 | else: 101 | self.beta = None 102 | self.built = True 103 | 104 | def call(self, inputs, training=None): 105 | input_shape = K.int_shape(inputs) 106 | reduction_axes = list(range(0, len(input_shape))) 107 | 108 | if (self.axis is not None): 109 | del reduction_axes[self.axis] 110 | 111 | del reduction_axes[0] 112 | 113 | mean = K.mean(inputs, reduction_axes, keepdims=True) 114 | stddev = K.std(inputs, reduction_axes, keepdims=True) + self.epsilon 115 | normed = (inputs - mean) / stddev 116 | 117 | broadcast_shape = [1] * len(input_shape) 118 | if self.axis is not None: 119 | broadcast_shape[self.axis] = input_shape[self.axis] 120 | 121 | if self.scale: 122 | broadcast_gamma = K.reshape(self.gamma, broadcast_shape) 123 | normed = normed * broadcast_gamma 124 | if self.center: 125 | broadcast_beta = K.reshape(self.beta, broadcast_shape) 126 | normed = normed + broadcast_beta 127 | return normed 128 | 129 | def get_config(self): 130 | config = { 131 | 'axis': self.axis, 132 | 'epsilon': self.epsilon, 133 | 'center': self.center, 134 | 'scale': self.scale, 135 | 'beta_initializer': initializers.serialize(self.beta_initializer), 136 | 'gamma_initializer': initializers.serialize(self.gamma_initializer), 137 | 'beta_regularizer': regularizers.serialize(self.beta_regularizer), 138 | 'gamma_regularizer': regularizers.serialize(self.gamma_regularizer), 139 | 'beta_constraint': constraints.serialize(self.beta_constraint), 140 | 'gamma_constraint': constraints.serialize(self.gamma_constraint) 141 | } 142 | base_config = super(InstanceNormalization, self).get_config() 143 | return dict(list(base_config.items()) + 
list(config.items()))
144 | 
145 | get_custom_objects().update({'InstanceNormalization': InstanceNormalization})
146 | 
--------------------------------------------------------------------------------
/plugins/model/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/plugins/model/__init__.py
--------------------------------------------------------------------------------
/plugins/model/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/plugins/model/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/plugins/plugin_loader.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | """ Plugin loader for extract, training and model tasks """
3 | 
4 | import os
5 | from importlib import import_module
6 | 
7 | 
8 | class PluginLoader():
9 |     """ Plugin loader for extract, training and model tasks """
10 |     @staticmethod
11 |     def get_detector(name):
12 |         """ Return requested detector plugin """
13 |         return PluginLoader._import("extract.detect", name)
14 | 
15 |     @staticmethod
16 |     def get_aligner(name):
17 |         """ Return requested aligner plugin """
18 |         return PluginLoader._import("extract.align", name)
19 | 
20 |     @staticmethod
21 |     def get_converter(name):
22 |         """ Return requested converter plugin """
23 |         return PluginLoader._import("Convert", "Convert_{0}".format(name))
24 | 
25 |     @staticmethod
26 |     def get_model(name):
27 |         """ Return requested model plugin """
28 |         return PluginLoader._import("Model", "Model_{0}".format(name))
29 | 
30 |     @staticmethod
31 |     def get_trainer(name):
32 |         """ Return requested trainer plugin """
33 |         return PluginLoader._import("Trainer", "Model_{0}".format(name))
34 | 
35 |     @staticmethod
36 |     def _import(attr, name):
37 |         """ Import the plugin's module """
38 |         ttl = attr.split(".")[-1].title()
39 |         print("Loading {} from {} plugin...".format(ttl, name.title()))
40 |         attr = "model" if attr == "Trainer" else attr.lower()
41 |         mod = ".".join(("plugins", attr, name))
42 |         module = import_module(mod)
43 |         return getattr(module, ttl)
44 | 
45 |     @staticmethod
46 |     def get_available_models():
47 |         """ Return a list of available models """
48 |         models = ()
49 |         modelpath = os.path.join(os.path.dirname(__file__), "model")
50 |         for modeldir in next(os.walk(modelpath))[1]:
51 |             if modeldir[0:6].lower() == 'model_':
52 |                 models += (modeldir[6:],)
53 |         return models
54 | 
55 |     @staticmethod
56 |     def get_available_extractors(extractor_type):
57 |         """ Return a list of available extractors """
58 |         extractpath = os.path.join(os.path.dirname(__file__),
59 |                                    "extract",
60 |                                    extractor_type)
61 |         extractors = sorted(item.name.replace(".py", "").replace("_", "-")
62 |                             for item in os.scandir(extractpath)
63 |                             if not item.name.startswith("_")
64 |                             and item.name.endswith(".py")
65 |                             and item.name != "manual.py")
66 |         return extractors
67 | 
68 |     @staticmethod
69 |     def get_default_model():
70 |         """ Return the default model """
71 |         models = PluginLoader.get_available_models()
72 |         return 'Original' if 'Original' in models else models[0]
73 | 
--------------------------------------------------------------------------------
/plugins/plugin_loader.pyc:
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/plugins/plugin_loader.pyc -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | tqdm 2 | psutil 3 | pathlib==1.0.1 4 | scandir==1.7 5 | opencv-python 6 | scikit-image 7 | scikit-learn 8 | matplotlib==2.2.2 9 | ffmpy==0.2.2 10 | nvidia-ml-py3 11 | h5py==2.8.0 12 | Keras==2.2.4 13 | cmake 14 | dlib 15 | face_recognition 16 | 17 | # tensorflow is included within the docker image. 18 | # If you are looking for dependencies for a manual install, 19 | # you may want to install tensorflow-gpu==1.4.0 for CUDA 8.0 or tensorflow-gpu>=1.6.0 for CUDA 9.0 20 | # NB: MTCNN will not work with tensorflow releases prior to 1.6.0 21 | -------------------------------------------------------------------------------- /scripts/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/scripts/.DS_Store -------------------------------------------------------------------------------- /scripts/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/scripts/__init__.py -------------------------------------------------------------------------------- /scripts/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/scripts/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /scripts/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/scripts/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /scripts/__pycache__/convert.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/scripts/__pycache__/convert.cpython-36.pyc -------------------------------------------------------------------------------- /scripts/__pycache__/convert.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/scripts/__pycache__/convert.cpython-37.pyc -------------------------------------------------------------------------------- /scripts/__pycache__/extract.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/scripts/__pycache__/extract.cpython-36.pyc -------------------------------------------------------------------------------- /scripts/__pycache__/extract.cpython-37.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/scripts/__pycache__/extract.cpython-37.pyc -------------------------------------------------------------------------------- /scripts/__pycache__/fsmedia.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/scripts/__pycache__/fsmedia.cpython-36.pyc -------------------------------------------------------------------------------- /scripts/__pycache__/fsmedia.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/scripts/__pycache__/fsmedia.cpython-37.pyc -------------------------------------------------------------------------------- /scripts/__pycache__/train.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/scripts/__pycache__/train.cpython-36.pyc -------------------------------------------------------------------------------- /scripts/gui.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin python3 2 | """ The optional GUI for faceswap """ 3 | 4 | import os 5 | import sys 6 | import tkinter as tk 7 | 8 | from tkinter import messagebox, ttk 9 | 10 | from lib.gui import (CliOptions, CurrentSession, CommandNotebook, Config, 11 | ConsoleOut, DisplayNotebook, Images, ProcessWrapper, 12 | StatusBar) 13 | 14 | 15 | class FaceswapGui(tk.Tk): 16 | """ The Graphical User Interface """ 17 | 18 | def __init__(self, pathscript): 19 | tk.Tk.__init__(self) 20 | self.scaling_factor = self.get_scaling() 21 | self.set_geometry() 22 | 23 | pathcache = os.path.join(pathscript, "lib", "gui", ".cache") 24 | self.images = Images(pathcache) 25 | self.cliopts = CliOptions() 26 | self.session = CurrentSession() 27 | statusbar = StatusBar(self) 28 | self.wrapper = ProcessWrapper(statusbar, 29 | self.session, 30 | pathscript, 31 | self.cliopts) 32 | 33 | self.images.delete_preview() 34 | self.protocol("WM_DELETE_WINDOW", self.close_app) 35 | 36 | def get_scaling(self): 37 | """ Get the display DPI """ 38 | dpi = self.winfo_fpixels("1i") 39 | return dpi / 72.0 40 | 41 | def set_geometry(self): 42 | """ Set GUI geometry """ 43 | self.tk.call("tk", "scaling", self.scaling_factor) 44 | width = int(1200 * self.scaling_factor) 45 | height = int(640 * self.scaling_factor) 46 | self.geometry("{}x{}+80+80".format(str(width), str(height))) 47 | 48 | def build_gui(self, debug_console): 49 | """ Build the GUI """ 50 | self.title("Faceswap.py") 51 | self.menu() 52 | 53 | topcontainer, bottomcontainer = self.add_containers() 54 | 55 | CommandNotebook(topcontainer, 56 | self.cliopts, 57 | self.wrapper.tk_vars, 58 | self.scaling_factor) 59 | DisplayNotebook(topcontainer, 60 | self.session, 61 | self.wrapper.tk_vars, 62 | self.scaling_factor) 63 | ConsoleOut(bottomcontainer, debug_console, self.wrapper.tk_vars) 64 | 65 | def menu(self): 66 | """ Menu bar for loading and saving configs """ 67 | menubar = tk.Menu(self) 68 | filemenu = tk.Menu(menubar, tearoff=0) 69 | 70 | config = Config(self.cliopts, self.wrapper.tk_vars) 71 | 72 | 
filemenu.add_command(label="Load full config...", 73 | underline=0, 74 | command=config.load) 75 | filemenu.add_command(label="Save full config...", 76 | underline=0, 77 | command=config.save) 78 | filemenu.add_separator() 79 | filemenu.add_command(label="Reset all to default", 80 | underline=0, 81 | command=self.cliopts.reset) 82 | filemenu.add_command(label="Clear all", 83 | underline=0, 84 | command=self.cliopts.clear) 85 | filemenu.add_separator() 86 | filemenu.add_command(label="Quit", 87 | underline=0, 88 | command=self.close_app) 89 | 90 | menubar.add_cascade(label="File", menu=filemenu, underline=0) 91 | self.config(menu=menubar) 92 | 93 | def add_containers(self): 94 | """ Add the paned window containers that 95 | hold each main area of the gui """ 96 | maincontainer = tk.PanedWindow(self, 97 | sashrelief=tk.RAISED, 98 | orient=tk.VERTICAL) 99 | maincontainer.pack(fill=tk.BOTH, expand=True) 100 | 101 | topcontainer = tk.PanedWindow(maincontainer, 102 | sashrelief=tk.RAISED, 103 | orient=tk.HORIZONTAL) 104 | maincontainer.add(topcontainer) 105 | 106 | bottomcontainer = ttk.Frame(maincontainer, height=150) 107 | maincontainer.add(bottomcontainer) 108 | 109 | return topcontainer, bottomcontainer 110 | 111 | def close_app(self): 112 | """ Close Python. This is here because the graph 113 | animation function continues to run even when 114 | tkinter has gone away """ 115 | confirm = messagebox.askokcancel 116 | confirmtxt = "Processes are still running. Are you sure...?" 117 | if (self.wrapper.tk_vars["runningtask"].get() 118 | and not confirm("Close", confirmtxt)): 119 | return 120 | if self.wrapper.tk_vars["runningtask"].get(): 121 | self.wrapper.task.terminate() 122 | self.images.delete_preview() 123 | self.quit() 124 | exit() 125 | 126 | 127 | class Gui(object): 128 | """ The GUI process. """ 129 | def __init__(self, arguments): 130 | cmd = sys.argv[0] 131 | pathscript = os.path.realpath(os.path.dirname(cmd)) 132 | self.args = arguments 133 | self.root = FaceswapGui(pathscript) 134 | 135 | def process(self): 136 | """ Builds the GUI """ 137 | self.root.build_gui(self.args.debug) 138 | self.root.mainloop() 139 | -------------------------------------------------------------------------------- /scripts/train.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin python3 2 | """ The script to run the training process of faceswap """ 3 | 4 | import os 5 | import sys 6 | import threading 7 | 8 | import cv2 9 | import tensorflow as tf 10 | from keras.backend.tensorflow_backend import set_session 11 | 12 | from lib.utils import (get_folder, get_image_paths, set_system_verbosity, 13 | Timelapse) 14 | from plugins.plugin_loader import PluginLoader 15 | 16 | 17 | class Train(): 18 | """ The training process. 
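    Spawns the trainer for the selected model in a background thread and
    monitors either the preview window or the console for save and stop
    requests.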
""" 19 | def __init__(self, arguments): 20 | self.args = arguments 21 | self.images = self.get_images() 22 | self.stop = False 23 | self.save_now = False 24 | self.preview_buffer = dict() 25 | self.lock = threading.Lock() 26 | 27 | # this is so that you can enter case insensitive values for trainer 28 | trainer_name = self.args.trainer 29 | self.trainer_name = trainer_name 30 | if trainer_name.lower() == "lowmem": 31 | self.trainer_name = "LowMem" 32 | self.timelapse = None 33 | 34 | def process(self): 35 | """ Call the training process object """ 36 | print("Training data directory: {}".format(self.args.model_dir)) 37 | lvl = '0' if self.args.verbose else '2' 38 | set_system_verbosity(lvl) 39 | thread = self.start_thread() 40 | 41 | if self.args.preview: 42 | self.monitor_preview() 43 | else: 44 | self.monitor_console() 45 | 46 | self.end_thread(thread) 47 | 48 | def get_images(self): 49 | """ Check the image dirs exist, contain images and return the image 50 | objects """ 51 | images = [] 52 | for image_dir in [self.args.input_A, self.args.input_B]: 53 | if not os.path.isdir(image_dir): 54 | print('Error: {} does not exist'.format(image_dir)) 55 | exit(1) 56 | 57 | if not os.listdir(image_dir): 58 | print('Error: {} contains no images'.format(image_dir)) 59 | exit(1) 60 | 61 | images.append(get_image_paths(image_dir)) 62 | print("Model A Directory: {}".format(self.args.input_A)) 63 | print("Model B Directory: {}".format(self.args.input_B)) 64 | return images 65 | 66 | def start_thread(self): 67 | """ Put the training process in a thread so we can keep control """ 68 | thread = threading.Thread(target=self.process_thread) 69 | thread.start() 70 | return thread 71 | 72 | def end_thread(self, thread): 73 | """ On termination output message and join thread back to main """ 74 | print("Exit requested! The trainer will complete its current cycle, " 75 | "save the models and quit (it can take up a couple of seconds " 76 | "depending on your training speed). 
If you want to kill it now, " 77 | "press Ctrl + c") 78 | self.stop = True 79 | thread.join() 80 | sys.stdout.flush() 81 | 82 | def process_thread(self): 83 | """ The training process to be run inside a thread """ 84 | try: 85 | print("Loading data, this may take a while...") 86 | 87 | if self.args.allow_growth: 88 | self.set_tf_allow_growth() 89 | 90 | model = self.load_model() 91 | trainer = self.load_trainer(model) 92 | 93 | self.timelapse = Timelapse.create_timelapse( 94 | self.args.timelapse_input_A, 95 | self.args.timelapse_input_B, 96 | self.args.timelapse_output, 97 | trainer) 98 | 99 | self.run_training_cycle(model, trainer) 100 | except KeyboardInterrupt: 101 | try: 102 | model.save_weights() 103 | except KeyboardInterrupt: 104 | print("Saving model weights has been cancelled!") 105 | exit(0) 106 | except Exception as err: 107 | raise err 108 | 109 | def load_model(self): 110 | """ Load the model requested for training """ 111 | model_dir = get_folder(self.args.model_dir) 112 | model = PluginLoader.get_model(self.trainer_name)(model_dir, 113 | self.args.gpus) 114 | 115 | model.load(swapped=False) 116 | return model 117 | 118 | def load_trainer(self, model): 119 | """ Load the trainer requested for training """ 120 | images_a, images_b = self.images 121 | 122 | trainer = PluginLoader.get_trainer(self.trainer_name) 123 | trainer = trainer(model, 124 | images_a, 125 | images_b, 126 | self.args.batch_size, 127 | self.args.perceptual_loss) 128 | return trainer 129 | 130 | def run_training_cycle(self, model, trainer): 131 | """ Perform the training cycle """ 132 | for iteration in range(0, self.args.iterations): 133 | save_iteration = iteration % self.args.save_interval == 0 134 | viewer = self.show if save_iteration or self.save_now else None 135 | if save_iteration and self.timelapse is not None: 136 | self.timelapse.work() 137 | trainer.train_one_step(iteration, viewer) 138 | if self.stop: 139 | break 140 | elif save_iteration: 141 | model.save_weights() 142 | elif self.save_now: 143 | model.save_weights() 144 | self.save_now = False 145 | model.save_weights() 146 | self.stop = True 147 | 148 | def monitor_preview(self): 149 | """ Generate the preview window and wait for keyboard input """ 150 | print("Using live preview.\n" 151 | "Press 'ENTER' on the preview window to save and quit.\n" 152 | "Press 'S' on the preview window to save model weights " 153 | "immediately") 154 | while True: 155 | try: 156 | with self.lock: 157 | for name, image in self.preview_buffer.items(): 158 | cv2.imshow(name, image) 159 | 160 | key = cv2.waitKey(1000) 161 | if key == ord("\n") or key == ord("\r"): 162 | break 163 | if key == ord("s"): 164 | self.save_now = True 165 | if self.stop: 166 | break 167 | except KeyboardInterrupt: 168 | break 169 | 170 | @staticmethod 171 | def monitor_console(): 172 | """ Monitor the console for any input followed by enter or ctrl+c """ 173 | # TODO: how to catch a specific key instead of Enter? 174 | # there isn't a good multiplatform solution: 175 | # https://stackoverflow.com/questions/3523174 176 | # TODO: Find a way to interrupt input() if the target iterations are 177 | # reached. At the moment, setting a target iteration and using the -p 178 | # flag is the only guaranteed way to exit the training loop on 179 | # hitting target iterations. 180 | print("Starting. 
Press 'ENTER' to stop training and save model") 181 | try: 182 | input() 183 | except KeyboardInterrupt: 184 | pass 185 | 186 | @staticmethod 187 | def set_tf_allow_growth(): 188 | """ Allow TensorFlow to manage VRAM growth """ 189 | config = tf.ConfigProto() 190 | config.gpu_options.allow_growth = True 191 | config.gpu_options.visible_device_list = "0" 192 | set_session(tf.Session(config=config)) 193 | 194 | def show(self, image, name=""): 195 | """ Generate the preview and write preview file output """ 196 | try: 197 | scriptpath = os.path.realpath(os.path.dirname(sys.argv[0])) 198 | if self.args.write_image: 199 | img = "_sample_{}.jpg".format(name) 200 | imgfile = os.path.join(scriptpath, img) 201 | cv2.imwrite(imgfile, image) 202 | if self.args.redirect_gui: 203 | img = ".gui_preview_{}.jpg".format(name) 204 | imgfile = os.path.join(scriptpath, "lib", "gui", 205 | ".cache", "preview", img) 206 | cv2.imwrite(imgfile, image) 207 | if self.args.preview: 208 | with self.lock: 209 | self.preview_buffer[name] = image 210 | except Exception as err: 211 | print("could not preview sample") 212 | raise err 213 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length = 99 3 | exclude = .git, __pycache__ 4 | -------------------------------------------------------------------------------- /tools.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ The master tools.py script """ 3 | import sys 4 | # Importing the various tools 5 | import tools.cli as cli 6 | from lib.cli import FullHelpArgumentParser, GuiArgs 7 | 8 | # Python version check 9 | if sys.version_info[0] < 3: 10 | raise Exception("This program requires at least python3.2") 11 | if sys.version_info[0] == 3 and sys.version_info[1] < 2: 12 | raise Exception("This program requires at least python3.2") 13 | 14 | 15 | def bad_args(args): 16 | """ Print help on bad arguments """ 17 | PARSER.print_help() 18 | exit(0) 19 | 20 | 21 | if __name__ == "__main__": 22 | _tools_warning = "Please backup your data and/or test the tool you want " 23 | _tools_warning += "to use with a smaller data set to make sure you " 24 | _tools_warning += "understand how it works." 
25 |     print(_tools_warning)
26 | 
27 |     PARSER = FullHelpArgumentParser()
28 |     SUBPARSER = PARSER.add_subparsers()
29 |     ALIGN = cli.AlignmentsArgs(SUBPARSER,
30 |                                "alignments",
31 |                                "This command lets you perform various tasks "
32 |                                "pertaining to an alignments file.")
33 |     EFFMPEG = cli.EffmpegArgs(SUBPARSER,
34 |                               "effmpeg",
35 |                               "This command allows you to easily execute "
36 |                               "common ffmpeg tasks.")
37 |     SORT = cli.SortArgs(SUBPARSER,
38 |                         "sort",
39 |                         "This command lets you sort images using various "
40 |                         "methods.")
41 |     GUI = GuiArgs(SUBPARSER,
42 |                   "gui",
43 |                   "Launch the Faceswap Tools Graphical User Interface.")
44 |     PARSER.set_defaults(func=bad_args)
45 |     ARGUMENTS = PARSER.parse_args()
46 |     ARGUMENTS.func(ARGUMENTS)
47 | 
--------------------------------------------------------------------------------
/tools/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/belligerentbeagle/DeepFakeTutorial/7a7408453ab53ede80cd11b8d1236860023d1337/tools/__init__.py
--------------------------------------------------------------------------------
/tools/alignments.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | """ Tools for manipulating the alignments serialized file """
3 | 
4 | # TODO merge alignments
5 | from lib.utils import set_system_verbosity
6 | from .lib_alignments import (AlignmentData, Check, Draw, Extract, Legacy,
7 |                              Manual, Reformat, RemoveAlignments, Sort, Spatial)
8 | 
9 | 
10 | class Alignments():
11 |     """ Perform tasks relating to alignments file """
12 |     def __init__(self, arguments):
13 |         self.args = arguments
14 |         self.set_verbosity(arguments.verbose)
15 | 
16 |         dest_format = self.get_dest_format()
17 |         self.alignments = AlignmentData(self.args.alignments_file,
18 |                                         dest_format,
19 |                                         self.args.verbose)
20 | 
21 |     @staticmethod
22 |     def set_verbosity(verbose):
23 |         """ Set the system output verbosity """
24 |         lvl = '0' if verbose else '2'
25 |         set_system_verbosity(lvl)
26 | 
27 |     def get_dest_format(self):
28 |         """ Set the destination format for Alignments """
29 |         dest_format = None
30 |         if (hasattr(self.args, 'alignment_format')
31 |                 and self.args.alignment_format):
32 |             dest_format = self.args.alignment_format
33 |         return dest_format
34 | 
35 |     def process(self):
36 |         """ Main processing function of the Align tool """
37 |         if self.args.job.startswith("extract"):
38 |             job = Extract
39 |         elif self.args.job.startswith("remove-"):
40 |             job = RemoveAlignments
41 |         elif self.args.job.startswith("sort-"):
42 |             job = Sort
43 |         elif self.args.job in ("missing-alignments", "missing-frames",
44 |                                "multi-faces", "leftover-faces",
45 |                                "no-faces"):
46 |             job = Check
47 |         else:
48 |             job = globals()[self.args.job.title()]
49 |         job = job(self.alignments, self.args)
50 |         job.process()
51 | 
--------------------------------------------------------------------------------
/tools/lib_alignments/__init__.py:
--------------------------------------------------------------------------------
1 | from tools.lib_alignments.media import AlignmentData, ExtractedFaces, Faces, Frames
2 | from tools.lib_alignments.annotate import Annotate
3 | from tools.lib_alignments.jobs import Check, Draw, Extract, Legacy, Reformat, RemoveAlignments, Sort, Spatial
4 | from tools.lib_alignments.jobs_manual import Manual
5 | 
--------------------------------------------------------------------------------
/tools/lib_alignments/annotate.py:
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ Tools for annotating an input image """ 3 | 4 | from cv2 import ( # pylint: disable=no-name-in-module 5 | rectangle, circle, polylines, putText, 6 | FONT_HERSHEY_DUPLEX, fillPoly, addWeighted) 7 | from numpy import array, int32, uint8, zeros 8 | 9 | from lib.align_eyes import FACIAL_LANDMARKS_IDXS 10 | 11 | 12 | class Annotate(): 13 | """ Annotate an input image """ 14 | 15 | def __init__(self, image, alignments, original_roi=None): 16 | self.image = image 17 | self.alignments = alignments 18 | self.roi = original_roi 19 | self.colors = {1: (255, 0, 0), 20 | 2: (0, 255, 0), 21 | 3: (0, 0, 255), 22 | 4: (255, 255, 0), 23 | 5: (255, 0, 255), 24 | 6: (0, 255, 255)} 25 | 26 | def draw_black_image(self): 27 | """ Change image to black at correct dimensions """ 28 | height, width = self.image.shape[:2] 29 | self.image = zeros((height, width, 3), uint8) 30 | 31 | def draw_bounding_box(self, color_id=1, thickness=1): 32 | """ Draw the bounding box around faces """ 33 | color = self.colors[color_id] 34 | for alignment in self.alignments: 35 | top_left = (alignment["x"], alignment["y"]) 36 | bottom_right = (alignment["x"] + alignment["w"], 37 | alignment["y"] + alignment["h"]) 38 | rectangle(self.image, top_left, bottom_right, 39 | color, thickness) 40 | 41 | def draw_extract_box(self, color_id=2, thickness=1): 42 | """ Draw the extracted face box """ 43 | if not self.roi: 44 | return 45 | color = self.colors[color_id] 46 | for idx, roi in enumerate(self.roi): 47 | top_left = [point for point in roi.squeeze()[0]] 48 | top_left = (top_left[0], top_left[1] - 10) 49 | putText(self.image, str(idx), top_left, FONT_HERSHEY_DUPLEX, 1.0, 50 | color, thickness) 51 | polylines(self.image, [roi], True, color, thickness) 52 | 53 | def draw_landmarks(self, color_id=3, radius=1): 54 | """ Draw the facial landmarks """ 55 | color = self.colors[color_id] 56 | for alignment in self.alignments: 57 | landmarks = alignment["landmarksXY"] 58 | for (pos_x, pos_y) in landmarks: 59 | circle(self.image, (pos_x, pos_y), radius, color, -1) 60 | 61 | def draw_landmarks_mesh(self, color_id=4, thickness=1): 62 | """ Draw the facial landmarks """ 63 | color = self.colors[color_id] 64 | for alignment in self.alignments: 65 | landmarks = alignment["landmarksXY"] 66 | for key, val in FACIAL_LANDMARKS_IDXS.items(): 67 | points = array([landmarks[val[0]:val[1]]], int32) 68 | fill_poly = bool(key in ("right_eye", "left_eye", "mouth")) 69 | polylines(self.image, points, fill_poly, color, thickness) 70 | 71 | def draw_grey_out_faces(self, live_face): 72 | """ Grey out all faces except target """ 73 | if not self.roi: 74 | return 75 | alpha = 0.6 76 | overlay = self.image.copy() 77 | for idx, roi in enumerate(self.roi): 78 | if idx != int(live_face): 79 | fillPoly(overlay, roi, (0, 0, 0)) 80 | addWeighted(overlay, alpha, self.image, 1 - alpha, 0, self.image) 81 | --------------------------------------------------------------------------------
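One detail of Annotate.draw_grey_out_faces deserves a note: it blacks out every non-target face polygon on a copy of the frame, then alpha-blends that copy back over the original, so masked faces are dimmed rather than erased. Below is a small numpy sketch of the same convex combination; the blend helper is illustrative only, while the tool itself calls cv2.addWeighted.

import numpy as np

def blend(overlay, image, alpha=0.6):
    # Same arithmetic as cv2.addWeighted(overlay, alpha, image, 1 - alpha, 0).
    out = overlay.astype(np.float32) * alpha + image.astype(np.float32) * (1 - alpha)
    return out.clip(0, 255).astype(np.uint8)

frame = np.full((4, 4, 3), 200, np.uint8)  # stand-in for self.image
greyed = frame.copy()
greyed[:, :2] = 0                          # stand-in for fillPoly on a non-target face
print(blend(greyed, frame)[0, 0])          # [80 80 80]: dimmed to 40% brightness
print(blend(greyed, frame)[0, 3])          # [200 200 200]: target region untouched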