├── README.md
├── image_augmentation.py
├── model.py
├── pixel_shuffler.py
├── script.py
├── train.py
├── training_data.py
├── umeyama.py
└── utils.py

/README.md:
--------------------------------------------------------------------------------
# deepfakes_faceswap
This is the code from [deepfakes' faceswap project](https://www.reddit.com/user/deepfakes/).
Hope we can improve it together, HAVE FUN!

Message from deepfakes:

**Whole project with training images and trained model (~300MB):**
anonfile.com/p7w3m0d5be/face-swap.zip or [click here to download](anonfile.com/p7w3m0d5be/face-swap.zip)

**Source code only:**
anonfile.com/f6wbmfd2b2/face-swap-code.zip or [click here to download](anonfile.com/f6wbmfd2b2/face-swap-code.zip)

**Requirements:**

Python 3
OpenCV 3
TensorFlow 1.3+ (?)
Keras 2

You also need a modern GPU with CUDA support for best performance.

**How to run:**

python train.py

As you can see, the code is embarrassingly simple. I don't think it's worth the trouble to keep it secret from everyone.
I believe the community is smart enough to finish the rest of the owl.

If you have any questions, feel free to discuss them here.

**Some tips:**

Reusing an existing model trains much faster than starting from nothing.
If there is not enough training data, start with someone who looks similar, then switch to your target data.
--------------------------------------------------------------------------------
/image_augmentation.py:
--------------------------------------------------------------------------------
import cv2
import numpy

from umeyama import umeyama

def random_transform(image, rotation_range, zoom_range, shift_range, random_flip):
    h, w = image.shape[0:2]
    rotation = numpy.random.uniform(-rotation_range, rotation_range)
    scale = numpy.random.uniform(1 - zoom_range, 1 + zoom_range)
    tx = numpy.random.uniform(-shift_range, shift_range) * w
    ty = numpy.random.uniform(-shift_range, shift_range) * h
    mat = cv2.getRotationMatrix2D((w // 2, h // 2), rotation, scale)
    mat[:, 2] += (tx, ty)
    result = cv2.warpAffine(image, mat, (w, h), borderMode=cv2.BORDER_REPLICATE)
    if numpy.random.random() < random_flip:
        result = result[:, ::-1]
    return result

# Get a pair of random warped images from an aligned 256x256 face image:
# a locally distorted 64x64 input and a matching affine-only 64x64 target.
def random_warp(image):
    assert image.shape == (256, 256, 3)
    range_ = numpy.linspace(128 - 80, 128 + 80, 5)
    mapx = numpy.broadcast_to(range_, (5, 5))
    mapy = mapx.T

    # Jitter a 5x5 control grid, then interpolate it up to a dense remap field.
    mapx = mapx + numpy.random.normal(size=(5, 5), scale=5)
    mapy = mapy + numpy.random.normal(size=(5, 5), scale=5)

    interp_mapx = cv2.resize(mapx, (80, 80))[8:72, 8:72].astype('float32')
    interp_mapy = cv2.resize(mapy, (80, 80))[8:72, 8:72].astype('float32')

    warped_image = cv2.remap(image, interp_mapx, interp_mapy, cv2.INTER_LINEAR)

    # Fit a similarity transform from the jittered grid back to a regular grid
    # and use it to cut out the undistorted target.
    src_points = numpy.stack([mapx.ravel(), mapy.ravel()], axis=-1)
    dst_points = numpy.mgrid[0:65:16, 0:65:16].T.reshape(-1, 2)
    mat = umeyama(src_points, dst_points, True)[0:2]

    target_image = cv2.warpAffine(image, mat, (64, 64))

    return warped_image, target_image
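Together, random_transform and random_warp produce one (distorted input, clean target) training pair. A minimal usage sketch, assuming a 256x256 aligned face image at a hypothetical path:

```python
import cv2
from image_augmentation import random_transform, random_warp

image = cv2.imread("data/trump/00001.jpg")  # hypothetical aligned 256x256 face
image = random_transform(image, rotation_range=10, zoom_range=0.05,
                         shift_range=0.05, random_flip=0.4)
warped, target = random_warp(image)
print(warped.shape, target.shape)  # (64, 64, 3) (64, 64, 3)
```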
--------------------------------------------------------------------------------
/model.py:
--------------------------------------------------------------------------------
from keras.models import Model
from keras.layers import Input, Dense, Flatten, Reshape
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import Conv2D
from keras.optimizers import Adam

from pixel_shuffler import PixelShuffler

optimizer = Adam(lr=5e-5, beta_1=0.5, beta_2=0.999)

IMAGE_SHAPE = (64, 64, 3)
ENCODER_DIM = 1024

# Strided convolution block: halves the spatial resolution.
def conv(filters):
    def block(x):
        x = Conv2D(filters, kernel_size=5, strides=2, padding='same')(x)
        x = LeakyReLU(0.1)(x)
        return x
    return block

# Upscaling block: 4x the channels, then PixelShuffler doubles the resolution.
def upscale(filters):
    def block(x):
        x = Conv2D(filters * 4, kernel_size=3, padding='same')(x)
        x = LeakyReLU(0.1)(x)
        x = PixelShuffler()(x)
        return x
    return block

def Encoder():
    input_ = Input(shape=IMAGE_SHAPE)
    x = input_
    x = conv(128)(x)     # 64 -> 32
    x = conv(256)(x)     # 32 -> 16
    x = conv(512)(x)     # 16 -> 8
    x = conv(1024)(x)    # 8 -> 4
    x = Dense(ENCODER_DIM)(Flatten()(x))
    x = Dense(4 * 4 * 1024)(x)
    x = Reshape((4, 4, 1024))(x)
    x = upscale(512)(x)  # 4 -> 8
    return Model(input_, x)

def Decoder():
    input_ = Input(shape=(8, 8, 512))
    x = input_
    x = upscale(256)(x)  # 8 -> 16
    x = upscale(128)(x)  # 16 -> 32
    x = upscale(64)(x)   # 32 -> 64
    x = Conv2D(3, kernel_size=5, padding='same', activation='sigmoid')(x)
    return Model(input_, x)

# One shared encoder, two identity-specific decoders.
encoder = Encoder()
decoder_A = Decoder()
decoder_B = Decoder()

x = Input(shape=IMAGE_SHAPE)

autoencoder_A = Model(x, decoder_A(encoder(x)))
autoencoder_B = Model(x, decoder_B(encoder(x)))
autoencoder_A.compile(optimizer=optimizer, loss='mean_absolute_error')
autoencoder_B.compile(optimizer=optimizer, loss='mean_absolute_error')
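The architecture is one shared encoder feeding two identity-specific decoders, each trained as an autoencoder with L1 loss. A quick illustrative shape check makes the data flow concrete:

```python
# The shared encoder compresses a 64x64x3 face to an 8x8x512 tensor;
# each decoder reconstructs a 64x64x3 face from that tensor.
from model import encoder, decoder_A, autoencoder_A

print(encoder.output_shape)    # (None, 8, 8, 512)
print(decoder_A.output_shape)  # (None, 64, 64, 3)
autoencoder_A.summary()
```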
--------------------------------------------------------------------------------
/pixel_shuffler.py:
--------------------------------------------------------------------------------
# PixelShuffler layer for Keras
# by t-ae
# https://gist.github.com/t-ae/6e1016cc188104d123676ccef3264981

from keras.utils import conv_utils
from keras.engine.topology import Layer
import keras.backend as K

class PixelShuffler(Layer):
    def __init__(self, size=(2, 2), data_format=None, **kwargs):
        super(PixelShuffler, self).__init__(**kwargs)
        self.data_format = conv_utils.normalize_data_format(data_format)
        self.size = conv_utils.normalize_tuple(size, 2, 'size')

    def call(self, inputs):
        input_shape = K.int_shape(inputs)
        if len(input_shape) != 4:
            raise ValueError('Inputs should have rank ' +
                             str(4) +
                             '; Received input shape:', str(input_shape))

        if self.data_format == 'channels_first':
            batch_size, c, h, w = input_shape
            if batch_size is None:
                batch_size = -1
            rh, rw = self.size
            oh, ow = h * rh, w * rw
            oc = c // (rh * rw)

            out = K.reshape(inputs, (batch_size, rh, rw, oc, h, w))
            out = K.permute_dimensions(out, (0, 3, 4, 1, 5, 2))
            out = K.reshape(out, (batch_size, oc, oh, ow))
            return out

        elif self.data_format == 'channels_last':
            batch_size, h, w, c = input_shape
            if batch_size is None:
                batch_size = -1
            rh, rw = self.size
            oh, ow = h * rh, w * rw
            oc = c // (rh * rw)

            out = K.reshape(inputs, (batch_size, h, w, rh, rw, oc))
            out = K.permute_dimensions(out, (0, 1, 3, 2, 4, 5))
            out = K.reshape(out, (batch_size, oh, ow, oc))
            return out

    def compute_output_shape(self, input_shape):
        if len(input_shape) != 4:
            raise ValueError('Inputs should have rank ' +
                             str(4) +
                             '; Received input shape:', str(input_shape))

        if self.data_format == 'channels_first':
            height = input_shape[2] * self.size[0] if input_shape[2] is not None else None
            width = input_shape[3] * self.size[1] if input_shape[3] is not None else None
            channels = input_shape[1] // self.size[0] // self.size[1]

            if channels * self.size[0] * self.size[1] != input_shape[1]:
                raise ValueError('channels of input and size are incompatible')

            return (input_shape[0],
                    channels,
                    height,
                    width)

        elif self.data_format == 'channels_last':
            height = input_shape[1] * self.size[0] if input_shape[1] is not None else None
            width = input_shape[2] * self.size[1] if input_shape[2] is not None else None
            channels = input_shape[3] // self.size[0] // self.size[1]

            if channels * self.size[0] * self.size[1] != input_shape[3]:
                raise ValueError('channels of input and size are incompatible')

            return (input_shape[0],
                    height,
                    width,
                    channels)

    def get_config(self):
        config = {'size': self.size,
                  'data_format': self.data_format}
        base_config = super(PixelShuffler, self).get_config()

        return dict(list(base_config.items()) + list(config.items()))
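PixelShuffler rearranges a (h, w, c*r^2) tensor into (h*r, w*r, c), trading channels for resolution. A standalone NumPy sketch of what the channels_last branch computes:

```python
import numpy as np

# 1 sample, 2x2 spatial, 4 channels; a (2, 2) shuffle yields 4x4x1.
x = np.arange(16).reshape(1, 2, 2, 4)
b, h, w, c = x.shape
rh, rw = 2, 2
oc = c // (rh * rw)

out = x.reshape(b, h, w, rh, rw, oc)
out = out.transpose(0, 1, 3, 2, 4, 5)  # interleave the r-blocks with rows/cols
out = out.reshape(b, h * rh, w * rw, oc)
print(out.shape)  # (1, 4, 4, 1)
```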
--------------------------------------------------------------------------------
/script.py:
--------------------------------------------------------------------------------
import cv2
import numpy
from pathlib import Path

from utils import get_image_paths

from model import autoencoder_A
from model import autoencoder_B
from model import encoder, decoder_A, decoder_B

encoder.load_weights("models/encoder.h5")
decoder_A.load_weights("models/decoder_A.h5")
decoder_B.load_weights("models/decoder_B.h5")

images_A = get_image_paths("data/trump")
images_B = get_image_paths("data/cage")

def convert_one_image(autoencoder, image):
    assert image.shape == (256, 256, 3)
    crop = slice(48, 208)  # central 160x160 face region
    face = image[crop, crop]
    face = cv2.resize(face, (64, 64))
    face = numpy.expand_dims(face, 0)
    new_face = autoencoder.predict(face / 255.0)[0]
    new_face = numpy.clip(new_face * 255, 0, 255).astype(image.dtype)
    new_face = cv2.resize(new_face, (160, 160))
    new_image = image.copy()
    new_image[crop, crop] = new_face
    return new_image

output_dir = Path('output')
output_dir.mkdir(parents=True, exist_ok=True)

# Run faces of A through B's autoencoder to produce the swapped output.
for fn in images_A:
    image = cv2.imread(fn)
    new_image = convert_one_image(autoencoder_B, image)
    output_file = output_dir / Path(fn).name
    cv2.imwrite(str(output_file), new_image)
--------------------------------------------------------------------------------
/train.py:
--------------------------------------------------------------------------------
import cv2
import numpy

from utils import get_image_paths, load_images, stack_images
from training_data import get_training_data

from model import autoencoder_A
from model import autoencoder_B
from model import encoder, decoder_A, decoder_B

# Resume from saved weights if they exist; otherwise train from scratch.
try:
    encoder.load_weights("models/encoder.h5")
    decoder_A.load_weights("models/decoder_A.h5")
    decoder_B.load_weights("models/decoder_B.h5")
except OSError:
    pass

def save_model_weights():
    encoder.save_weights("models/encoder.h5")
    decoder_A.save_weights("models/decoder_A.h5")
    decoder_B.save_weights("models/decoder_B.h5")
    print("saved model weights")

images_A = get_image_paths("data/trump")
images_B = get_image_paths("data/cage")
images_A = load_images(images_A) / 255.0
images_B = load_images(images_B) / 255.0

# Shift A's mean colour towards B's so the two datasets have similar statistics.
images_A += images_B.mean(axis=(0, 1, 2)) - images_A.mean(axis=(0, 1, 2))

print("press 'q' to stop training and save model")

for epoch in range(1000000):
    batch_size = 64
    warped_A, target_A = get_training_data(images_A, batch_size)
    warped_B, target_B = get_training_data(images_B, batch_size)

    loss_A = autoencoder_A.train_on_batch(warped_A, target_A)
    loss_B = autoencoder_B.train_on_batch(warped_B, target_B)
    print(loss_A, loss_B)

    if epoch % 100 == 0:
        save_model_weights()
        test_A = target_A[0:14]
        test_B = target_B[0:14]

    # Preview columns: input, same-identity reconstruction, swapped identity.
    figure_A = numpy.stack([
        test_A,
        autoencoder_A.predict(test_A),
        autoencoder_B.predict(test_A),
    ], axis=1)
    figure_B = numpy.stack([
        test_B,
        autoencoder_B.predict(test_B),
        autoencoder_A.predict(test_B),
    ], axis=1)

    figure = numpy.concatenate([figure_A, figure_B], axis=0)
    figure = figure.reshape((4, 7) + figure.shape[1:])
    figure = stack_images(figure)

    figure = numpy.clip(figure * 255, 0, 255).astype('uint8')

    cv2.imshow("", figure)
    key = cv2.waitKey(1)
    if key == ord('q'):
        save_model_weights()
        exit()
--------------------------------------------------------------------------------
/training_data.py:
--------------------------------------------------------------------------------
import numpy
from image_augmentation import random_transform
from image_augmentation import random_warp

random_transform_args = {
    'rotation_range': 10,
    'zoom_range': 0.05,
    'shift_range': 0.05,
    'random_flip': 0.4,
}

def get_training_data(images, batch_size):
    # Sample a batch with replacement and augment each image independently.
    indices = numpy.random.randint(len(images), size=batch_size)
    for i, index in enumerate(indices):
        image = images[index]
        image = random_transform(image, **random_transform_args)
        warped_img, target_img = random_warp(image)

        # Allocate the batch arrays once the first pair reveals the shapes.
        if i == 0:
            warped_images = numpy.empty((batch_size,) + warped_img.shape, warped_img.dtype)
            target_images = numpy.empty((batch_size,) + target_img.shape, target_img.dtype)

        warped_images[i] = warped_img
        target_images[i] = target_img

    return warped_images, target_images
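A minimal sketch of pulling one augmented batch, assuming faces are already in the data/trump folder that train.py expects:

```python
from utils import get_image_paths, load_images
from training_data import get_training_data

images = load_images(get_image_paths("data/trump")) / 255.0
warped, target = get_training_data(images, batch_size=4)
print(warped.shape, target.shape)  # (4, 64, 64, 3) (4, 64, 64, 3)
```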
--------------------------------------------------------------------------------
/umeyama.py:
--------------------------------------------------------------------------------
## License (Modified BSD)
## Copyright (C) 2011, the scikit-image team. All rights reserved.
##
## Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
##
## Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
## Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
## Neither the name of skimage nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

# umeyama function from scikit-image/skimage/transform/_geometric.py

import numpy as np

def umeyama(src, dst, estimate_scale):
    """Estimate N-D similarity transformation with or without scaling.

    Parameters
    ----------
    src : (M, N) array
        Source coordinates.
    dst : (M, N) array
        Destination coordinates.
    estimate_scale : bool
        Whether to estimate scaling factor.

    Returns
    -------
    T : (N + 1, N + 1)
        The homogeneous similarity transformation matrix. The matrix contains
        NaN values only if the problem is not well-conditioned.

    References
    ----------
    .. [1] "Least-squares estimation of transformation parameters between two
           point patterns", Shinji Umeyama, PAMI 1991, DOI: 10.1109/34.88573
    """

    num = src.shape[0]
    dim = src.shape[1]

    # Compute mean of src and dst.
    src_mean = src.mean(axis=0)
    dst_mean = dst.mean(axis=0)

    # Subtract mean from src and dst.
    src_demean = src - src_mean
    dst_demean = dst - dst_mean

    # Eq. (38).
    A = np.dot(dst_demean.T, src_demean) / num

    # Eq. (39).
    d = np.ones((dim,), dtype=np.double)
    if np.linalg.det(A) < 0:
        d[dim - 1] = -1

    T = np.eye(dim + 1, dtype=np.double)

    U, S, V = np.linalg.svd(A)

    # Eq. (40) and (43).
    rank = np.linalg.matrix_rank(A)
    if rank == 0:
        return np.nan * T
    elif rank == dim - 1:
        if np.linalg.det(U) * np.linalg.det(V) > 0:
            T[:dim, :dim] = np.dot(U, V)
        else:
            s = d[dim - 1]
            d[dim - 1] = -1
            T[:dim, :dim] = np.dot(U, np.dot(np.diag(d), V))
            d[dim - 1] = s
    else:
        # np.linalg.svd already returns V as V^H, so no transpose is needed,
        # matching the rank-deficient branch above.
        T[:dim, :dim] = np.dot(U, np.dot(np.diag(d), V))

    if estimate_scale:
        # Eq. (41) and (42).
        scale = 1.0 / src_demean.var(axis=0).sum() * np.dot(S, d)
    else:
        scale = 1.0

    T[:dim, dim] = dst_mean - scale * np.dot(T[:dim, :dim], src_mean.T)
    T[:dim, :dim] *= scale

    return T
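A quick sanity check of umeyama on synthetic 2-D points (the values are arbitrary): since dst really is a similarity transform of src, the returned homogeneous matrix should map src exactly onto dst.

```python
import numpy as np
from umeyama import umeyama

src = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0], [2.0, 0.5]])
theta = np.deg2rad(30)
R = np.array([[np.cos(theta), -np.sin(theta)],
              [np.sin(theta),  np.cos(theta)]])
dst = 1.5 * src.dot(R.T) + np.array([2.0, -1.0])  # rotate, scale, translate

T = umeyama(src, dst, True)                  # 3x3 homogeneous matrix
recovered = src.dot(T[:2, :2].T) + T[:2, 2]
print(np.allclose(recovered, dst))           # True
```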
--------------------------------------------------------------------------------
/utils.py:
--------------------------------------------------------------------------------
import cv2
import numpy
import os

def get_image_paths(directory):
    return [x.path for x in os.scandir(directory)
            if x.name.endswith(".jpg") or x.name.endswith(".png")]

def load_images(image_paths, convert=None):
    iter_all_images = (cv2.imread(fn) for fn in image_paths)
    if convert:
        iter_all_images = (convert(img) for img in iter_all_images)
    for i, image in enumerate(iter_all_images):
        # Allocate the output array once the first image reveals the shape;
        # all images are assumed to share it.
        if i == 0:
            all_images = numpy.empty((len(image_paths),) + image.shape, dtype=image.dtype)
        all_images[i] = image
    return all_images

def get_transpose_axes(n):
    if n % 2 == 0:
        y_axes = list(range(1, n - 1, 2))
        x_axes = list(range(0, n - 1, 2))
    else:
        y_axes = list(range(0, n - 1, 2))
        x_axes = list(range(1, n - 1, 2))
    return y_axes, x_axes, [n - 1]

# Collapse a grid of images, e.g. shape (rows, cols, h, w, c), into a single
# image of shape (rows*h, cols*w, c).
def stack_images(images):
    images_shape = numpy.array(images.shape)
    new_axes = get_transpose_axes(len(images_shape))
    new_shape = [numpy.prod(images_shape[x]) for x in new_axes]
    return numpy.transpose(
        images,
        axes=numpy.concatenate(new_axes)
    ).reshape(new_shape)
--------------------------------------------------------------------------------
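To close, a tiny illustrative check of the preview-grid helper used by train.py: stack_images collapses a grid of images into one big image.

```python
import numpy
from utils import stack_images

# A 2x3 grid of 8x8 RGB tiles becomes a single 16x24 image.
grid = numpy.zeros((2, 3, 8, 8, 3))
print(stack_images(grid).shape)  # (16, 24, 3)
```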