├── .gitignore ├── 00-convert_video_to_image.py ├── 01a-crop_faces_with_mtcnn.py ├── 01b-crop_faces_with_azure-vision-api.py ├── 02-prepare_fake_real_dataset.py ├── 03-train_cnn.py ├── LICENSE ├── README.md ├── img ├── dfdetect-home.png └── sample_dataset.png ├── prepared_dataset ├── fake │ ├── aagfhgtpmv-009-00.png │ ├── aapnvogymq-001-00.png │ ├── aapnvogymq-004-01.png │ ├── aapnvogymq-005-01.png │ ├── aapnvogymq-009-00.png │ ├── aapnvogymq-009-01.png │ ├── abofeumbvv-008-00.png │ ├── abqwwspghj-001-00.png │ ├── abqwwspghj-005-00.png │ ├── abqwwspghj-007-00.png │ └── abqwwspghj-008-00.png └── real │ ├── abarnvbtwb-000-00.png │ ├── abarnvbtwb-001-00.png │ ├── abarnvbtwb-002-00.png │ ├── abarnvbtwb-003-00.png │ ├── abarnvbtwb-004-00.png │ ├── abarnvbtwb-005-00.png │ ├── abarnvbtwb-006-00.png │ ├── abarnvbtwb-007-00.png │ ├── abarnvbtwb-008-00.png │ ├── abarnvbtwb-009-00.png │ └── abarnvbtwb-010-00.png ├── requirements.txt ├── split_dataset ├── test │ ├── fake │ │ ├── aagfhgtpmv-009-00.png │ │ └── aapnvogymq-004-01.png │ └── real │ │ ├── abarnvbtwb-000-00.png │ │ └── abarnvbtwb-002-00.png ├── train │ ├── fake │ │ ├── aapnvogymq-001-00.png │ │ ├── aapnvogymq-005-01.png │ │ ├── aapnvogymq-009-00.png │ │ ├── aapnvogymq-009-01.png │ │ ├── abofeumbvv-008-00.png │ │ ├── abqwwspghj-005-00.png │ │ ├── abqwwspghj-007-00.png │ │ └── abqwwspghj-008-00.png │ └── real │ │ ├── abarnvbtwb-001-00.png │ │ ├── abarnvbtwb-003-00.png │ │ ├── abarnvbtwb-004-00.png │ │ ├── abarnvbtwb-005-00.png │ │ ├── abarnvbtwb-006-00.png │ │ ├── abarnvbtwb-008-00.png │ │ ├── abarnvbtwb-009-00.png │ │ └── abarnvbtwb-010-00.png └── val │ ├── fake │ └── abqwwspghj-001-00.png │ └── real │ └── abarnvbtwb-007-00.png └── train_sample_videos ├── aagfhgtpmv.mp4 ├── aapnvogymq.mp4 ├── abarnvbtwb.mp4 ├── abofeumbvv.mp4 ├── abqwwspghj.mp4 └── metadata.json /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | 
*.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
import json
import math
import os

import cv2

# Root folder holding the sample videos and their metadata.json.
base_path = '.\\train_sample_videos\\'


def get_filename_only(file_path):
    """Return the basename of *file_path* without its extension (text before the first dot)."""
    file_basename = os.path.basename(file_path)
    return file_basename.split('.')[0]


with open(os.path.join(base_path, 'metadata.json')) as metadata_json:
    metadata = json.load(metadata_json)
    print(len(metadata))

for filename in metadata.keys():
    print(filename)
    if not filename.endswith(".mp4"):
        continue

    tmp_path = os.path.join(base_path, get_filename_only(filename))
    print('Creating Directory: ' + tmp_path)
    os.makedirs(tmp_path, exist_ok=True)
    print('Converting Video to Images...')

    count = 0
    video_file = os.path.join(base_path, filename)
    cap = cv2.VideoCapture(video_file)
    # Named property constants instead of the magic indices cap.get(5) / cap.get(1).
    frame_rate = cap.get(cv2.CAP_PROP_FPS)
    # Some containers report 0 FPS; clamp to 1 so the modulo below can
    # never divide by zero (the original crashed with ZeroDivisionError).
    sample_every = max(1, math.floor(frame_rate))

    while cap.isOpened():
        frame_id = cap.get(cv2.CAP_PROP_POS_FRAMES)  # current frame number
        ret, frame = cap.read()
        if not ret:
            break
        # Keep roughly one frame per second of video.
        if frame_id % sample_every != 0:
            continue

        print('Original Dimensions: ', frame.shape)
        # Resize strategy (see README): upscale tiny videos, downscale large ones.
        width_px = frame.shape[1]
        if width_px < 300:
            scale_ratio = 2
        elif width_px > 1900:
            scale_ratio = 0.33
        elif width_px > 1000:
            scale_ratio = 0.5
        else:
            scale_ratio = 1
        print('Scale Ratio: ', scale_ratio)

        width = int(frame.shape[1] * scale_ratio)
        height = int(frame.shape[0] * scale_ratio)
        new_frame = cv2.resize(frame, (width, height), interpolation=cv2.INTER_AREA)
        print('Resized Dimensions: ', new_frame.shape)

        new_filename = '{}-{:03d}.png'.format(
            os.path.join(tmp_path, get_filename_only(filename)), count)
        count += 1
        cv2.imwrite(new_filename, new_frame)

    cap.release()
    print("Done!")
import sys, os.path
import json

import cv2
from mtcnn import MTCNN
from keras import backend as K
import tensorflow as tf

print(tf.__version__)
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)

physical_devices = tf.config.list_physical_devices('GPU')
print(physical_devices)
# Guard: the original indexed physical_devices[0] unconditionally and
# crashed with IndexError on CPU-only machines.
if physical_devices:
    tf.config.experimental.set_memory_growth(physical_devices[0], True)

base_path = '.\\train_sample_videos\\'


def get_filename_only(file_path):
    """Return the basename of *file_path* without its extension (text before the first dot)."""
    file_basename = os.path.basename(file_path)
    return file_basename.split('.')[0]


with open(os.path.join(base_path, 'metadata.json')) as metadata_json:
    metadata = json.load(metadata_json)
    print(len(metadata))

# Build the detector once: the original constructed MTCNN() inside the
# per-frame loop, reloading the network weights for every single image.
detector = MTCNN()

for filename in metadata.keys():
    tmp_path = os.path.join(base_path, get_filename_only(filename))
    print('Processing Directory: ' + tmp_path)
    frame_images = [x for x in os.listdir(tmp_path)
                    if os.path.isfile(os.path.join(tmp_path, x))]
    faces_path = os.path.join(tmp_path, 'faces')
    print('Creating Directory: ' + faces_path)
    os.makedirs(faces_path, exist_ok=True)
    print('Cropping Faces from Images...')

    for frame in frame_images:
        print('Processing ', frame)
        # MTCNN expects RGB; OpenCV loads BGR.
        image = cv2.cvtColor(cv2.imread(os.path.join(tmp_path, frame)), cv2.COLOR_BGR2RGB)
        results = detector.detect_faces(image)
        print('Face Detected: ', len(results))
        count = 0

        for result in results:
            bounding_box = result['box']
            print(bounding_box)
            confidence = result['confidence']
            print(confidence)
            # Keep a lone detection unconditionally; with multiple detections,
            # require 95% confidence to filter out false positives.
            if len(results) < 2 or confidence > 0.95:
                margin_x = bounding_box[2] * 0.3  # 30% as the margin
                margin_y = bounding_box[3] * 0.3  # 30% as the margin
                # Clamp the expanded box to the image bounds.
                x1 = max(int(bounding_box[0] - margin_x), 0)
                x2 = min(int(bounding_box[0] + bounding_box[2] + margin_x), image.shape[1])
                y1 = max(int(bounding_box[1] - margin_y), 0)
                y2 = min(int(bounding_box[1] + bounding_box[3] + margin_y), image.shape[0])
                print(x1, y1, x2, y2)
                crop_image = image[y1:y2, x1:x2]
                new_filename = '{}-{:02d}.png'.format(
                    os.path.join(faces_path, get_filename_only(frame)), count)
                count += 1
                cv2.imwrite(new_filename, cv2.cvtColor(crop_image, cv2.COLOR_RGB2BGR))
            else:
                print('Skipped a face..')
import cv2
import sys, os.path
import json
import http.client, urllib.request, urllib.parse, urllib.error, base64

base_path = '.\\train_sample_videos\\'
# e.g. xxxxxxxxxx.cognitiveservices.azure.com
AZURE_COMPUTER_VISION_NAME = '----REPLACE-WITH-YOUR-SERVICE-NAME----'
AZURE_COMPUTER_VISION_API_KEY = '----REPLACE-WITH-YOUR-KEY----'


def get_filename_only(file_path):
    """Return the basename of *file_path* without its extension (text before the first dot)."""
    file_basename = os.path.basename(file_path)
    return file_basename.split('.')[0]


with open(os.path.join(base_path, 'metadata.json')) as metadata_json:
    metadata = json.load(metadata_json)
    print(len(metadata))

for filename in metadata.keys():
    tmp_path = os.path.join(base_path, get_filename_only(filename))
    print('Processing Directory: ' + tmp_path)
    frame_images = [x for x in os.listdir(tmp_path)
                    if os.path.isfile(os.path.join(tmp_path, x))]
    faces_path = os.path.join(tmp_path, 'faces')
    print('Creating Directory: ' + faces_path)
    os.makedirs(faces_path, exist_ok=True)
    print('Cropping Faces from Images...')

    for frame in frame_images:
        print('Processing ', frame)
        image = cv2.cvtColor(cv2.imread(os.path.join(tmp_path, frame)), cv2.COLOR_BGR2RGB)

        # Read the raw image bytes to POST to the API.
        with open(os.path.join(tmp_path, frame), 'rb') as file_contents:
            img_data = file_contents.read()

        ######### Azure Computer Vision API
        headers = {
            # Request headers
            'Content-Type': 'application/octet-stream',
            'Ocp-Apim-Subscription-Key': AZURE_COMPUTER_VISION_API_KEY,
        }

        params = urllib.parse.urlencode({
            # Request parameters
            'visualFeatures': 'Faces'
        })

        try:
            conn = http.client.HTTPSConnection(AZURE_COMPUTER_VISION_NAME)
            conn.request("POST", "/vision/v3.0/analyze?%s" % params, img_data, headers)
            response = conn.getresponse().read()
            data = json.loads(response.decode('utf-8'))
            print(data)
            conn.close()
        except Exception as e:
            # Bug fix: the original printed e.errno / e.strerror, which exist
            # only on OSError — any other exception type raised AttributeError
            # inside the handler itself.
            print('Request failed: {!r}'.format(e))
            continue

        # .get() guards against error payloads that carry no 'faces' key
        # (the original raised KeyError on an API error response).
        faces = data.get('faces', [])
        print(faces)
        print('Face Detected: ', len(faces))
        count = 0

        for result in faces:
            rect = result['faceRectangle']
            bounding_box = [rect['left'], rect['top'], rect['width'], rect['height']]
            print(bounding_box)

            margin_x = bounding_box[2] * 0.3  # 30% as the margin
            margin_y = bounding_box[3] * 0.3  # 30% as the margin
            # Clamp the expanded box to the image bounds.
            x1 = max(int(bounding_box[0] - margin_x), 0)
            x2 = min(int(bounding_box[0] + bounding_box[2] + margin_x), image.shape[1])
            y1 = max(int(bounding_box[1] - margin_y), 0)
            y2 = min(int(bounding_box[1] + bounding_box[3] + margin_y), image.shape[0])
            print(x1, y1, x2, y2)
            crop_image = image[y1:y2, x1:x2]
            new_filename = '{}-{:02d}.png'.format(
                os.path.join(faces_path, get_filename_only(frame)), count)
            count += 1
            cv2.imwrite(new_filename, cv2.cvtColor(crop_image, cv2.COLOR_RGB2BGR))
import json
import os
import shutil
from distutils.dir_util import copy_tree  # NOTE(review): deprecated since Python 3.10

import numpy as np

try:
    # The PyPI package 'split-folders' was renamed: newer releases are imported
    # as 'splitfolders'. Accept either name so both versions keep working.
    import splitfolders as split_folders
except ImportError:
    import split_folders

base_path = '.\\train_sample_videos\\'
dataset_path = '.\\prepared_dataset\\'
print('Creating Directory: ' + dataset_path)
os.makedirs(dataset_path, exist_ok=True)

# Staging area collecting every fake crop before down-sampling.
tmp_fake_path = '.\\tmp_fake_faces'
print('Creating Directory: ' + tmp_fake_path)
os.makedirs(tmp_fake_path, exist_ok=True)


def get_filename_only(file_path):
    """Return the basename of *file_path* without its extension (text before the first dot)."""
    file_basename = os.path.basename(file_path)
    return file_basename.split('.')[0]


with open(os.path.join(base_path, 'metadata.json')) as metadata_json:
    metadata = json.load(metadata_json)
    print(len(metadata))

real_path = os.path.join(dataset_path, 'real')
print('Creating Directory: ' + real_path)
os.makedirs(real_path, exist_ok=True)

fake_path = os.path.join(dataset_path, 'fake')
print('Creating Directory: ' + fake_path)
os.makedirs(fake_path, exist_ok=True)

for filename in metadata.keys():
    print(filename)
    print(metadata[filename]['label'])
    tmp_path = os.path.join(os.path.join(base_path, get_filename_only(filename)), 'faces')
    print(tmp_path)
    if os.path.exists(tmp_path):
        if metadata[filename]['label'] == 'REAL':
            print('Copying to :' + real_path)
            copy_tree(tmp_path, real_path)
        elif metadata[filename]['label'] == 'FAKE':
            print('Copying to :' + tmp_fake_path)
            copy_tree(tmp_path, tmp_fake_path)
        else:
            print('Ignored..')

all_real_faces = [f for f in os.listdir(real_path) if os.path.isfile(os.path.join(real_path, f))]
print('Total Number of Real faces: ', len(all_real_faces))

all_fake_faces = [f for f in os.listdir(tmp_fake_path) if os.path.isfile(os.path.join(tmp_fake_path, f))]
print('Total Number of Fake faces: ', len(all_fake_faces))

# Down-sample the (usually much larger) fake set to the size of the real set.
# min() guards the unusual case of fewer fakes than reals, where the original
# np.random.choice(..., replace=False) raised ValueError.
sample_size = min(len(all_real_faces), len(all_fake_faces))
random_faces = np.random.choice(all_fake_faces, sample_size, replace=False)
for fname in random_faces:
    src = os.path.join(tmp_fake_path, fname)
    dst = os.path.join(fake_path, fname)
    shutil.copyfile(src, dst)

print('Down-sampling Done!')

# Split into Train/ Val/ Test folders (80/10/10), seeded for reproducibility.
split_folders.ratio(dataset_path, output='split_dataset', seed=1377, ratio=(.8, .1, .1))
print('Train/ Val/ Test Split Done!')
import json
import os
from distutils.dir_util import copy_tree
import shutil

import pandas as pd

# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow.keras import backend as K
print('TensorFlow version: ', tf.__version__)

# Set to force CPU
#os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
#if tf.test.gpu_device_name():
#    print('GPU found')
#else:
#    print("No GPU found")

dataset_path = '.\\split_dataset\\'

# Optional dump folder for augmented samples (see save_to_dir below).
tmp_debug_path = '.\\tmp_debug'
print('Creating Directory: ' + tmp_debug_path)
os.makedirs(tmp_debug_path, exist_ok=True)


def get_filename_only(file_path):
    """Return the basename of *file_path* without its extension (text before the first dot)."""
    file_basename = os.path.basename(file_path)
    return file_basename.split('.')[0]


from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import applications
from efficientnet.tfkeras import EfficientNetB0  # EfficientNetB1 ... EfficientNetB7
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.models import load_model

input_size = 128
batch_size_num = 32
train_path = os.path.join(dataset_path, 'train')
val_path = os.path.join(dataset_path, 'val')
test_path = os.path.join(dataset_path, 'test')

train_datagen = ImageDataGenerator(
    rescale = 1/255,  # rescale the tensor values to [0,1]
    rotation_range = 10,
    width_shift_range = 0.1,
    height_shift_range = 0.1,
    shear_range = 0.2,
    zoom_range = 0.1,
    horizontal_flip = True,
    fill_mode = 'nearest'
)

# NOTE(review): with class_mode="binary" the class indices come from the
# alphabetical folder order, presumably fake=0 / real=1 here — verify via
# train_generator.class_indices.
train_generator = train_datagen.flow_from_directory(
    directory = train_path,
    target_size = (input_size, input_size),
    color_mode = "rgb",
    class_mode = "binary",  # "categorical", "binary", "sparse", "input"
    batch_size = batch_size_num,
    shuffle = True
    #save_to_dir = tmp_debug_path
)

val_datagen = ImageDataGenerator(
    rescale = 1/255  # rescale the tensor values to [0,1]
)

val_generator = val_datagen.flow_from_directory(
    directory = val_path,
    target_size = (input_size, input_size),
    color_mode = "rgb",
    class_mode = "binary",  # "categorical", "binary", "sparse", "input"
    batch_size = batch_size_num,
    shuffle = True
    #save_to_dir = tmp_debug_path
)

test_datagen = ImageDataGenerator(
    rescale = 1/255  # rescale the tensor values to [0,1]
)

# class_mode=None: the test generator only yields images (no labels);
# shuffle=False keeps filenames aligned with predictions below.
test_generator = test_datagen.flow_from_directory(
    directory = test_path,
    classes = ['real', 'fake'],
    target_size = (input_size, input_size),
    color_mode = "rgb",
    class_mode = None,
    batch_size = 1,
    shuffle = False
)

# Train a CNN classifier: EfficientNet B0 backbone (ImageNet weights, top
# removed, global max pooling) + two ReLU dense layers + sigmoid output.
efficient_net = EfficientNetB0(
    weights = 'imagenet',
    input_shape = (input_size, input_size, 3),
    include_top = False,
    pooling = 'max'
)

model = Sequential()
model.add(efficient_net)
model.add(Dense(units = 512, activation = 'relu'))
model.add(Dropout(0.5))
model.add(Dense(units = 128, activation = 'relu'))
model.add(Dense(units = 1, activation = 'sigmoid'))
model.summary()

# Compile model. 'learning_rate' replaces the deprecated 'lr' argument.
model.compile(optimizer = Adam(learning_rate=0.0001), loss='binary_crossentropy', metrics=['accuracy'])

checkpoint_filepath = '.\\tmp_checkpoint'
print('Creating Directory: ' + checkpoint_filepath)
os.makedirs(checkpoint_filepath, exist_ok=True)

custom_callbacks = [
    EarlyStopping(
        monitor = 'val_loss',
        mode = 'min',
        patience = 5,
        verbose = 1
    ),
    ModelCheckpoint(
        filepath = os.path.join(checkpoint_filepath, 'best_model.h5'),
        monitor = 'val_loss',
        mode = 'min',
        verbose = 1,
        save_best_only = True
    )
]

# Train network. model.fit replaces the deprecated fit_generator and accepts
# generators directly in TF2.
num_epochs = 20
history = model.fit(
    train_generator,
    epochs = num_epochs,
    steps_per_epoch = len(train_generator),
    validation_data = val_generator,
    validation_steps = len(val_generator),
    callbacks = custom_callbacks
)
print(history.history)

'''
# Plot results (TF2 history keys are 'accuracy' / 'val_accuracy',
# not the TF1-era 'acc' / 'val_acc').
import matplotlib.pyplot as plt

acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(1, len(acc) + 1)

plt.plot(epochs, acc, 'bo', label = 'Training Accuracy')
plt.plot(epochs, val_acc, 'b', label = 'Validation Accuracy')
plt.title('Training and Validation Accuracy')
plt.legend()
plt.figure()

plt.plot(epochs, loss, 'bo', label = 'Training loss')
plt.plot(epochs, val_loss, 'b', label = 'Validation Loss')
plt.title('Training and Validation Loss')
plt.legend()

plt.show()
'''

# load the saved model that is considered the best
best_model = load_model(os.path.join(checkpoint_filepath, 'best_model.h5'))

# Generate predictions
test_generator.reset()

preds = best_model.predict(
    test_generator,
    verbose = 1
)

test_results = pd.DataFrame({
    "Filename": test_generator.filenames,
    "Prediction": preds.flatten()
})
print(test_results)
"Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # DeepFake-Detect 2 | 3 |

4 | 5 |

https://deepfake-detect.com/

6 | 7 | ## Description 8 | 9 | This project aims to guide developers to train a deep learning-based deepfake detection model from scratch using [Python](https://www.python.org), [Keras](https://keras.io) and [TensorFlow](https://www.tensorflow.org). The proposed deepfake detector is based on the state-of-the-art EfficientNet structure with some customizations on the network layers, and the sample models provided were trained against a massive and comprehensive set of deepfake datasets. 10 | 11 | The proposed deepfake detection model is also served via a standard web-based interface at [DF-Detect](https://deepfake-detect.com/) to assist both the general Internet users and digital media providers in identifying potential deepfake contents. It is hoped that such approachable solution could remind Internet users to stay vigilant against fake contents, and ultimately help counter the emergence of deepfakes. 12 | 13 | ### Deepfake Datasets 14 | 15 | Due to the nature of deep neural networks being data-driven, it is necessary to acquire massive deepfake datasets with various different synthesis methods in order to achieve promising results. The following deepfake datasets were used in the final model at [DF-Detect](https://deepfake-detect.com/): 16 | 17 | - [DeepFake-TIMIT](https://www.idiap.ch/dataset/deepfaketimit) 18 | - [FaceForensics++](https://github.com/ondyari/FaceForensics) 19 | - [Google Deep Fake Detection (DFD)](https://ai.googleblog.com/2019/09/contributing-data-to-deepfake-detection.html) 20 | - [Celeb-DF](https://github.com/danmohaha/celeb-deepfakeforensics) 21 | - [Facebook Deepfake Detection Challenge (DFDC)](https://ai.facebook.com/datasets/dfdc/) 22 | 23 |

24 | 25 | Combining all the datasets from different sources would provide us with a total of 134,446 videos with approximately 1,140 unique identities and around 20 deepfake synthesis methods. 26 | 27 | 
28 | 29 | ## Getting Started 30 | 31 | ### Prerequisites 32 | 33 | - Python 3 34 | - Keras 35 | - TensorFlow 36 | - EfficientNet for TensorFlow Keras 37 | - OpenCV on Wheels 38 | - MTCNN 39 | 40 | ### Installation 41 | 42 | ``` 43 | pip install -r requirements.txt 44 | ``` 45 | 46 | ### Usage 47 | 48 | #### Step 0 - Convert video frames to individual images 49 | 50 | ``` 51 | python 00-convert_video_to_image.py 52 | ``` 53 | 54 | Extract all the video frames from the acquired deepfake datasets above, saving them as individual images for further processing. In order to cater for different video qualities and to optimize for the image processing performance, the following image resizing strategies were implemented: 55 | 56 | - 2x resize for videos with width less than 300 pixels 57 | - 1x resize for videos with width between 300 and 1000 pixels 58 | - 0.5x resize for videos with width between 1000 and 1900 pixels 59 | - 0.33x resize for videos with width greater than 1900 pixels 60 | 61 | #### Step 1 - Extract faces from the deepfake images with MTCNN 62 | 63 | ``` 64 | python 01a-crop_faces_with_mtcnn.py 65 | ``` 66 | 67 | Further process the frame images to crop out the facial parts in order to allow the neural network to focus on capturing the facial manipulation artifacts. In cases where there are more than one subject appearing in the same video frame, each detection result is saved separately to provide better variety for the training dataset. 
68 | 69 | - The pre-trained MTCNN model used is coming from this GitHub repo: https://github.com/ipazc/mtcnn 70 | - Added 30% margins from each side of the detected face bounding box 71 | - Used 95% as the confidence threshold to capture the face images 72 | 73 | #### (Optional) Step 1b - Extract faces from the deepfake images with Azure Computer Vision API 74 | 75 | In case you do not have a good enough hardware to run MTCNN, or you want to achieve a faster execution time, you may choose to run **01b** instead of **01a** to leverage the [Azure Computer Vision API](https://azure.microsoft.com/en-us/services/cognitive-services/computer-vision/) for facial recognition. 76 | 77 | ``` 78 | python 01b-crop_faces_with_azure-vision-api.py 79 | ``` 80 | 81 | > Replace the missing parts (*API Name* & *API Key*) before running 82 | 83 | #### Step 2 - Balance and split datasets into various folders 84 | 85 | ``` 86 | python 02-prepare_fake_real_dataset.py 87 | ``` 88 | 89 | As we observed that the number of fakes are much larger than the number of real faces (due to the fact that one real video is usually used for creating multiple deepfakes), we need to perform a down-sampling on the fake dataset based on the number of real crops, in order to tackle for possible class imbalance issues during the training phase. 90 | 91 | We also need to split the dataset into training, validation and testing sets (for example, in the ratio of 80:10:10) as the final step in the data preparation phase. 92 | 93 | #### Step 3 - Model training 94 | 95 | ``` 96 | python 03-train_cnn.py 97 | ``` 98 | 99 | EfficientNet is used as the backbone for the development work. Given that most of the deepfake videos are synthesized using a frame-by-frame approach, we have formulated the deepfake detection task as a binary classification problem such that it would be generally applicable to both video and image contents. 
100 | 101 | In this code sample, we have adapted the EfficientNet B0 model in several ways: The top input layer is replaced by an input size of 128x128 with a depth of 3, and the last convolutional output from B0 is fed to a global max pooling layer. In addition, 2 additional fully connected layers have been introduced with ReLU activations, followed by a final output layer with Sigmoid activation to serve as a binary classifier. 102 | 103 | Thus, given a colored square image as the network input, we would expect the model to compute an output between 0 and 1 that indicates the probability of the input image being either deepfake (0) or pristine (1). 104 | 105 | ## Authors 106 | 107 | * **Aaron Chong** - *Initial work* - [aaronchong888](https://github.com/aaronchong888) 108 | * **Hugo Ng** - *Initial work* - [hugoclong](https://github.com/hugoclong) 109 | 110 | See also the list of [contributors](https://github.com/aaronchong888/DeepFake-Detect/contributors) who participated in this project. 
111 | 112 | ## License 113 | 114 | This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details 115 | 116 | ## Acknowledgments 117 | 118 | This project is built using the following packages and libraries as listed [here](https://github.com/aaronchong888/DeepFake-Detect/network/dependencies) -------------------------------------------------------------------------------- /img/dfdetect-home.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/img/dfdetect-home.png -------------------------------------------------------------------------------- /img/sample_dataset.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/img/sample_dataset.png -------------------------------------------------------------------------------- /prepared_dataset/fake/aagfhgtpmv-009-00.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/prepared_dataset/fake/aagfhgtpmv-009-00.png -------------------------------------------------------------------------------- /prepared_dataset/fake/aapnvogymq-001-00.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/prepared_dataset/fake/aapnvogymq-001-00.png -------------------------------------------------------------------------------- /prepared_dataset/fake/aapnvogymq-004-01.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/prepared_dataset/fake/aapnvogymq-004-01.png -------------------------------------------------------------------------------- /prepared_dataset/fake/aapnvogymq-005-01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/prepared_dataset/fake/aapnvogymq-005-01.png -------------------------------------------------------------------------------- /prepared_dataset/fake/aapnvogymq-009-00.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/prepared_dataset/fake/aapnvogymq-009-00.png -------------------------------------------------------------------------------- /prepared_dataset/fake/aapnvogymq-009-01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/prepared_dataset/fake/aapnvogymq-009-01.png -------------------------------------------------------------------------------- /prepared_dataset/fake/abofeumbvv-008-00.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/prepared_dataset/fake/abofeumbvv-008-00.png -------------------------------------------------------------------------------- /prepared_dataset/fake/abqwwspghj-001-00.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/prepared_dataset/fake/abqwwspghj-001-00.png 
-------------------------------------------------------------------------------- /prepared_dataset/fake/abqwwspghj-005-00.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/prepared_dataset/fake/abqwwspghj-005-00.png -------------------------------------------------------------------------------- /prepared_dataset/fake/abqwwspghj-007-00.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/prepared_dataset/fake/abqwwspghj-007-00.png -------------------------------------------------------------------------------- /prepared_dataset/fake/abqwwspghj-008-00.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/prepared_dataset/fake/abqwwspghj-008-00.png -------------------------------------------------------------------------------- /prepared_dataset/real/abarnvbtwb-000-00.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/prepared_dataset/real/abarnvbtwb-000-00.png -------------------------------------------------------------------------------- /prepared_dataset/real/abarnvbtwb-001-00.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/prepared_dataset/real/abarnvbtwb-001-00.png -------------------------------------------------------------------------------- /prepared_dataset/real/abarnvbtwb-002-00.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/prepared_dataset/real/abarnvbtwb-002-00.png -------------------------------------------------------------------------------- /prepared_dataset/real/abarnvbtwb-003-00.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/prepared_dataset/real/abarnvbtwb-003-00.png -------------------------------------------------------------------------------- /prepared_dataset/real/abarnvbtwb-004-00.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/prepared_dataset/real/abarnvbtwb-004-00.png -------------------------------------------------------------------------------- /prepared_dataset/real/abarnvbtwb-005-00.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/prepared_dataset/real/abarnvbtwb-005-00.png -------------------------------------------------------------------------------- /prepared_dataset/real/abarnvbtwb-006-00.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/prepared_dataset/real/abarnvbtwb-006-00.png -------------------------------------------------------------------------------- /prepared_dataset/real/abarnvbtwb-007-00.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/prepared_dataset/real/abarnvbtwb-007-00.png 
-------------------------------------------------------------------------------- /prepared_dataset/real/abarnvbtwb-008-00.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/prepared_dataset/real/abarnvbtwb-008-00.png -------------------------------------------------------------------------------- /prepared_dataset/real/abarnvbtwb-009-00.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/prepared_dataset/real/abarnvbtwb-009-00.png -------------------------------------------------------------------------------- /prepared_dataset/real/abarnvbtwb-010-00.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/prepared_dataset/real/abarnvbtwb-010-00.png -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy 2 | pandas 3 | tensorflow 4 | keras>=2.2.0 5 | keras_applications >= 1.0.7 6 | opencv-python>=4.1.0 7 | mtcnn>=0.1.0 8 | h5py 9 | efficientnet 10 | split_folders -------------------------------------------------------------------------------- /split_dataset/test/fake/aagfhgtpmv-009-00.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/split_dataset/test/fake/aagfhgtpmv-009-00.png -------------------------------------------------------------------------------- /split_dataset/test/fake/aapnvogymq-004-01.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/split_dataset/test/fake/aapnvogymq-004-01.png -------------------------------------------------------------------------------- /split_dataset/test/real/abarnvbtwb-000-00.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/split_dataset/test/real/abarnvbtwb-000-00.png -------------------------------------------------------------------------------- /split_dataset/test/real/abarnvbtwb-002-00.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/split_dataset/test/real/abarnvbtwb-002-00.png -------------------------------------------------------------------------------- /split_dataset/train/fake/aapnvogymq-001-00.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/split_dataset/train/fake/aapnvogymq-001-00.png -------------------------------------------------------------------------------- /split_dataset/train/fake/aapnvogymq-005-01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/split_dataset/train/fake/aapnvogymq-005-01.png -------------------------------------------------------------------------------- /split_dataset/train/fake/aapnvogymq-009-00.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/split_dataset/train/fake/aapnvogymq-009-00.png 
-------------------------------------------------------------------------------- /split_dataset/train/fake/aapnvogymq-009-01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/split_dataset/train/fake/aapnvogymq-009-01.png -------------------------------------------------------------------------------- /split_dataset/train/fake/abofeumbvv-008-00.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/split_dataset/train/fake/abofeumbvv-008-00.png -------------------------------------------------------------------------------- /split_dataset/train/fake/abqwwspghj-005-00.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/split_dataset/train/fake/abqwwspghj-005-00.png -------------------------------------------------------------------------------- /split_dataset/train/fake/abqwwspghj-007-00.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/split_dataset/train/fake/abqwwspghj-007-00.png -------------------------------------------------------------------------------- /split_dataset/train/fake/abqwwspghj-008-00.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/split_dataset/train/fake/abqwwspghj-008-00.png -------------------------------------------------------------------------------- /split_dataset/train/real/abarnvbtwb-001-00.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/split_dataset/train/real/abarnvbtwb-001-00.png -------------------------------------------------------------------------------- /split_dataset/train/real/abarnvbtwb-003-00.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/split_dataset/train/real/abarnvbtwb-003-00.png -------------------------------------------------------------------------------- /split_dataset/train/real/abarnvbtwb-004-00.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/split_dataset/train/real/abarnvbtwb-004-00.png -------------------------------------------------------------------------------- /split_dataset/train/real/abarnvbtwb-005-00.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/split_dataset/train/real/abarnvbtwb-005-00.png -------------------------------------------------------------------------------- /split_dataset/train/real/abarnvbtwb-006-00.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/split_dataset/train/real/abarnvbtwb-006-00.png -------------------------------------------------------------------------------- /split_dataset/train/real/abarnvbtwb-008-00.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/split_dataset/train/real/abarnvbtwb-008-00.png -------------------------------------------------------------------------------- /split_dataset/train/real/abarnvbtwb-009-00.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/split_dataset/train/real/abarnvbtwb-009-00.png -------------------------------------------------------------------------------- /split_dataset/train/real/abarnvbtwb-010-00.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/split_dataset/train/real/abarnvbtwb-010-00.png -------------------------------------------------------------------------------- /split_dataset/val/fake/abqwwspghj-001-00.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/split_dataset/val/fake/abqwwspghj-001-00.png -------------------------------------------------------------------------------- /split_dataset/val/real/abarnvbtwb-007-00.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/split_dataset/val/real/abarnvbtwb-007-00.png -------------------------------------------------------------------------------- /train_sample_videos/aagfhgtpmv.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/train_sample_videos/aagfhgtpmv.mp4 
-------------------------------------------------------------------------------- /train_sample_videos/aapnvogymq.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/train_sample_videos/aapnvogymq.mp4 -------------------------------------------------------------------------------- /train_sample_videos/abarnvbtwb.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/train_sample_videos/abarnvbtwb.mp4 -------------------------------------------------------------------------------- /train_sample_videos/abofeumbvv.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/train_sample_videos/abofeumbvv.mp4 -------------------------------------------------------------------------------- /train_sample_videos/abqwwspghj.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaronchong888/DeepFake-Detect/567dd172389fb86db68f03acd9c219be53a9c0dd/train_sample_videos/abqwwspghj.mp4 -------------------------------------------------------------------------------- /train_sample_videos/metadata.json: -------------------------------------------------------------------------------- 1 | {"aagfhgtpmv.mp4":{"label":"FAKE","split":"train","original":"vudstovrck.mp4"},"aapnvogymq.mp4":{"label":"FAKE","split":"train","original":"jdubbvfswz.mp4"},"abarnvbtwb.mp4":{"label":"REAL","split":"train","original":null},"abofeumbvv.mp4":{"label":"FAKE","split":"train","original":"atvmxvwyns.mp4"},"abqwwspghj.mp4":{"label":"FAKE","split":"train","original":"qzimuostzz.mp4"}} 
--------------------------------------------------------------------------------