├── .all-contributorsrc
├── Django Application
│   ├── .dockerignore
│   ├── .gitignore
│   ├── Dockerfile
│   ├── README.md
│   ├── bin
│   │   └── gunicorn_start.sh
│   ├── db.sqlite3
│   ├── manage.py
│   ├── ml_app
│   │   ├── __init__.py
│   │   ├── __pycache__
│   │   │   ├── __init__.cpython-36.pyc
│   │   │   ├── __init__.cpython-38.pyc
│   │   │   ├── apps.cpython-36.pyc
│   │   │   ├── apps.cpython-38.pyc
│   │   │   ├── forms.cpython-36.pyc
│   │   │   ├── forms.cpython-38.pyc
│   │   │   ├── models.cpython-36.pyc
│   │   │   ├── models.cpython-38.pyc
│   │   │   ├── urls.cpython-36.pyc
│   │   │   ├── urls.cpython-38.pyc
│   │   │   ├── views.cpython-36.pyc
│   │   │   └── views.cpython-38.pyc
│   │   ├── admin.py
│   │   ├── apps.py
│   │   ├── forms.py
│   │   ├── models.py
│   │   ├── templates
│   │   │   ├── 404.html
│   │   │   ├── about.html
│   │   │   ├── cuda_full.html
│   │   │   ├── index.html
│   │   │   └── predict.html
│   │   ├── tests.py
│   │   ├── urls.py
│   │   └── views.py
│   ├── nginx
│   │   ├── Dockerfile
│   │   └── nginx.conf
│   ├── project_settings
│   │   ├── __init__.py
│   │   ├── __pycache__
│   │   │   ├── __init__.cpython-36.pyc
│   │   │   ├── __init__.cpython-38.pyc
│   │   │   ├── settings.cpython-36.pyc
│   │   │   ├── settings.cpython-38.pyc
│   │   │   ├── urls.cpython-36.pyc
│   │   │   ├── urls.cpython-38.pyc
│   │   │   ├── wsgi.cpython-36.pyc
│   │   │   └── wsgi.cpython-38.pyc
│   │   ├── asgi.py
│   │   ├── settings.py
│   │   ├── urls.py
│   │   └── wsgi.py
│   ├── requirements.txt
│   ├── static
│   │   ├── bootstrap
│   │   │   └── bootstrap.min.css
│   │   ├── css
│   │   │   ├── jquery-ui.css
│   │   │   └── styles.css
│   │   ├── images
│   │   │   ├── Thumbs.db
│   │   │   ├── background.png
│   │   │   ├── background1.png
│   │   │   ├── logo1.png
│   │   │   ├── thumpdown.png
│   │   │   └── thumpup.png
│   │   ├── js
│   │   │   ├── face-api.js
│   │   │   ├── face-api.min.js
│   │   │   ├── jquery-3.4.1.min.js
│   │   │   ├── jquery-3.5.0.min.js
│   │   │   ├── jquery-ui.min.js
│   │   │   ├── popper.min.js
│   │   │   └── script.js
│   │   └── json
│   │       ├── age_gender_model-shard1
│   │       ├── age_gender_model-weights_manifest.json
│   │       ├── face_expression_model-shard1
│   │       ├── face_expression_model-weights_manifest.json
│   │       ├── face_landmark_68_model-shard1
│   │       ├── face_landmark_68_model-weights_manifest.json
│   │       ├── face_landmark_68_tiny_model-shard1
│   │       ├── face_landmark_68_tiny_model-weights_manifest.json
│   │       ├── face_recognition_model-shard1
│   │       ├── face_recognition_model-shard2
│   │       ├── face_recognition_model-weights_manifest.json
│   │       ├── mtcnn_model-shard1
│   │       ├── mtcnn_model-weights_manifest.json
│   │       ├── ssd_mobilenetv1_model-shard1
│   │       ├── ssd_mobilenetv1_model-shard2
│   │       ├── ssd_mobilenetv1_model-weights_manifest.json
│   │       ├── tiny_face_detector_model-shard1
│   │       └── tiny_face_detector_model-weights_manifest.json
│   ├── templates
│   │   ├── base.html
│   │   ├── footer.html
│   │   └── nav-bar.html
│   ├── uploaded_images
│   │   └── Readme.txt
│   └── uploaded_videos
│       └── Readme.txt
├── Documentation
│   ├── B1_poster .pdf
│   ├── ESE presentation.pptx
│   ├── IJSRDV8I10860.pdf
│   ├── Project Report.pdf
│   └── README.md
├── LICENSE
├── Model Creation
│   ├── Helpers
│   │   ├── Create_csv_from_glob.ipynb
│   │   ├── Remove_audio_altered_files.ipynb
│   │   ├── copy real and fake .ipynb
│   │   ├── deepfake-starter-kit.ipynb
│   │   ├── for_Balancing_data.ipynb
│   │   └── label_json_to_csv.py
│   ├── Model_and_train_csv.ipynb
│   ├── Predict.ipynb
│   ├── Readme.md
│   ├── labels
│   │   └── Gobal_metadata.csv
│   └── preprocessing.ipynb
├── README.md
└── github_assets
    ├── System Architecture.png
    ├── fake gif.gif
    └── fakegif.gif
/.all-contributorsrc:
--------------------------------------------------------------------------------
1 | {
2 | "files": [
3 | "README.md"
4 | ],
5 | "imageSize": 100,
6 | "commit": false,
7 | "commitType": "docs",
8 | "commitConvention": "angular",
9 | "contributors": [
10 | {
11 | "login": "abhijitjadhav1998",
12 | "name": "Abhijit Jadhav",
13 | "avatar_url": "https://avatars.githubusercontent.com/u/38549908?v=4",
14 | "profile": "https://www.linkedin.com/in/abhijitjadhav1998/",
15 | "contributions": [
16 | "projectManagement"
17 | ]
18 | },
19 | {
20 | "login": "vthonte",
21 | "name": "Vishwesh Thonte",
22 | "avatar_url": "https://avatars.githubusercontent.com/u/43621438?v=4",
23 | "profile": "http://vthonte.vercel.app/",
24 | "contributions": [
25 | "maintenance"
26 | ]
27 | }
28 | ],
29 | "contributorsPerLine": 7,
30 | "skipCi": true,
31 | "repoType": "github",
32 | "repoHost": "https://github.com",
33 | "projectName": "Deepfake_detection_using_deep_learning",
34 | "projectOwner": "abhijitjadhav1998"
35 | }
36 |
--------------------------------------------------------------------------------
/Django Application/.dockerignore:
--------------------------------------------------------------------------------
1 | # Ignore a file or directory in the context root named "venv"
2 | venv
3 |
4 | # Ignore any files or directories within the subdirectory named "venv"
5 | # in the context root
6 | venv/*
7 |
8 | # Ignore any files or directories in the context root beginning with "venv"
9 | venv*
10 |
11 | # Ignore any files or directories one level down from the context root named
12 | # "venv"
13 | */venv
14 |
15 | # Ignore any files or directories at any level, including the context root,
16 | # named "venv"
17 | **/venv
18 |
19 | # Git
20 | .git
21 | .gitignore
22 |
23 | # Docker
24 | .docker
25 |
26 | # Python
27 | __pycache__
28 | app/__pycache__/
29 | app/*/__pycache__/
30 | app/*/*/__pycache__/
31 | app/*/*/*/__pycache__/
32 | .env/
33 | .venv/
34 | venv/
35 |
36 | # Local PostgreSQL data
37 | data/
--------------------------------------------------------------------------------
/Django Application/.gitignore:
--------------------------------------------------------------------------------
1 | venv
2 | .vs
3 | .vscode
4 | models/*
5 | !models/Readme.txt
6 | uploaded_images/*
7 | !uploaded_images/Readme.txt
8 | uploaded_videos/*
9 | !uploaded_videos/Readme.txt
10 |
11 |
--------------------------------------------------------------------------------
/Django Application/Dockerfile:
--------------------------------------------------------------------------------
1 | # Pull the NVIDIA CUDA GPU image (this stage is superseded by the Python image below)
2 | FROM nvidia/cuda
3 |
4 | # Pull the Python 3.6.8 image; the final image is built from this stage
5 | FROM python:3.6.8
6 | ENV PYTHONDONTWRITEBYTECODE 1
7 | ENV PYTHONUNBUFFERED 1
8 | #create a directory to serve static files
9 | RUN mkdir -p /home/app/staticfiles/app/uploaded_videos/
10 | WORKDIR /app
11 | COPY ./requirements.txt /app/requirements.txt
12 | RUN python -m pip install --upgrade pip
13 | RUN pip install cmake
14 | RUN pip install opencv-python==4.2.0.32
15 | RUN pip install -r requirements.txt
16 | COPY . /app
17 | RUN python manage.py collectstatic --noinput
18 | RUN pip install gunicorn
19 | RUN mkdir -p /app/uploaded_videos/app/uploaded_videos/
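# this nested path mirrors the production save location used in ml_app/views.py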
20 |
21 | VOLUME /app/run/
22 | ENTRYPOINT ["/app/bin/gunicorn_start.sh"]
--------------------------------------------------------------------------------
/Django Application/README.md:
--------------------------------------------------------------------------------
1 | # Deep fake detection Django Application
2 | ## Requirements:
3 |
4 | **Note :** Nvidia GPU is mandatory to run the application.
5 | - CUDA version >= 10.0 for GPU
6 | - GPU Compute Capability > 3.0
7 |
8 |
9 | You can find the list of requirements in [requirements.txt](https://github.com/abhijitjadhav1998/Deepfake_detection_using_deep_learning/blob/master/Django%20Application/requirements.txt). Main requirements are listed below:
10 |
11 | ```
12 | Python >= v3.6
13 | Django >= v3.0
14 | ```
15 |
16 | ## Directory Structure
17 |
18 | - ml_app -> Django app directory; the core application logic lives in views.py
19 | - project_settings -> Contains Django settings and files to run in production
20 | - static -> Contains all css, js and json files (for face-api)
21 | - templates -> Template files for HTML
22 |
23 | Note: Before running the project make sure you have created directories namely models, uploaded_images, uploaded_videos in the project root and that you have proper permissions to access them.
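The expected folders can also be created with a short script (a minimal sketch; run it from the Django Application directory):

```
import os

# create the folders the app expects, if they are not already present
for folder in ("models", "uploaded_images", "uploaded_videos"):
    os.makedirs(folder, exist_ok=True)
```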
24 | # Running application on Docker
25 | #### Step 1: Install Docker Desktop and start the Docker daemon
26 |
27 | #### Step 2: Run the deepfake detection Docker image
28 | ```
29 | docker run --rm --gpus all -v static_volume:/home/app/staticfiles/ -v media_volume:/app/uploaded_videos/ --name=deepfakeapplication abhijitjadhav1998/deefake-detection-20framemodel
30 | ```
31 | #### Step 3: Run the Nginx reverse-proxy server Docker image
32 | ```
33 | docker run -p 80:80 --volumes-from deepfakeapplication -v static_volume:/home/app/staticfiles/ -v media_volume:/app/uploaded_videos/ abhijitjadhav1998/deepfake-nginx-proxyserver
34 | ```
35 | #### Step 4: All set! Now open your application at [http://localhost:80](http://localhost:80)
36 |
37 | ### Step 5: Star⭐ this repo 😉 on GitHub and Star⭐ this image on Docker Hub
38 |
39 | ## We deserve a Coffee ☕
40 |
41 |
42 | Please note that currently we have only pushed the image of the 20-frames model. If you want to create your own image for a different frames model, follow the steps given in the [blog](https://abhijithjadhav.medium.com/dockerise-deepfake-detection-django-application-using-nvidia-cuda-40cdda3b6d38).
43 |
44 | # Running application locally on your machine
45 |
46 | ### Prerequisite
47 | 1. Copy your trained model to the models folder.
48 | - You can download our trained models from [Google Drive](https://drive.google.com/drive/folders/1UX8jXUXyEjhLLZ38tcgOwGsZ6XFSLDJ-?usp=sharing) or you can train your own models using the steps mentioned in the Model Creation directory.
49 |
50 | #### Step 1: Clone the repo and navigate to the Django Application directory
51 |
52 | `git clone https://github.com/abhijitjadhav1998/Deepfake_detection_using_deep_learning.git`
53 |
54 | #### Step 2: Create virtualenv (optional)
55 |
56 | `python -m venv venv`
57 |
58 | #### Step 3: Activate virtualenv (optional)
59 |
60 | `venv\Scripts\activate`
61 |
62 | #### Step 4: Install requirements
63 |
64 | `pip install -r requirements.txt`
65 |
66 | #### Step 5: Copy Models
67 |
68 | Copy your trained model to the models folder, i.e. Django Application/models/.
69 |
70 | - You can download our trained models from [Google Drive](https://drive.google.com/drive/folders/1UX8jXUXyEjhLLZ38tcgOwGsZ6XFSLDJ-?usp=sharing)
71 |
72 | **Note:** The model name must follow the specified format only, i.e. *model_84_acc_10_frames_final_data.pt*. Make sure the number of frames appears after the third underscore `_`; in the above example the model is for 10 frames.
73 |
74 |
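For reference, the frame count can be read back out of a filename following this convention (a minimal sketch, not the app's own code):

```
def frames_in_model_name(name):
    # "model_84_acc_10_frames_final_data.pt".split("_") ->
    # ["model", "84", "acc", "10", "frames", "final", "data.pt"]
    return int(name.split("_")[3])

assert frames_in_model_name("model_84_acc_10_frames_final_data.pt") == 10
```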
75 | ### Step 6: Run project
76 |
77 | `python manage.py runserver`
78 |
79 | ## Demo
80 | ### You can watch the [youtube video](https://www.youtube.com/watch?v=_q16aJTXVRE&t=823s) for a demo
81 |
--------------------------------------------------------------------------------
/Django Application/ml_app/templates/predict.html (truncated excerpt; most markup was lost in extraction):
--------------------------------------------------------------------------------
17 | Frames Split
19 | {% for each_image in preprocessed_images %}
21 | {%endfor%}
25 | Face Cropped Frames
27 | {% for each_image in faces_cropped_images %}
29 | {%endfor%}
33 | Play to see Result
37 | {%if output == "REAL" %}
38 | Result: {{output}}
40 | {%else%}
41 | Result: {{output}}
43 | {%endif%}
54 | {%endif%}
55 | {%endblock%}
56 | {%block js_cripts%}
110 | {%endblock%}
--------------------------------------------------------------------------------
/Django Application/ml_app/tests.py:
--------------------------------------------------------------------------------
1 | from django.test import TestCase
2 |
3 | # Create your tests here.
4 |
--------------------------------------------------------------------------------
/Django Application/ml_app/urls.py:
--------------------------------------------------------------------------------
1 | """project_settings URL Configuration
2 | """
3 | from django.contrib import admin
4 | from django.urls import path, include
5 | from . import views
6 | from .views import about, index, predict_page,cuda_full
7 |
8 | app_name = 'ml_app'
9 | handler404 = views.handler404
10 |
11 | urlpatterns = [
12 | path('', index, name='home'),
13 | path('about/', about, name='about'),
14 | path('predict/', predict_page, name='predict'),
15 |     path('cuda_full/', cuda_full, name='cuda_full'),
16 | ]
17 |
--------------------------------------------------------------------------------
/Django Application/ml_app/views.py:
--------------------------------------------------------------------------------
1 | from django.shortcuts import render, redirect
2 | import torch
3 | import torchvision
4 | from torchvision import transforms, models
5 | from torch.utils.data import DataLoader
6 | from torch.utils.data.dataset import Dataset
7 | import os
8 | import numpy as np
9 | import cv2
10 | import matplotlib.pyplot as plt
11 | import face_recognition
12 | from torch.autograd import Variable
13 | import time
14 | import sys
15 | from torch import nn
16 | import json
17 | import glob
18 | import copy
20 | import shutil
21 | from PIL import Image as pImage
23 | from django.conf import settings
24 | from .forms import VideoUploadForm
25 |
26 | index_template_name = 'index.html'
27 | predict_template_name = 'predict.html'
28 | about_template_name = "about.html"
29 |
30 | im_size = 112
31 | mean=[0.485, 0.456, 0.406]
32 | std=[0.229, 0.224, 0.225]
33 | sm = nn.Softmax(dim=1)
34 | inv_normalize = transforms.Normalize(mean=-1*np.divide(mean,std),std=np.divide([1,1,1],std))
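# inv_normalize undoes Normalize(mean, std): Normalize(-mean/std, 1/std) maps a
# normalized tensor x back to x*std + mean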
35 | if torch.cuda.is_available():
36 | device = 'gpu'
37 | else:
38 | device = 'cpu'
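# note: 'gpu' is only an app-internal flag checked before model loading below;
# the model itself is moved to CUDA via .cuda()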
39 |
40 | train_transforms = transforms.Compose([
41 | transforms.ToPILImage(),
42 | transforms.Resize((im_size,im_size)),
43 | transforms.ToTensor(),
44 | transforms.Normalize(mean,std)])
45 |
46 | class Model(nn.Module):
47 |
48 | def __init__(self, num_classes,latent_dim= 2048, lstm_layers=1 , hidden_dim = 2048, bidirectional = False):
49 | super(Model, self).__init__()
50 | model = models.resnext50_32x4d(pretrained = True)
51 | self.model = nn.Sequential(*list(model.children())[:-2])
52 | self.lstm = nn.LSTM(latent_dim,hidden_dim, lstm_layers, bidirectional)
53 | self.relu = nn.LeakyReLU()
54 | self.dp = nn.Dropout(0.4)
55 | self.linear1 = nn.Linear(2048,num_classes)
56 | self.avgpool = nn.AdaptiveAvgPool2d(1)
57 |
58 | def forward(self, x):
59 | batch_size,seq_length, c, h, w = x.shape
60 | x = x.view(batch_size * seq_length, c, h, w)
61 | fmap = self.model(x)
62 | x = self.avgpool(fmap)
63 | x = x.view(batch_size,seq_length,2048)
64 | x_lstm,_ = self.lstm(x,None)
65 | return fmap,self.dp(self.linear1(x_lstm[:,-1,:]))
66 |
67 |
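# A minimal shape sanity check for the Model class above (an illustrative sketch,
# not part of the original app): with num_classes=2 and a (batch, seq_len, C, H, W)
# input, the second return value is the (batch, num_classes) logits tensor.
#
#   m = Model(num_classes=2)
#   dummy = torch.randn(1, 20, 3, im_size, im_size)  # 1 video, 20 frames of 112x112 RGB
#   fmap, logits = m(dummy)
#   assert logits.shape == (1, 2)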
68 | class validation_dataset(Dataset):
69 | def __init__(self,video_names,sequence_length=60,transform = None):
70 | self.video_names = video_names
71 | self.transform = transform
72 | self.count = sequence_length
73 |
74 | def __len__(self):
75 | return len(self.video_names)
76 |
77 | def __getitem__(self,idx):
78 | video_path = self.video_names[idx]
79 | frames = []
80 | a = int(100/self.count)
81 | first_frame = np.random.randint(0,a)
82 | for i,frame in enumerate(self.frame_extract(video_path)):
83 | #if(i % a == first_frame):
84 | faces = face_recognition.face_locations(frame)
85 | try:
86 | top,right,bottom,left = faces[0]
87 | frame = frame[top:bottom,left:right,:]
88 |             except IndexError:
89 |                 pass
90 | frames.append(self.transform(frame))
91 | if(len(frames) == self.count):
92 | break
93 | """
94 | for i,frame in enumerate(self.frame_extract(video_path)):
95 | if(i % a == first_frame):
96 | frames.append(self.transform(frame))
97 | """
98 | # if(len(frames)
193 |     if len(sequence_model) > 1:
194 |         accuracy = []
195 |         for filename in sequence_model:
196 |             acc = filename.split("_")[1]
197 |             accuracy.append(float(acc))  # convert to float so max() compares numerically, not lexicographically
198 |         max_index = accuracy.index(max(accuracy))
199 |         final_model = os.path.join(settings.PROJECT_DIR, "models", sequence_model[max_index])
200 |     elif len(sequence_model) == 1:
201 |         final_model = os.path.join(settings.PROJECT_DIR, "models", sequence_model[0])
202 |     else:
203 |         print("No model found for the specified sequence length.")  # handle the no-models case
204 |
205 |     return final_model
206 |
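# Example of the expected filename convention (from the README; illustrative):
#   model_84_acc_10_frames_final_data.pt -> split("_")[1] == "84" (accuracy)
#                                           split("_")[3] == "10" (frame count)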
207 | ALLOWED_VIDEO_EXTENSIONS = {'mp4', 'gif', 'webm', 'avi', '3gp', 'wmv', 'flv', 'mkv'}
208 |
209 | def allowed_video_file(filename):
210 |     #print("filename", filename.rsplit('.',1)[1].lower())
211 |     return filename.rsplit('.', 1)[1].lower() in ALLOWED_VIDEO_EXTENSIONS
212 |
215 | def index(request):
216 | if request.method == 'GET':
217 | video_upload_form = VideoUploadForm()
218 | if 'file_name' in request.session:
219 | del request.session['file_name']
220 | if 'preprocessed_images' in request.session:
221 | del request.session['preprocessed_images']
222 | if 'faces_cropped_images' in request.session:
223 | del request.session['faces_cropped_images']
224 | return render(request, index_template_name, {"form": video_upload_form})
225 | else:
226 | video_upload_form = VideoUploadForm(request.POST, request.FILES)
227 | if video_upload_form.is_valid():
228 | video_file = video_upload_form.cleaned_data['upload_video_file']
229 | video_file_ext = video_file.name.split('.')[-1]
230 | sequence_length = video_upload_form.cleaned_data['sequence_length']
231 | video_content_type = video_file.content_type.split('/')[0]
232 | if video_content_type in settings.CONTENT_TYPES:
233 | if video_file.size > int(settings.MAX_UPLOAD_SIZE):
234 | video_upload_form.add_error("upload_video_file", "Maximum file size 100 MB")
235 | return render(request, index_template_name, {"form": video_upload_form})
236 |
237 | if sequence_length <= 0:
238 | video_upload_form.add_error("sequence_length", "Sequence Length must be greater than 0")
239 | return render(request, index_template_name, {"form": video_upload_form})
240 |
241 |             if not allowed_video_file(video_file.name):
242 |                 video_upload_form.add_error("upload_video_file", "Only video files are allowed")
243 | return render(request, index_template_name, {"form": video_upload_form})
244 |
245 | saved_video_file = 'uploaded_file_'+str(int(time.time()))+"."+video_file_ext
246 | if settings.DEBUG:
247 | with open(os.path.join(settings.PROJECT_DIR, 'uploaded_videos', saved_video_file), 'wb') as vFile:
248 | shutil.copyfileobj(video_file, vFile)
249 | request.session['file_name'] = os.path.join(settings.PROJECT_DIR, 'uploaded_videos', saved_video_file)
250 | else:
251 | with open(os.path.join(settings.PROJECT_DIR, 'uploaded_videos','app','uploaded_videos', saved_video_file), 'wb') as vFile:
252 | shutil.copyfileobj(video_file, vFile)
253 | request.session['file_name'] = os.path.join(settings.PROJECT_DIR, 'uploaded_videos','app','uploaded_videos', saved_video_file)
254 | request.session['sequence_length'] = sequence_length
255 | return redirect('ml_app:predict')
256 | else:
257 | return render(request, index_template_name, {"form": video_upload_form})
258 |
259 | def predict_page(request):
260 | if request.method == "GET":
261 | # Redirect to 'home' if 'file_name' is not in session
262 | if 'file_name' not in request.session:
263 | return redirect("ml_app:home")
264 |         video_file = request.session['file_name']
266 | if 'sequence_length' in request.session:
267 | sequence_length = request.session['sequence_length']
268 | path_to_videos = [video_file]
269 | video_file_name = os.path.basename(video_file)
270 | video_file_name_only = os.path.splitext(video_file_name)[0]
271 | # Production environment adjustments
272 | if not settings.DEBUG:
273 | production_video_name = os.path.join('/home/app/staticfiles/', video_file_name.split('/')[3])
274 | print("Production file name", production_video_name)
275 | else:
276 | production_video_name = video_file_name
277 |
278 | # Load validation dataset
279 | video_dataset = validation_dataset(path_to_videos, sequence_length=sequence_length, transform=train_transforms)
280 |
281 | # Load model
282 | if(device == "gpu"):
283 | model = Model(2).cuda() # Adjust the model instantiation according to your model structure
284 | else:
285 | model = Model(2).cpu() # Adjust the model instantiation according to your model structure
286 |         path_to_model = os.path.join(settings.PROJECT_DIR, 'models', get_accurate_model(sequence_length))
288 | model.load_state_dict(torch.load(path_to_model, map_location=torch.device('cpu')))
289 | model.eval()
290 | start_time = time.time()
291 | # Display preprocessing images
292 | print("<=== | Started Videos Splitting | ===>")
293 | preprocessed_images = []
294 | faces_cropped_images = []
295 | cap = cv2.VideoCapture(video_file)
296 | frames = []
297 | while cap.isOpened():
298 | ret, frame = cap.read()
299 | if ret:
300 | frames.append(frame)
301 | else:
302 | break
303 | cap.release()
304 |
305 | print(f"Number of frames: {len(frames)}")
306 | # Process each frame for preprocessing and face cropping
307 | padding = 40
308 | faces_found = 0
309 | for i in range(sequence_length):
310 | if i >= len(frames):
311 | break
312 | frame = frames[i]
313 |
314 | # Convert BGR to RGB
315 | rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
316 |
317 | # Save preprocessed image
318 | image_name = f"{video_file_name_only}_preprocessed_{i+1}.png"
319 | image_path = os.path.join(settings.PROJECT_DIR, 'uploaded_images', image_name)
320 | img_rgb = pImage.fromarray(rgb_frame, 'RGB')
321 | img_rgb.save(image_path)
322 | preprocessed_images.append(image_name)
323 |
324 | # Face detection and cropping
325 | face_locations = face_recognition.face_locations(rgb_frame)
326 | if len(face_locations) == 0:
327 | continue
328 |
329 | top, right, bottom, left = face_locations[0]
330 | frame_face = frame[top - padding:bottom + padding, left - padding:right + padding]
331 |
332 | # Convert cropped face image to RGB and save
333 | rgb_face = cv2.cvtColor(frame_face, cv2.COLOR_BGR2RGB)
334 | img_face_rgb = pImage.fromarray(rgb_face, 'RGB')
335 | image_name = f"{video_file_name_only}_cropped_faces_{i+1}.png"
336 | image_path = os.path.join(settings.PROJECT_DIR, 'uploaded_images', image_name)
337 | img_face_rgb.save(image_path)
338 | faces_found += 1
339 | faces_cropped_images.append(image_name)
340 |
341 | print("<=== | Videos Splitting and Face Cropping Done | ===>")
342 | print("--- %s seconds ---" % (time.time() - start_time))
343 |
344 | # No face detected
345 | if faces_found == 0:
346 |             return render(request, predict_template_name, {"no_faces": True})
347 |
348 | # Perform prediction
349 | try:
350 | heatmap_images = []
351 | output = ""
352 | confidence = 0.0
353 |
354 | for i in range(len(path_to_videos)):
355 | print("<=== | Started Prediction | ===>")
356 | prediction = predict(model, video_dataset[i], './', video_file_name_only)
357 | confidence = round(prediction[1], 1)
358 | output = "REAL" if prediction[0] == 1 else "FAKE"
359 | print("Prediction:", prediction[0], "==", output, "Confidence:", confidence)
360 | print("<=== | Prediction Done | ===>")
361 | print("--- %s seconds ---" % (time.time() - start_time))
362 |
363 | # Uncomment if you want to create heat map images
364 | # for j in range(sequence_length):
365 | # heatmap_images.append(plot_heat_map(j, model, video_dataset[i], './', video_file_name_only))
366 |
367 | # Render results
368 | context = {
369 | 'preprocessed_images': preprocessed_images,
370 | 'faces_cropped_images': faces_cropped_images,
371 | 'heatmap_images': heatmap_images,
372 | 'original_video': production_video_name,
373 | 'models_location': os.path.join(settings.PROJECT_DIR, 'models'),
374 | 'output': output,
375 | 'confidence': confidence
376 | }
377 |
378 |             return render(request, predict_template_name, context)
382 |
383 | except Exception as e:
384 | print(f"Exception occurred during prediction: {e}")
385 | return render(request, 'cuda_full.html')
386 | def about(request):
387 | return render(request, about_template_name)
388 |
389 | def handler404(request,exception):
390 | return render(request, '404.html', status=404)
391 | def cuda_full(request):
392 | return render(request, 'cuda_full.html')
393 |
--------------------------------------------------------------------------------
/Django Application/nginx/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM nginx
2 | WORKDIR /etc/nginx/
3 | RUN rm /etc/nginx/conf.d/default.conf
4 | COPY nginx.conf /etc/nginx/conf.d
5 | EXPOSE 80
--------------------------------------------------------------------------------
/Django Application/nginx/nginx.conf:
--------------------------------------------------------------------------------
1 | upstream project_settings {
2 |     server unix:/app/run/gunicorn.sock fail_timeout=0;
3 | }
5 |
6 | server {
7 |
8 | listen 80;
9 | # disable any limits to avoid HTTP 413 for large image uploads
10 | client_max_body_size 0;
11 |
12 | location / {
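        # proxy to the gunicorn upstream whenever the request does not match a file on disk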
13 | if (!-f $request_filename) {
14 | proxy_pass http://project_settings;
15 | break;
16 | }
17 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
18 | proxy_set_header Host $host;
19 | proxy_redirect off;
20 | }
21 | #static file directory
22 | location /static/ {
23 | alias /home/app/staticfiles/;
24 | }
25 | #media file directory
26 | location /media/ {
27 | alias /app/uploaded_videos/;
28 | }
29 | }
30 |
31 |
32 |
--------------------------------------------------------------------------------
/Django Application/project_settings/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/abhijithjadhav/Deepfake_detection_using_deep_learning/534f0e9915754b3a21eb1a838925f60d90f2866c/Django Application/project_settings/__init__.py
--------------------------------------------------------------------------------
/Django Application/project_settings/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/abhijithjadhav/Deepfake_detection_using_deep_learning/534f0e9915754b3a21eb1a838925f60d90f2866c/Django Application/project_settings/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/Django Application/project_settings/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/abhijithjadhav/Deepfake_detection_using_deep_learning/534f0e9915754b3a21eb1a838925f60d90f2866c/Django Application/project_settings/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/Django Application/project_settings/__pycache__/settings.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/abhijithjadhav/Deepfake_detection_using_deep_learning/534f0e9915754b3a21eb1a838925f60d90f2866c/Django Application/project_settings/__pycache__/settings.cpython-36.pyc
--------------------------------------------------------------------------------
/Django Application/project_settings/__pycache__/settings.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/abhijithjadhav/Deepfake_detection_using_deep_learning/534f0e9915754b3a21eb1a838925f60d90f2866c/Django Application/project_settings/__pycache__/settings.cpython-38.pyc
--------------------------------------------------------------------------------
/Django Application/project_settings/__pycache__/urls.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/abhijithjadhav/Deepfake_detection_using_deep_learning/534f0e9915754b3a21eb1a838925f60d90f2866c/Django Application/project_settings/__pycache__/urls.cpython-36.pyc
--------------------------------------------------------------------------------
/Django Application/project_settings/__pycache__/urls.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/abhijithjadhav/Deepfake_detection_using_deep_learning/534f0e9915754b3a21eb1a838925f60d90f2866c/Django Application/project_settings/__pycache__/urls.cpython-38.pyc
--------------------------------------------------------------------------------
/Django Application/project_settings/__pycache__/wsgi.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/abhijithjadhav/Deepfake_detection_using_deep_learning/534f0e9915754b3a21eb1a838925f60d90f2866c/Django Application/project_settings/__pycache__/wsgi.cpython-36.pyc
--------------------------------------------------------------------------------
/Django Application/project_settings/__pycache__/wsgi.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/abhijithjadhav/Deepfake_detection_using_deep_learning/534f0e9915754b3a21eb1a838925f60d90f2866c/Django Application/project_settings/__pycache__/wsgi.cpython-38.pyc
--------------------------------------------------------------------------------
/Django Application/project_settings/asgi.py:
--------------------------------------------------------------------------------
1 | """
2 | ASGI config for project_settings project.
3 |
4 | It exposes the ASGI callable as a module-level variable named ``application``.
5 |
6 | For more information on this file, see
7 | https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
8 | """
9 |
10 | import os
11 |
12 | from django.core.asgi import get_asgi_application
13 |
14 | os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'project_settings.settings')
15 |
16 | application = get_asgi_application()
17 |
--------------------------------------------------------------------------------
/Django Application/project_settings/settings.py:
--------------------------------------------------------------------------------
1 | """
2 | Django settings for project_settings project.
3 | """
4 |
5 | import os
6 |
7 | # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
8 | BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
9 |
10 | # Build paths inside the project like this: os.path.join(PROJECT_DIR, ...)
11 | PROJECT_DIR = os.path.abspath(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
12 |
13 |
14 | # Quick-start development settings - unsuitable for production
15 | # See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
16 |
17 | # SECURITY WARNING: keep the secret key used in production secret!
18 | SECRET_KEY = '@)0qp0!&-vht7k0wyuihr+nk-b8zrvb5j^1d@vl84cd1%)f=dz'
19 |
20 | # SECURITY WARNING: don't run with debug turned on in production!
21 | DEBUG = True
22 |
23 | # Change and set this to correct IP/Domain
24 | ALLOWED_HOSTS = ["*"]
25 |
26 |
27 | # Application definition
28 |
29 | INSTALLED_APPS = [
30 | 'django.contrib.contenttypes',
31 | 'django.contrib.sessions',
32 | 'django.contrib.messages',
33 | 'django.contrib.staticfiles',
34 | 'ml_app.apps.MlAppConfig'
35 | ]
36 |
37 | MIDDLEWARE = [
38 | 'django.middleware.security.SecurityMiddleware',
39 | 'django.contrib.sessions.middleware.SessionMiddleware',
40 | 'django.middleware.common.CommonMiddleware',
41 | 'django.middleware.csrf.CsrfViewMiddleware',
42 | 'django.contrib.messages.middleware.MessageMiddleware',
43 | 'django.middleware.clickjacking.XFrameOptionsMiddleware',
44 | ]
45 |
46 | ROOT_URLCONF = 'project_settings.urls'
47 |
48 | TEMPLATES = [
49 | {
50 | 'BACKEND': 'django.template.backends.django.DjangoTemplates',
51 | 'DIRS': [os.path.join(PROJECT_DIR, 'templates')],
52 | 'APP_DIRS': True,
53 | 'OPTIONS': {
54 | 'context_processors': [
55 | 'django.template.context_processors.debug',
56 | 'django.template.context_processors.request',
57 | 'django.contrib.messages.context_processors.messages',
58 | 'django.template.context_processors.media'
59 | ],
60 | },
61 | },
62 | ]
63 |
64 | WSGI_APPLICATION = 'project_settings.wsgi.application'
65 |
66 |
67 | # Database
68 | # https://docs.djangoproject.com/en/3.0/ref/settings/#databases
69 |
70 | DATABASES = {
71 | "default": {
72 | "ENGINE": "django.db.backends.sqlite3",
73 | "NAME": os.path.join(PROJECT_DIR, 'db.sqlite3'),
74 | }
75 | }
76 |
77 |
78 | # Internationalization
79 | # https://docs.djangoproject.com/en/3.0/topics/i18n/
80 |
81 | LANGUAGE_CODE = 'en-us'
82 |
83 | TIME_ZONE = 'UTC'
84 |
85 | USE_I18N = False
86 |
87 | USE_L10N = False
88 |
89 | USE_TZ = False
90 |
91 |
92 | # Static files (CSS, JavaScript, Images)
93 | # https://docs.djangoproject.com/en/3.0/howto/static-files/
94 |
95 | #used in production to serve static files
96 | STATIC_ROOT = "/home/app/staticfiles/"
97 |
98 | #url for static files
99 | STATIC_URL = '/static/'
100 |
101 | STATICFILES_DIRS = [
102 | os.path.join(PROJECT_DIR, 'uploaded_images'),
103 | os.path.join(PROJECT_DIR, 'static'),
104 | os.path.join(PROJECT_DIR, 'models'),
105 | ]
106 |
107 | CONTENT_TYPES = ['video']
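# 104857600 bytes = 100 MB; views.py compares the uploaded file size against this cap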
108 | MAX_UPLOAD_SIZE = "104857600"
109 |
110 | MEDIA_URL = "/media/"
111 |
112 | MEDIA_ROOT = os.path.join(PROJECT_DIR, 'uploaded_videos')
113 |
114 | #for extra logging in production environment
115 | if not DEBUG:
116 | LOGGING = {
117 | 'version': 1,
118 | 'disable_existing_loggers': False,
119 | 'handlers': {
120 | 'console': {
121 | 'class': 'logging.StreamHandler',
122 | },
123 | 'file': {
124 | 'level': 'DEBUG',
125 | 'class': 'logging.FileHandler',
126 | 'filename': 'log.django',
127 | },
128 | },
129 | 'loggers': {
130 | 'django': {
131 | 'handlers': ['console','file'],
132 | 'level': os.getenv('DJANGO_LOG_LEVEL', 'DEBUG'),
133 | },
134 | },
135 | }
136 |
--------------------------------------------------------------------------------
/Django Application/project_settings/urls.py:
--------------------------------------------------------------------------------
1 | """project_settings URL Configuration
2 | """
3 | from django.contrib import admin
4 | from django.urls import path, include
5 |
6 | from django.conf import settings
7 | from django.conf.urls.static import static
8 |
9 | urlpatterns = [
10 | path('', include('ml_app.urls')),
11 | ] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
12 |
--------------------------------------------------------------------------------
/Django Application/project_settings/wsgi.py:
--------------------------------------------------------------------------------
1 | """
2 | WSGI config for project_settings project.
3 |
4 | It exposes the WSGI callable as a module-level variable named ``application``.
5 |
6 | For more information on this file, see
7 | https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
8 | """
9 |
10 | import os
11 |
12 | from django.core.wsgi import get_wsgi_application
13 |
14 | os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'project_settings.settings')
15 |
16 | application = get_wsgi_application()
17 |
--------------------------------------------------------------------------------
/Django Application/requirements.txt:
--------------------------------------------------------------------------------
1 | altair==5.3.0
2 | asgiref==3.8.1
3 | astor==0.8.1
4 | attrs==23.2.0
5 | backcall==0.2.0
6 | base58==2.1.1
7 | bleach==6.1.0
8 | blinker==1.8.2
9 | cachetools==5.3.3
10 | certifi==2024.6.2
11 | chardet==5.2.0
12 | click==8.1.7
13 | cmake==3.29.6
14 | colorama==0.4.6
15 | cycler==0.12.1
16 | decorator==5.1.1
17 | defusedxml==0.7.1
18 | Django==5.0.6
19 | dlib==19.24.2
20 | docutils==0.21.2
21 | entrypoints==0.4
22 | enum-compat==0.0.3
23 | face-recognition==1.3.0
24 | face-recognition-models==0.3.0
25 | future==1.0.0
26 | google==3.0.0
27 | google-api-core==2.19.1
28 | google-api-python-client==2.134.0
29 | google-auth==2.30.0
30 | google-auth-httplib2==0.2.0
31 | googleapis-common-protos==1.63.2
32 | httplib2==0.22.0
33 | idna==3.7
34 | ipykernel==6.29.4
35 | ipython==8.12.3
36 | ipython-genutils==0.2.0
37 | ipywidgets==8.1.3
38 | jedi==0.16.0
39 | Jinja2==3.1.4
40 | jmespath==1.0.1
41 | json5==0.9.25
42 | jsonschema==4.22.0
43 | jupyter-client==8.6.2
44 | jupyter-core==5.7.2
45 | jupyterlab==4.2.2
46 | jupyterlab-server==2.27.2
47 | kiwisolver==1.4.5
48 | MarkupSafe==2.1.5
49 | matplotlib==3.9.0
50 | mistune==3.0.2
51 | nbconvert==7.16.4
52 | nbformat==5.10.4
53 | notebook==7.2.1
54 | numpy==1.26.4
55 | opencv-python==4.10.0.84
56 | packaging==24.1
57 | pandas==2.2.2
58 | pandocfilters==1.5.1
59 | parso==0.8.4
60 | pathtools==0.1.2
61 | pickleshare==0.7.5
62 | Pillow==10.3.0
63 | prometheus-client==0.20.0
64 | prompt-toolkit==3.0.47
65 | protobuf==5.27.2
66 | pyasn1==0.6.0
67 | pyasn1-modules==0.4.0
68 | pycodestyle==2.12.0
69 | pydeck==0.9.0b1
70 | Pygments==2.18.0
71 | pyparsing==3.1.2
72 | pyrsistent==0.20.0
73 | python-dateutil==2.9.0
74 | # pytz==2019.3 Commenting for production deployment
75 | # pywin32==227
76 | pywinpty==0.5.7
77 | PyYAML==6.0.1
78 | pyzmq==26.0.3
79 | requests==2.32.3
80 | rsa==4.9
81 | s3transfer==0.10.2
82 | Send2Trash==1.8.3
83 | six==1.16.0
84 | soupsieve==2.5
85 | sqlparse==0.5.0
86 | terminado==0.18.1
87 | testpath==0.6.0
88 | toml==0.10.2
89 | toolz==0.12.1
90 | torch==2.3.1
91 | torchvision==0.18.1
92 | tornado==6.4.1
93 | traitlets==5.14.3
94 | tzlocal==5.2
95 | uritemplate==4.1.1
96 | urllib3==2.2.2
97 | validators==0.28.3
98 | watchdog==4.0.1
99 | wcwidth==0.2.13
100 | webencodings==0.5.1
101 | widgetsnbextension==4.0.11
102 |
--------------------------------------------------------------------------------
/Django Application/static/css/styles.css:
--------------------------------------------------------------------------------
1 | body {
2 | height: 100%;
3 | }
4 | .bg {
5 | /* The image used */
6 | background-image: url("/static/images/background1.png");
7 |
8 | -webkit-background-size: cover;
9 | -moz-background-size: cover;
10 | -o-background-size: cover;
11 |
12 | /* Center and scale the image nicely */
13 | background-position: center;
14 | background-repeat: no-repeat;
15 | background-size: cover;
16 |
17 |
18 | }
19 |
20 | section{
21 | padding-top: 60px;
22 | }
23 |
24 | .width-300{
25 | width: 300px;
26 | margin: auto;
27 | padding: 20px;
28 | box-shadow: 0 0px 9px 2px #ccc;
29 | }
30 |
31 | .width-400{
32 | width: 400px;
33 | margin: auto;
34 | padding: 20px;
35 | margin-top: 80px;
36 | margin-bottom: 150px;
37 | box-shadow: 0 0px 9px 2px #ccc;
38 | }
39 |
40 | .width-500{
41 | width: 500px;
42 | margin: auto;
43 | padding: 20px;
44 | box-shadow: 0 0px 9px 2px #ccc;
45 | }
46 |
47 | #videos{
48 | display: none;
49 | }
50 |
51 | canvas {
52 | position: absolute;
53 | top: 0;
54 | left: 0;
55 | }
56 |
57 | .preprocess {
58 | padding-right: 20px;
59 | padding-bottom: 50px;
60 | }
61 |
62 | #preprocessed_images {
63 | white-space: nowrap;
64 | width: auto;
65 | height: 250px;
66 | padding: 20px;
67 | overflow-x: scroll;
68 | overflow-y: hidden;
69 | box-shadow: 0 0px 9px 2px #ccc;
70 | }
71 |
72 | #faces_images {
73 | white-space: nowrap;
74 | width: auto;
75 | height: 150px;
76 | padding: 20px;
77 | overflow-x: scroll;
78 | overflow-y: hidden;
79 | box-shadow: 0 0px 9px 2px #ccc;
80 | }
81 |
82 | .faces {
83 | padding-right: 20px;
84 | padding-bottom: 50px;
85 | }
86 |
87 | #heatmap_images{
88 | white-space: nowrap;
89 | width: auto;
90 | height: 200px;
91 | padding: 20px;
92 | overflow-x: scroll;
93 | overflow-y: hidden;
94 | box-shadow: 0 0px 9px 2px #ccc;
95 | margin-bottom: 20px;
96 | }
97 |
98 | .heat-map {
99 | padding-right: 20px;
100 | padding-bottom: 50px;
101 | }
102 |
103 | /* span.spinner-border {
104 | display: none;
105 | } */
--------------------------------------------------------------------------------
/Django Application/static/images/Thumbs.db:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/abhijithjadhav/Deepfake_detection_using_deep_learning/534f0e9915754b3a21eb1a838925f60d90f2866c/Django Application/static/images/Thumbs.db
--------------------------------------------------------------------------------
/Django Application/static/images/background.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/abhijithjadhav/Deepfake_detection_using_deep_learning/534f0e9915754b3a21eb1a838925f60d90f2866c/Django Application/static/images/background.png
--------------------------------------------------------------------------------
/Django Application/static/images/background1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/abhijithjadhav/Deepfake_detection_using_deep_learning/534f0e9915754b3a21eb1a838925f60d90f2866c/Django Application/static/images/background1.png
--------------------------------------------------------------------------------
/Django Application/static/images/logo1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/abhijithjadhav/Deepfake_detection_using_deep_learning/534f0e9915754b3a21eb1a838925f60d90f2866c/Django Application/static/images/logo1.png
--------------------------------------------------------------------------------
/Django Application/static/images/thumpdown.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/abhijithjadhav/Deepfake_detection_using_deep_learning/534f0e9915754b3a21eb1a838925f60d90f2866c/Django Application/static/images/thumpdown.png
--------------------------------------------------------------------------------
/Django Application/static/images/thumpup.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/abhijithjadhav/Deepfake_detection_using_deep_learning/534f0e9915754b3a21eb1a838925f60d90f2866c/Django Application/static/images/thumpup.png
--------------------------------------------------------------------------------
/Django Application/static/js/script.js:
--------------------------------------------------------------------------------
1 | $(document).on("change", "#id_upload_video_file", function(evt) {
2 |     var $source = $('#video_source');
3 |     $source[0].src = URL.createObjectURL(this.files[0]);
4 |     $source.parent()[0].load();
5 |     $('#videos').css("display", "block");
6 |     $('#id_upload_video_file').css("display", "none");
7 | });
8 | $('form').on('submit', function(e) {
9 |     $('#videoUpload').prop("disabled", true);
10 |     $('#videoUpload').html('Uploading Video Loading...');
11 | });
--------------------------------------------------------------------------------
/Django Application/static/json/age_gender_model-shard1:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/abhijithjadhav/Deepfake_detection_using_deep_learning/534f0e9915754b3a21eb1a838925f60d90f2866c/Django Application/static/json/age_gender_model-shard1
--------------------------------------------------------------------------------
/Django Application/static/json/age_gender_model-weights_manifest.json:
--------------------------------------------------------------------------------
1 | [{"weights":[{"name":"entry_flow/conv_in/filters","shape":[3,3,3,32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.005431825039433498,"min":-0.7441600304023892}},{"name":"entry_flow/conv_in/bias","shape":[32],"dtype":"float32"},{"name":"entry_flow/reduction_block_0/separable_conv0/depthwise_filter","shape":[3,3,32,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.005691980614381678,"min":-0.6090419257388395}},{"name":"entry_flow/reduction_block_0/separable_conv0/pointwise_filter","shape":[1,1,32,64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.009089225881239947,"min":-1.1179747833925135}},{"name":"entry_flow/reduction_block_0/separable_conv0/bias","shape":[64],"dtype":"float32"},{"name":"entry_flow/reduction_block_0/separable_conv1/depthwise_filter","shape":[3,3,64,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.00683894624897078,"min":-0.8138346036275228}},{"name":"entry_flow/reduction_block_0/separable_conv1/pointwise_filter","shape":[1,1,64,64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.011632566358528886,"min":-1.3028474321552352}},{"name":"entry_flow/reduction_block_0/separable_conv1/bias","shape":[64],"dtype":"float32"},{"name":"entry_flow/reduction_block_0/expansion_conv/filters","shape":[1,1,32,64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.010254812240600587,"min":-0.9229331016540528}},{"name":"entry_flow/reduction_block_0/expansion_conv/bias","shape":[64],"dtype":"float32"},{"name":"entry_flow/reduction_block_1/separable_conv0/depthwise_filter","shape":[3,3,64,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0052509616403018725,"min":-0.6406173201168285}},{"name":"entry_flow/reduction_block_1/separable_conv0/pointwise_filter","shape":[1,1,64,128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.010788509424994973,"min":-1.4564487723743214}},{"name":"entry_flow/reduction_block_1/separable_conv0/bias","shape":[128],"dtype":"float32"},{"name":"entry_flow/reduction_block_1/separable_conv1/depthwise_filter","shape":[3,3,128,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.00553213918910307,"min":-0.7025816770160899}},{"name":"entry_flow/reduction_block_1/separable_conv1/pointwise_filter","shape":[1,1,128,128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.013602388606351965,"min":-1.6186842441558837}},{"name":"entry_flow/reduction_block_1/separable_conv1/bias","shape":[128],"dtype":"float32"},{"name":"entry_flow/reduction_block_1/expansion_conv/filters","shape":[1,1,64,128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.007571851038465313,"min":-1.158493208885193}},{"name":"entry_flow/reduction_block_1/expansion_conv/bias","shape":[128],"dtype":"float32"},{"name":"middle_flow/main_block_0/separable_conv0/depthwise_filter","shape":[3,3,128,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.005766328409606335,"min":-0.6688940955143349}},{"name":"middle_flow/main_block_0/separable_conv0/pointwise_filter","shape":[1,1,128,128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.012136116214826995,"min":-1.5776951079275094}},{"name":"middle_flow/main_block_0/separable_conv0/bias","shape":[128],"dtype":"float32"},{"name":"middle_flow/main_block_0/separable_conv1/depthwise_filter","shape":[3,3,128,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.004314773222979377,"min":-0.5652352922102984}},{"name":"middle_flow/main_block_0/separable_conv1/pointwise_filter","shape":[1,1,128,128],"dtype":"float32","qu
antization":{"dtype":"uint8","scale":0.01107162026798024,"min":-1.2400214700137868}},{"name":"middle_flow/main_block_0/separable_conv1/bias","shape":[128],"dtype":"float32"},{"name":"middle_flow/main_block_0/separable_conv2/depthwise_filter","shape":[3,3,128,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0036451735917259667,"min":-0.4848080876995536}},{"name":"middle_flow/main_block_0/separable_conv2/pointwise_filter","shape":[1,1,128,128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.008791744942758598,"min":-1.134135097615859}},{"name":"middle_flow/main_block_0/separable_conv2/bias","shape":[128],"dtype":"float32"},{"name":"middle_flow/main_block_1/separable_conv0/depthwise_filter","shape":[3,3,128,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.004915751896652521,"min":-0.6095532351849126}},{"name":"middle_flow/main_block_1/separable_conv0/pointwise_filter","shape":[1,1,128,128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.010868691463096469,"min":-1.3368490499608656}},{"name":"middle_flow/main_block_1/separable_conv0/bias","shape":[128],"dtype":"float32"},{"name":"middle_flow/main_block_1/separable_conv1/depthwise_filter","shape":[3,3,128,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.005010117269029804,"min":-0.6012140722835765}},{"name":"middle_flow/main_block_1/separable_conv1/pointwise_filter","shape":[1,1,128,128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.010311148213405235,"min":-1.3816938605963016}},{"name":"middle_flow/main_block_1/separable_conv1/bias","shape":[128],"dtype":"float32"},{"name":"middle_flow/main_block_1/separable_conv2/depthwise_filter","shape":[3,3,128,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.004911523706772748,"min":-0.7367285560159123}},{"name":"middle_flow/main_block_1/separable_conv2/pointwise_filter","shape":[1,1,128,128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.008976466047997568,"min":-1.2207993825276693}},{"name":"middle_flow/main_block_1/separable_conv2/bias","shape":[128],"dtype":"float32"},{"name":"exit_flow/reduction_block/separable_conv0/depthwise_filter","shape":[3,3,128,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.005074804436926748,"min":-0.7104726211697447}},{"name":"exit_flow/reduction_block/separable_conv0/pointwise_filter","shape":[1,1,128,256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.011453078307357489,"min":-1.4545409450344011}},{"name":"exit_flow/reduction_block/separable_conv0/bias","shape":[256],"dtype":"float32"},{"name":"exit_flow/reduction_block/separable_conv1/depthwise_filter","shape":[3,3,256,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.007741751390344957,"min":-1.1380374543807086}},{"name":"exit_flow/reduction_block/separable_conv1/pointwise_filter","shape":[1,1,256,256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.011347713189966538,"min":-1.497898141075583}},{"name":"exit_flow/reduction_block/separable_conv1/bias","shape":[256],"dtype":"float32"},{"name":"exit_flow/reduction_block/expansion_conv/filters","shape":[1,1,128,256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.006717281014311547,"min":-0.8329428457746318}},{"name":"exit_flow/reduction_block/expansion_conv/bias","shape":[256],"dtype":"float32"},{"name":"exit_flow/separable_conv/depthwise_filter","shape":[3,3,256,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0027201742518181892,"min":-0.3237007359663645}},{"name":"exit_flow/separable_conv
/pointwise_filter","shape":[1,1,256,512],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.010076364348916447,"min":-1.330080094056971}},{"name":"exit_flow/separable_conv/bias","shape":[512],"dtype":"float32"},{"name":"fc/age/weights","shape":[512,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.008674054987290326,"min":-1.2664120281443876}},{"name":"fc/age/bias","shape":[1],"dtype":"float32"},{"name":"fc/gender/weights","shape":[512,2],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0029948226377075793,"min":-0.34140978069866407}},{"name":"fc/gender/bias","shape":[2],"dtype":"float32"}],"paths":["age_gender_model-shard1"]}]
--------------------------------------------------------------------------------
/Django Application/static/json/face_expression_model-shard1:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/abhijithjadhav/Deepfake_detection_using_deep_learning/534f0e9915754b3a21eb1a838925f60d90f2866c/Django Application/static/json/face_expression_model-shard1
--------------------------------------------------------------------------------
/Django Application/static/json/face_expression_model-weights_manifest.json:
--------------------------------------------------------------------------------
1 | [{"weights":[{"name":"dense0/conv0/filters","shape":[3,3,3,32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0057930146946626555,"min":-0.7125408074435067}},{"name":"dense0/conv0/bias","shape":[32],"dtype":"float32"},{"name":"dense0/conv1/depthwise_filter","shape":[3,3,32,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.006473719839956246,"min":-0.6408982641556684}},{"name":"dense0/conv1/pointwise_filter","shape":[1,1,32,32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.010509579321917366,"min":-1.408283629136927}},{"name":"dense0/conv1/bias","shape":[32],"dtype":"float32"},{"name":"dense0/conv2/depthwise_filter","shape":[3,3,32,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.005666389652326995,"min":-0.7252978754978554}},{"name":"dense0/conv2/pointwise_filter","shape":[1,1,32,32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.010316079270605948,"min":-1.1760330368490781}},{"name":"dense0/conv2/bias","shape":[32],"dtype":"float32"},{"name":"dense0/conv3/depthwise_filter","shape":[3,3,32,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0063220320963392074,"min":-0.853474333005793}},{"name":"dense0/conv3/pointwise_filter","shape":[1,1,32,32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.010322785377502442,"min":-1.4658355236053466}},{"name":"dense0/conv3/bias","shape":[32],"dtype":"float32"},{"name":"dense1/conv0/depthwise_filter","shape":[3,3,32,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0042531527724920535,"min":-0.5741756242864272}},{"name":"dense1/conv0/pointwise_filter","shape":[1,1,32,64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.010653339647779278,"min":-1.1825207009035}},{"name":"dense1/conv0/bias","shape":[64],"dtype":"float32"},{"name":"dense1/conv1/depthwise_filter","shape":[3,3,64,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.005166931012097527,"min":-0.6355325144879957}},{"name":"dense1/conv1/pointwise_filter","shape":[1,1,64,64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.011478300188101974,"min":-1.3888743227603388}},{"name":"dense1/conv1/bias","shape":[64],"dtype":"float32"},{"name":"dense1/conv2/depthwise_filter","shape":[3,3,64,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.006144821410085641,"min":-0.8479853545918185}},{"name":"dense1/conv2/pointwise_filter","shape":[1,1,64,64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.010541967317169788,"min":-1.3809977185492421}},{"name":"dense1/conv2/bias","shape":[64],"dtype":"float32"},{"name":"dense1/conv3/depthwise_filter","shape":[3,3,64,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.005769844849904378,"min":-0.686611537138621}},{"name":"dense1/conv3/pointwise_filter","shape":[1,1,64,64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.010939095534530341,"min":-1.2689350820055196}},{"name":"dense1/conv3/bias","shape":[64],"dtype":"float32"},{"name":"dense2/conv0/depthwise_filter","shape":[3,3,64,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0037769308277204924,"min":-0.40790852939381317}},{"name":"dense2/conv0/pointwise_filter","shape":[1,1,64,128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.01188667194516051,"min":-1.4382873053644218}},{"name":"dense2/conv0/bias","shape":[128],"dtype":"float32"},{"name":"dense2/conv1/depthwise_filter","shape":[3,3,128,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.006497045825509464,"min":-0.8381189114907208}},{"name":"d
ense2/conv1/pointwise_filter","shape":[1,1,128,128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.011632198913424622,"min":-1.3377028750438316}},{"name":"dense2/conv1/bias","shape":[128],"dtype":"float32"},{"name":"dense2/conv2/depthwise_filter","shape":[3,3,128,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.005947182225246056,"min":-0.7969224181829715}},{"name":"dense2/conv2/pointwise_filter","shape":[1,1,128,128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.011436844339557722,"min":-1.4524792311238306}},{"name":"dense2/conv2/bias","shape":[128],"dtype":"float32"},{"name":"dense2/conv3/depthwise_filter","shape":[3,3,128,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.006665432686899222,"min":-0.8998334127313949}},{"name":"dense2/conv3/pointwise_filter","shape":[1,1,128,128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.01283421422920975,"min":-1.642779421338848}},{"name":"dense2/conv3/bias","shape":[128],"dtype":"float32"},{"name":"dense3/conv0/depthwise_filter","shape":[3,3,128,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.004711699953266218,"min":-0.6737730933170692}},{"name":"dense3/conv0/pointwise_filter","shape":[1,1,128,256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.010955964817720302,"min":-1.3914075318504784}},{"name":"dense3/conv0/bias","shape":[256],"dtype":"float32"},{"name":"dense3/conv1/depthwise_filter","shape":[3,3,256,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.00554193468654857,"min":-0.7149095745647656}},{"name":"dense3/conv1/pointwise_filter","shape":[1,1,256,256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.016790372250126858,"min":-2.484975093018775}},{"name":"dense3/conv1/bias","shape":[256],"dtype":"float32"},{"name":"dense3/conv2/depthwise_filter","shape":[3,3,256,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.006361540626077091,"min":-0.8142772001378676}},{"name":"dense3/conv2/pointwise_filter","shape":[1,1,256,256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.01777329678628959,"min":-1.7062364914838006}},{"name":"dense3/conv2/bias","shape":[256],"dtype":"float32"},{"name":"dense3/conv3/depthwise_filter","shape":[3,3,256,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.006900275922289082,"min":-0.8625344902861353}},{"name":"dense3/conv3/pointwise_filter","shape":[1,1,256,256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.015449936717164282,"min":-1.9003422162112067}},{"name":"dense3/conv3/bias","shape":[256],"dtype":"float32"},{"name":"fc/weights","shape":[256,7],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.004834276554631252,"min":-0.7203072066400565}},{"name":"fc/bias","shape":[7],"dtype":"float32"}],"paths":["face_expression_model-shard1"]}]
--------------------------------------------------------------------------------
/Django Application/static/json/face_landmark_68_model-shard1:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/abhijithjadhav/Deepfake_detection_using_deep_learning/534f0e9915754b3a21eb1a838925f60d90f2866c/Django Application/static/json/face_landmark_68_model-shard1
--------------------------------------------------------------------------------
/Django Application/static/json/face_landmark_68_model-weights_manifest.json:
--------------------------------------------------------------------------------
1 | [{"weights":[{"name":"dense0/conv0/filters","shape":[3,3,3,32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.004853619781194949,"min":-0.5872879935245888}},{"name":"dense0/conv0/bias","shape":[32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.004396426443960153,"min":-0.7298067896973853}},{"name":"dense0/conv1/depthwise_filter","shape":[3,3,32,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.00635151559231328,"min":-0.5589333721235686}},{"name":"dense0/conv1/pointwise_filter","shape":[1,1,32,32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.009354315552057004,"min":-1.2628325995276957}},{"name":"dense0/conv1/bias","shape":[32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0029380727048013726,"min":-0.5846764682554731}},{"name":"dense0/conv2/depthwise_filter","shape":[3,3,32,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0049374802439820535,"min":-0.6171850304977566}},{"name":"dense0/conv2/pointwise_filter","shape":[1,1,32,32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.009941946758943446,"min":-1.3421628124573652}},{"name":"dense0/conv2/bias","shape":[32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0030300481062309416,"min":-0.5272283704841838}},{"name":"dense0/conv3/depthwise_filter","shape":[3,3,32,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.005672684837790097,"min":-0.7431217137505026}},{"name":"dense0/conv3/pointwise_filter","shape":[1,1,32,32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.010712201455060173,"min":-1.5639814124387852}},{"name":"dense0/conv3/bias","shape":[32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0030966934035806097,"min":-0.3839899820439956}},{"name":"dense1/conv0/depthwise_filter","shape":[3,3,32,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0039155554537679636,"min":-0.48161332081345953}},{"name":"dense1/conv0/pointwise_filter","shape":[1,1,32,64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.01023082966898002,"min":-1.094698774580862}},{"name":"dense1/conv0/bias","shape":[64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0027264176630506327,"min":-0.3871513081531898}},{"name":"dense1/conv1/depthwise_filter","shape":[3,3,64,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.004583378632863362,"min":-0.5454220573107401}},{"name":"dense1/conv1/pointwise_filter","shape":[1,1,64,64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.00915846403907327,"min":-1.117332612766939}},{"name":"dense1/conv1/bias","shape":[64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.003091680419211294,"min":-0.5966943209077797}},{"name":"dense1/conv2/depthwise_filter","shape":[3,3,64,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.005407439727409214,"min":-0.708374604290607}},{"name":"dense1/conv2/pointwise_filter","shape":[1,1,64,64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.00946493943532308,"min":-1.2399070660273235}},{"name":"dense1/conv2/bias","shape":[64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.004409168514550901,"min":-0.9788354102303}},{"name":"dense1/conv3/depthwise_filter","shape":[3,3,64,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.004478132958505668,"min":-0.6493292789833219}},{"name":"dense1/conv3/pointwise_filter","shape":[1,1,64,64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.011063695888893277,"min":-1.2501976354449402}},{"name":"dense1/c
onv3/bias","shape":[64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.003909627596537272,"min":-0.6646366914113363}},{"name":"dense2/conv0/depthwise_filter","shape":[3,3,64,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.003213915404151468,"min":-0.3374611174359041}},{"name":"dense2/conv0/pointwise_filter","shape":[1,1,64,128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.010917326048308728,"min":-1.4520043644250609}},{"name":"dense2/conv0/bias","shape":[128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.002800439152063108,"min":-0.38085972468058266}},{"name":"dense2/conv1/depthwise_filter","shape":[3,3,128,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0050568851770139206,"min":-0.6927932692509071}},{"name":"dense2/conv1/pointwise_filter","shape":[1,1,128,128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.01074961213504567,"min":-1.3222022926106174}},{"name":"dense2/conv1/bias","shape":[128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0030654204242369708,"min":-0.5487102559384177}},{"name":"dense2/conv2/depthwise_filter","shape":[3,3,128,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.00591809165244009,"min":-0.917304206128214}},{"name":"dense2/conv2/pointwise_filter","shape":[1,1,128,128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.01092823346455892,"min":-1.366029183069865}},{"name":"dense2/conv2/bias","shape":[128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.002681120470458386,"min":-0.36463238398234055}},{"name":"dense2/conv3/depthwise_filter","shape":[3,3,128,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0048311497650894465,"min":-0.5797379718107336}},{"name":"dense2/conv3/pointwise_filter","shape":[1,1,128,128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.011227761062921263,"min":-1.4483811771168429}},{"name":"dense2/conv3/bias","shape":[128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0034643323982463162,"min":-0.3360402426298927}},{"name":"dense3/conv0/depthwise_filter","shape":[3,3,128,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.003394978887894574,"min":-0.49227193874471326}},{"name":"dense3/conv0/pointwise_filter","shape":[1,1,128,256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.010051267287310432,"min":-1.2765109454884247}},{"name":"dense3/conv0/bias","shape":[256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.003142924752889895,"min":-0.4588670139219247}},{"name":"dense3/conv1/depthwise_filter","shape":[3,3,256,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.00448304671867221,"min":-0.5872791201460595}},{"name":"dense3/conv1/pointwise_filter","shape":[1,1,256,256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.016063522357566685,"min":-2.3613377865623026}},{"name":"dense3/conv1/bias","shape":[256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.00287135781026354,"min":-0.47664539650374765}},{"name":"dense3/conv2/depthwise_filter","shape":[3,3,256,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.006002906724518421,"min":-0.7923836876364315}},{"name":"dense3/conv2/pointwise_filter","shape":[1,1,256,256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.017087187019048954,"min":-1.6061955797906016}},{"name":"dense3/conv2/bias","shape":[256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.003124481205846749,"min":-0.46242321846531886}},{"name":"dense3/conv3/depthwi
se_filter","shape":[3,3,256,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.006576311588287353,"min":-1.0193282961845398}},{"name":"dense3/conv3/pointwise_filter","shape":[1,1,256,256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.015590153955945782,"min":-1.99553970636106}},{"name":"dense3/conv3/bias","shape":[256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.004453541601405424,"min":-0.6546706154065973}},{"name":"fc/weights","shape":[256,136],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.010417488509533453,"min":-1.500118345372817}},{"name":"fc/bias","shape":[136],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0025084222648658005,"min":0.07683877646923065}}],"paths":["face_landmark_68_model-shard1"]}]
--------------------------------------------------------------------------------
/Django Application/static/json/face_landmark_68_tiny_model-shard1:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/abhijithjadhav/Deepfake_detection_using_deep_learning/534f0e9915754b3a21eb1a838925f60d90f2866c/Django Application/static/json/face_landmark_68_tiny_model-shard1
--------------------------------------------------------------------------------
/Django Application/static/json/face_landmark_68_tiny_model-weights_manifest.json:
--------------------------------------------------------------------------------
1 | [{"weights":[{"name":"dense0/conv0/filters","shape":[3,3,3,32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.008194216092427571,"min":-0.9423348506291708}},{"name":"dense0/conv0/bias","shape":[32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.006839508168837603,"min":-0.8412595047670252}},{"name":"dense0/conv1/depthwise_filter","shape":[3,3,32,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.009194007106855804,"min":-1.2779669878529567}},{"name":"dense0/conv1/pointwise_filter","shape":[1,1,32,32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0036026100317637128,"min":-0.3170296827952067}},{"name":"dense0/conv1/bias","shape":[32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.000740380117706224,"min":-0.06367269012273527}},{"name":"dense0/conv2/depthwise_filter","shape":[3,3,32,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":1,"min":0}},{"name":"dense0/conv2/pointwise_filter","shape":[1,1,32,32],"dtype":"float32","quantization":{"dtype":"uint8","scale":1,"min":0}},{"name":"dense0/conv2/bias","shape":[32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0037702228508743585,"min":-0.6220867703942692}},{"name":"dense1/conv0/depthwise_filter","shape":[3,3,32,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0033707996209462483,"min":-0.421349952618281}},{"name":"dense1/conv0/pointwise_filter","shape":[1,1,32,64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.014611541991140328,"min":-1.8556658328748217}},{"name":"dense1/conv0/bias","shape":[64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.002832523046755323,"min":-0.30307996600281956}},{"name":"dense1/conv1/depthwise_filter","shape":[3,3,64,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.006593170586754294,"min":-0.6329443763284123}},{"name":"dense1/conv1/pointwise_filter","shape":[1,1,64,64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.012215249211180444,"min":-1.6001976466646382}},{"name":"dense1/conv1/bias","shape":[64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.002384825547536214,"min":-0.3028728445370992}},{"name":"dense1/conv2/depthwise_filter","shape":[3,3,64,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.005859645441466687,"min":-0.7617539073906693}},{"name":"dense1/conv2/pointwise_filter","shape":[1,1,64,64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.013121426806730382,"min":-1.7845140457153321}},{"name":"dense1/conv2/bias","shape":[64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0032247188044529336,"min":-0.46435950784122243}},{"name":"dense2/conv0/depthwise_filter","shape":[3,3,64,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.002659512618008782,"min":-0.32977956463308894}},{"name":"dense2/conv0/pointwise_filter","shape":[1,1,64,128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.015499923743453681,"min":-1.9839902391620712}},{"name":"dense2/conv0/bias","shape":[128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0032450980999890497,"min":-0.522460794098237}},{"name":"dense2/conv1/depthwise_filter","shape":[3,3,128,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.005911862382701799,"min":-0.792189559282041}},{"name":"dense2/conv1/pointwise_filter","shape":[1,1,128,128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.021025861478319356,"min":-2.2077154552235325}},{"name":"dense2/conv1/bias","shape":[128],"dtype":"float32","quantization":{"
dtype":"uint8","scale":0.00349616945958605,"min":-0.46149436866535865}},{"name":"dense2/conv2/depthwise_filter","shape":[3,3,128,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.008104994250278847,"min":-1.013124281284856}},{"name":"dense2/conv2/pointwise_filter","shape":[1,1,128,128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.029337059282789044,"min":-3.5791212325002633}},{"name":"dense2/conv2/bias","shape":[128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0038808938334969913,"min":-0.4230174278511721}},{"name":"fc/weights","shape":[128,136],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.014016061670639936,"min":-1.8921683255363912}},{"name":"fc/bias","shape":[136],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0029505149698724935,"min":0.088760145008564}}],"paths":["face_landmark_68_tiny_model-shard1"]}]
--------------------------------------------------------------------------------
/Django Application/static/json/face_recognition_model-shard1:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/abhijithjadhav/Deepfake_detection_using_deep_learning/534f0e9915754b3a21eb1a838925f60d90f2866c/Django Application/static/json/face_recognition_model-shard1
--------------------------------------------------------------------------------
/Django Application/static/json/face_recognition_model-shard2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/abhijithjadhav/Deepfake_detection_using_deep_learning/534f0e9915754b3a21eb1a838925f60d90f2866c/Django Application/static/json/face_recognition_model-shard2
--------------------------------------------------------------------------------
/Django Application/static/json/face_recognition_model-weights_manifest.json:
--------------------------------------------------------------------------------
1 | [{"weights":[{"name":"conv32_down/conv/filters","shape":[7,7,3,32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0005260649557207145,"min":-0.07101876902229645}},{"name":"conv32_down/conv/bias","shape":[32],"dtype":"float32","quantization":{"dtype":"uint8","scale":8.471445956577858e-7,"min":-0.00014740315964445472}},{"name":"conv32_down/scale/weights","shape":[32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.06814416062598135,"min":5.788674831390381}},{"name":"conv32_down/scale/biases","shape":[32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.008471635042452345,"min":-0.931879854669758}},{"name":"conv32_1/conv1/conv/filters","shape":[3,3,32,32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0007328585666768691,"min":-0.0974701893680236}},{"name":"conv32_1/conv1/conv/bias","shape":[32],"dtype":"float32","quantization":{"dtype":"uint8","scale":1.5952091238361e-8,"min":-0.000001978059313556764}},{"name":"conv32_1/conv1/scale/weights","shape":[32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.02146628510718252,"min":3.1103382110595703}},{"name":"conv32_1/conv1/scale/biases","shape":[32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0194976619645661,"min":-2.3787147596770644}},{"name":"conv32_1/conv2/conv/filters","shape":[3,3,32,32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0004114975824075587,"min":-0.05267169054816751}},{"name":"conv32_1/conv2/conv/bias","shape":[32],"dtype":"float32","quantization":{"dtype":"uint8","scale":4.600177166424806e-9,"min":-5.70421968636676e-7}},{"name":"conv32_1/conv2/scale/weights","shape":[32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.03400764932819441,"min":2.1677730083465576}},{"name":"conv32_1/conv2/scale/biases","shape":[32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.010974494616190593,"min":-1.240117891629537}},{"name":"conv32_2/conv1/conv/filters","shape":[3,3,32,32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0005358753251094444,"min":-0.0760942961655411}},{"name":"conv32_2/conv1/conv/bias","shape":[32],"dtype":"float32","quantization":{"dtype":"uint8","scale":5.9886454383719385e-9,"min":-7.366033889197485e-7}},{"name":"conv32_2/conv1/scale/weights","shape":[32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.014633869657329485,"min":2.769575357437134}},{"name":"conv32_2/conv1/scale/biases","shape":[32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.022131107367721257,"min":-2.5229462399202234}},{"name":"conv32_2/conv2/conv/filters","shape":[3,3,32,32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.00030145110452876373,"min":-0.03949009469326805}},{"name":"conv32_2/conv2/conv/bias","shape":[32],"dtype":"float32","quantization":{"dtype":"uint8","scale":6.8779549306497095e-9,"min":-9.010120959151119e-7}},{"name":"conv32_2/conv2/scale/weights","shape":[32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.03929369870354148,"min":4.8010945320129395}},{"name":"conv32_2/conv2/scale/biases","shape":[32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.010553357180427103,"min":-1.2452961472903983}},{"name":"conv32_3/conv1/conv/filters","shape":[3,3,32,32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0003133527642371608,"min":-0.040735859350830905}},{"name":"conv32_3/conv1/conv/bias","shape":[32],"dtype":"float32","quantization":{"dtype":"uint8","scale":4.1064200719547974e-9,"min":-3.0387508532465503e-7}},{"name":"conv32_3/conv1/scale/w
eights","shape":[32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.009252088210161994,"min":2.333256721496582}},{"name":"conv32_3/conv1/scale/biases","shape":[32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.007104101251153385,"min":-0.34810096130651585}},{"name":"conv32_3/conv2/conv/filters","shape":[3,3,32,32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.00029995629892629733,"min":-0.031195455088334923}},{"name":"conv32_3/conv2/conv/bias","shape":[32],"dtype":"float32","quantization":{"dtype":"uint8","scale":5.62726418316814e-9,"min":-6.921534945296811e-7}},{"name":"conv32_3/conv2/scale/weights","shape":[32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0467432975769043,"min":5.362040996551514}},{"name":"conv32_3/conv2/scale/biases","shape":[32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.010314425300149357,"min":-1.268674311918371}},{"name":"conv64_down/conv1/conv/filters","shape":[3,3,32,64],"dtype":"float32"},{"name":"conv64_down/conv1/conv/bias","shape":[64],"dtype":"float32","quantization":{"dtype":"uint8","scale":8.373908033218849e-10,"min":-1.172347124650639e-7}},{"name":"conv64_down/conv1/scale/weights","shape":[64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0066875364266189875,"min":2.5088400840759277}},{"name":"conv64_down/conv1/scale/biases","shape":[64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.01691421620986041,"min":-2.0973628100226906}},{"name":"conv64_down/conv2/conv/filters","shape":[3,3,64,64],"dtype":"float32"},{"name":"conv64_down/conv2/conv/bias","shape":[64],"dtype":"float32","quantization":{"dtype":"uint8","scale":2.3252014483766877e-9,"min":-2.673981665633191e-7}},{"name":"conv64_down/conv2/scale/weights","shape":[64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.032557439804077146,"min":2.6351239681243896}},{"name":"conv64_down/conv2/scale/biases","shape":[64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.015429047509735706,"min":-1.5429047509735707}},{"name":"conv64_1/conv1/conv/filters","shape":[3,3,64,64],"dtype":"float32"},{"name":"conv64_1/conv1/conv/bias","shape":[64],"dtype":"float32","quantization":{"dtype":"uint8","scale":1.1319172039756998e-9,"min":-1.4941307092479238e-7}},{"name":"conv64_1/conv1/scale/weights","shape":[64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.007802607031429515,"min":3.401733160018921}},{"name":"conv64_1/conv1/scale/biases","shape":[64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.01425027146058924,"min":-0.6982633015688727}},{"name":"conv64_1/conv2/conv/filters","shape":[3,3,64,64],"dtype":"float32"},{"name":"conv64_1/conv2/conv/bias","shape":[64],"dtype":"float32","quantization":{"dtype":"uint8","scale":2.5635019893325435e-9,"min":-2.717312108692496e-7}},{"name":"conv64_1/conv2/scale/weights","shape":[64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.04062801716374416,"min":3.542381525039673}},{"name":"conv64_1/conv2/scale/biases","shape":[64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.007973166306813557,"min":-0.7415044665336609}},{"name":"conv64_2/conv1/conv/filters","shape":[3,3,64,64],"dtype":"float32"},{"name":"conv64_2/conv1/conv/bias","shape":[64],"dtype":"float32","quantization":{"dtype":"uint8","scale":1.2535732661062331e-9,"min":-1.8302169685151004e-7}},{"name":"conv64_2/conv1/scale/weights","shape":[64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.005631206549850164,"min":2.9051668643951416}},{"name":"conv6
4_2/conv1/scale/biases","shape":[64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.01859012585060269,"min":-2.3795361088771445}},{"name":"conv64_2/conv2/conv/filters","shape":[3,3,64,64],"dtype":"float32"},{"name":"conv64_2/conv2/conv/bias","shape":[64],"dtype":"float32","quantization":{"dtype":"uint8","scale":2.486726369919351e-9,"min":-3.5311514452854786e-7}},{"name":"conv64_2/conv2/scale/weights","shape":[64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.03740917467603497,"min":5.571568965911865}},{"name":"conv64_2/conv2/scale/biases","shape":[64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.006418555858088475,"min":-0.5263215803632549}},{"name":"conv64_3/conv1/conv/filters","shape":[3,3,64,64],"dtype":"float32"},{"name":"conv64_3/conv1/conv/bias","shape":[64],"dtype":"float32","quantization":{"dtype":"uint8","scale":7.432564576875473e-10,"min":-8.47312361763804e-8}},{"name":"conv64_3/conv1/scale/weights","shape":[64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.006400122362024644,"min":2.268010377883911}},{"name":"conv64_3/conv1/scale/biases","shape":[64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.010945847922680425,"min":-1.3353934465670119}},{"name":"conv64_3/conv2/conv/filters","shape":[3,3,64,64],"dtype":"float32"},{"name":"conv64_3/conv2/conv/bias","shape":[64],"dtype":"float32","quantization":{"dtype":"uint8","scale":2.278228722014533e-9,"min":-3.212302498040492e-7}},{"name":"conv64_3/conv2/scale/weights","shape":[64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.029840927498013366,"min":7.038398265838623}},{"name":"conv64_3/conv2/scale/biases","shape":[64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.010651412197187834,"min":-1.161003929493474}},{"name":"conv128_down/conv1/conv/filters","shape":[3,3,64,128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.00020040544662989823,"min":-0.022245004575918704}},{"name":"conv128_down/conv1/conv/bias","shape":[128],"dtype":"float32","quantization":{"dtype":"uint8","scale":4.3550543563576545e-10,"min":-4.311503812794078e-8}},{"name":"conv128_down/conv1/scale/weights","shape":[128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.007448580685783835,"min":2.830846071243286}},{"name":"conv128_down/conv1/scale/biases","shape":[128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.01211262824488621,"min":-1.6957679542840696}},{"name":"conv128_down/conv2/conv/filters","shape":[3,3,128,128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.00022380277514457702,"min":-0.02484210804104805}},{"name":"conv128_down/conv2/conv/bias","shape":[128],"dtype":"float32","quantization":{"dtype":"uint8","scale":9.031058637304466e-10,"min":-1.1650065642122761e-7}},{"name":"conv128_down/conv2/scale/weights","shape":[128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.027663578706629135,"min":3.1111555099487305}},{"name":"conv128_down/conv2/scale/biases","shape":[128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.008878476946961646,"min":-1.029903325847551}},{"name":"conv128_1/conv1/conv/filters","shape":[3,3,128,128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.00022380667574265425,"min":-0.032899581334170175}},{"name":"conv128_1/conv1/conv/bias","shape":[128],"dtype":"float32","quantization":{"dtype":"uint8","scale":4.4147297756478345e-10,"min":-5.253528433020923e-8}},{"name":"conv128_1/conv1/scale/weights","shape":[128],"dtype":"float32","quantization":{"dtype":"uint8","sca
le":0.013599334978589825,"min":3.634530782699585}},{"name":"conv128_1/conv1/scale/biases","shape":[128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.014059314073300829,"min":-1.4059314073300828}},{"name":"conv128_1/conv2/conv/filters","shape":[3,3,128,128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.00021715293474057143,"min":-0.02909849325523657}},{"name":"conv128_1/conv2/conv/bias","shape":[128],"dtype":"float32","quantization":{"dtype":"uint8","scale":9.887046963276768e-10,"min":-1.1370104007768284e-7}},{"name":"conv128_1/conv2/scale/weights","shape":[128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.029993299409454943,"min":3.630716562271118}},{"name":"conv128_1/conv2/scale/biases","shape":[128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.00782704236460667,"min":-0.7200878975438136}},{"name":"conv128_2/conv1/conv/filters","shape":[3,3,128,128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.00017718105923895743,"min":-0.022324813464108636}},{"name":"conv128_2/conv1/conv/bias","shape":[128],"dtype":"float32","quantization":{"dtype":"uint8","scale":3.567012027797675e-10,"min":-5.243507680862582e-8}},{"name":"conv128_2/conv1/scale/weights","shape":[128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.007940645778880399,"min":4.927767753601074}},{"name":"conv128_2/conv1/scale/biases","shape":[128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.015933452867994122,"min":-1.5614783810634238}},{"name":"conv128_2/conv2/conv/filters","shape":[3,3,128,128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0001451439717236687,"min":-0.01712698866339291}},{"name":"conv128_2/conv2/conv/bias","shape":[128],"dtype":"float32","quantization":{"dtype":"uint8","scale":1.0383988570966347e-9,"min":-1.2356946399449953e-7}},{"name":"conv128_2/conv2/scale/weights","shape":[128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.02892604528688917,"min":4.750600814819336}},{"name":"conv128_2/conv2/scale/biases","shape":[128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.00797275748907351,"min":-0.7414664464838364}},{"name":"conv256_down/conv1/conv/filters","shape":[3,3,128,256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0002698827827093648,"min":-0.03994265184098599}},{"name":"conv256_down/conv1/conv/bias","shape":[256],"dtype":"float32","quantization":{"dtype":"uint8","scale":5.036909834755123e-10,"min":-6.396875490139006e-8}},{"name":"conv256_down/conv1/scale/weights","shape":[256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.014870181738161573,"min":4.269900798797607}},{"name":"conv256_down/conv1/scale/biases","shape":[256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.022031106200872685,"min":-3.1063859743230484}},{"name":"conv256_down/conv2/conv/filters","shape":[3,3,256,256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.00046430734150549946,"min":-0.03946612402796745}},{"name":"conv256_down/conv2/conv/bias","shape":[256],"dtype":"float32","quantization":{"dtype":"uint8","scale":6.693064577513153e-10,"min":-7.630093618364995e-8}},{"name":"conv256_down/conv2/scale/weights","shape":[256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.03475512242784687,"min":3.608360528945923}},{"name":"conv256_down/conv2/scale/biases","shape":[256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.01290142021927179,"min":-1.1482263995151893}},{"name":"conv256_1/conv1/conv/filters","shape":[3,3,256,256],"dtype":"floa
t32","quantization":{"dtype":"uint8","scale":0.00037147209924810076,"min":-0.04234781931428348}},{"name":"conv256_1/conv1/conv/bias","shape":[256],"dtype":"float32","quantization":{"dtype":"uint8","scale":3.2105515457510146e-10,"min":-3.467395669411096e-8}},{"name":"conv256_1/conv1/scale/weights","shape":[256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.043242172166412955,"min":5.28542947769165}},{"name":"conv256_1/conv1/scale/biases","shape":[256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.01643658619300992,"min":-1.3149268954407936}},{"name":"conv256_1/conv2/conv/filters","shape":[3,3,256,256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0003289232651392619,"min":-0.041773254672686264}},{"name":"conv256_1/conv2/conv/bias","shape":[256],"dtype":"float32","quantization":{"dtype":"uint8","scale":9.13591691187321e-10,"min":-1.2333487831028833e-7}},{"name":"conv256_1/conv2/scale/weights","shape":[256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0573908618852204,"min":4.360693454742432}},{"name":"conv256_1/conv2/scale/biases","shape":[256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0164216583850337,"min":-1.3958409627278647}},{"name":"conv256_2/conv1/conv/filters","shape":[3,3,256,256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.00010476927912118389,"min":-0.015610622589056398}},{"name":"conv256_2/conv1/conv/bias","shape":[256],"dtype":"float32","quantization":{"dtype":"uint8","scale":2.418552539068639e-10,"min":-2.539480166022071e-8}},{"name":"conv256_2/conv1/scale/weights","shape":[256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.06024209564807368,"min":6.598613739013672}},{"name":"conv256_2/conv1/scale/biases","shape":[256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.01578534350675695,"min":-1.1049740454729864}},{"name":"conv256_2/conv2/conv/filters","shape":[3,3,256,256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.00005543030908002573,"min":-0.007427661416723448}},{"name":"conv256_2/conv2/conv/bias","shape":[256],"dtype":"float32","quantization":{"dtype":"uint8","scale":1.0822061852320308e-9,"min":-1.515088659324843e-7}},{"name":"conv256_2/conv2/scale/weights","shape":[256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.04302893993901272,"min":2.2855491638183594}},{"name":"conv256_2/conv2/scale/biases","shape":[256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.006792667566561232,"min":-0.8083274404207865}},{"name":"conv256_down_out/conv1/conv/filters","shape":[3,3,256,256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.000568966465253456,"min":-0.05632768006009214}},{"name":"conv256_down_out/conv1/conv/bias","shape":[256],"dtype":"float32","quantization":{"dtype":"uint8","scale":4.5347887884881677e-10,"min":-6.530095855422961e-8}},{"name":"conv256_down_out/conv1/scale/weights","shape":[256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.017565592597512638,"min":4.594101905822754}},{"name":"conv256_down_out/conv1/scale/biases","shape":[256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.04850864223405427,"min":-6.306123490427055}},{"name":"conv256_down_out/conv2/conv/filters","shape":[3,3,256,256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0003739110687199761,"min":-0.06954745878191555}},{"name":"conv256_down_out/conv2/conv/bias","shape":[256],"dtype":"float32","quantization":{"dtype":"uint8","scale":1.2668428328152895e-9,"min":-2.2549802424112154e-7}},{"name":"conv2
56_down_out/conv2/scale/weights","shape":[256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.04351314469879749,"min":4.31956672668457}},{"name":"conv256_down_out/conv2/scale/biases","shape":[256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.021499746921015722,"min":-1.2039858275768804}},{"name":"fc","shape":[256,128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.000357687911566566,"min":-0.04578405268052045}}],"paths":["face_recognition_model-shard1","face_recognition_model-shard2"]}]
--------------------------------------------------------------------------------
/Django Application/static/json/mtcnn_model-shard1:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/abhijithjadhav/Deepfake_detection_using_deep_learning/534f0e9915754b3a21eb1a838925f60d90f2866c/Django Application/static/json/mtcnn_model-shard1
--------------------------------------------------------------------------------
/Django Application/static/json/mtcnn_model-weights_manifest.json:
--------------------------------------------------------------------------------
1 | [{"paths":["mtcnn_model-shard1"],"weights":[{"dtype":"float32","name":"pnet/conv1/weights","shape":[3,3,3,10]},{"dtype":"float32","name":"pnet/conv1/bias","shape":[10]},{"dtype":"float32","name":"pnet/prelu1_alpha","shape":[10]},{"dtype":"float32","name":"pnet/conv2/weights","shape":[3,3,10,16]},{"dtype":"float32","name":"pnet/conv2/bias","shape":[16]},{"dtype":"float32","name":"pnet/prelu2_alpha","shape":[16]},{"dtype":"float32","name":"pnet/conv3/weights","shape":[3,3,16,32]},{"dtype":"float32","name":"pnet/conv3/bias","shape":[32]},{"dtype":"float32","name":"pnet/prelu3_alpha","shape":[32]},{"dtype":"float32","name":"pnet/conv4_1/weights","shape":[1,1,32,2]},{"dtype":"float32","name":"pnet/conv4_1/bias","shape":[2]},{"dtype":"float32","name":"pnet/conv4_2/weights","shape":[1,1,32,4]},{"dtype":"float32","name":"pnet/conv4_2/bias","shape":[4]},{"dtype":"float32","name":"rnet/conv1/weights","shape":[3,3,3,28]},{"dtype":"float32","name":"rnet/conv1/bias","shape":[28]},{"dtype":"float32","name":"rnet/prelu1_alpha","shape":[28]},{"dtype":"float32","name":"rnet/conv2/weights","shape":[3,3,28,48]},{"dtype":"float32","name":"rnet/conv2/bias","shape":[48]},{"dtype":"float32","name":"rnet/prelu2_alpha","shape":[48]},{"dtype":"float32","name":"rnet/conv3/weights","shape":[2,2,48,64]},{"dtype":"float32","name":"rnet/conv3/bias","shape":[64]},{"dtype":"float32","name":"rnet/prelu3_alpha","shape":[64]},{"dtype":"float32","name":"rnet/fc1/weights","shape":[576,128]},{"dtype":"float32","name":"rnet/fc1/bias","shape":[128]},{"dtype":"float32","name":"rnet/prelu4_alpha","shape":[128]},{"dtype":"float32","name":"rnet/fc2_1/weights","shape":[128,2]},{"dtype":"float32","name":"rnet/fc2_1/bias","shape":[2]},{"dtype":"float32","name":"rnet/fc2_2/weights","shape":[128,4]},{"dtype":"float32","name":"rnet/fc2_2/bias","shape":[4]},{"dtype":"float32","name":"onet/conv1/weights","shape":[3,3,3,32]},{"dtype":"float32","name":"onet/conv1/bias","shape":[32]},{"dtype":"float32","name":"onet/prelu1_alpha","shape":[32]},{"dtype":"float32","name":"onet/conv2/weights","shape":[3,3,32,64]},{"dtype":"float32","name":"onet/conv2/bias","shape":[64]},{"dtype":"float32","name":"onet/prelu2_alpha","shape":[64]},{"dtype":"float32","name":"onet/conv3/weights","shape":[3,3,64,64]},{"dtype":"float32","name":"onet/conv3/bias","shape":[64]},{"dtype":"float32","name":"onet/prelu3_alpha","shape":[64]},{"dtype":"float32","name":"onet/conv4/weights","shape":[2,2,64,128]},{"dtype":"float32","name":"onet/conv4/bias","shape":[128]},{"dtype":"float32","name":"onet/prelu4_alpha","shape":[128]},{"dtype":"float32","name":"onet/fc1/weights","shape":[1152,256]},{"dtype":"float32","name":"onet/fc1/bias","shape":[256]},{"dtype":"float32","name":"onet/prelu5_alpha","shape":[256]},{"dtype":"float32","name":"onet/fc2_1/weights","shape":[256,2]},{"dtype":"float32","name":"onet/fc2_1/bias","shape":[2]},{"dtype":"float32","name":"onet/fc2_2/weights","shape":[256,4]},{"dtype":"float32","name":"onet/fc2_2/bias","shape":[4]},{"dtype":"float32","name":"onet/fc2_3/weights","shape":[256,10]},{"dtype":"float32","name":"onet/fc2_3/bias","shape":[10]}]}]
--------------------------------------------------------------------------------
/Django Application/static/json/ssd_mobilenetv1_model-shard1:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/abhijithjadhav/Deepfake_detection_using_deep_learning/534f0e9915754b3a21eb1a838925f60d90f2866c/Django Application/static/json/ssd_mobilenetv1_model-shard1
--------------------------------------------------------------------------------
/Django Application/static/json/ssd_mobilenetv1_model-shard2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/abhijithjadhav/Deepfake_detection_using_deep_learning/534f0e9915754b3a21eb1a838925f60d90f2866c/Django Application/static/json/ssd_mobilenetv1_model-shard2
--------------------------------------------------------------------------------
/Django Application/static/json/tiny_face_detector_model-shard1:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/abhijithjadhav/Deepfake_detection_using_deep_learning/534f0e9915754b3a21eb1a838925f60d90f2866c/Django Application/static/json/tiny_face_detector_model-shard1
--------------------------------------------------------------------------------
/Django Application/static/json/tiny_face_detector_model-weights_manifest.json:
--------------------------------------------------------------------------------
1 | [{"weights":[{"name":"conv0/filters","shape":[3,3,3,16],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.009007044399485869,"min":-1.2069439495311063}},{"name":"conv0/bias","shape":[16],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.005263455241334205,"min":-0.9211046672334858}},{"name":"conv1/depthwise_filter","shape":[3,3,16,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.004001977630690033,"min":-0.5042491814669441}},{"name":"conv1/pointwise_filter","shape":[1,1,16,32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.013836609615999109,"min":-1.411334180831909}},{"name":"conv1/bias","shape":[32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0015159862590771096,"min":-0.30926119685173037}},{"name":"conv2/depthwise_filter","shape":[3,3,32,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.002666276225856706,"min":-0.317286870876948}},{"name":"conv2/pointwise_filter","shape":[1,1,32,64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.015265831292844286,"min":-1.6792414422128714}},{"name":"conv2/bias","shape":[64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0020280554598453,"min":-0.37113414915168985}},{"name":"conv3/depthwise_filter","shape":[3,3,64,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.006100742489683862,"min":-0.8907084034938438}},{"name":"conv3/pointwise_filter","shape":[1,1,64,128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.016276211832083907,"min":-2.0508026908425725}},{"name":"conv3/bias","shape":[128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.003394414279975143,"min":-0.7637432129944072}},{"name":"conv4/depthwise_filter","shape":[3,3,128,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.006716050119961009,"min":-0.8059260143953211}},{"name":"conv4/pointwise_filter","shape":[1,1,128,256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.021875603993733724,"min":-2.8875797271728514}},{"name":"conv4/bias","shape":[256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0041141652009066415,"min":-0.8187188749804216}},{"name":"conv5/depthwise_filter","shape":[3,3,256,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.008423839597141042,"min":-0.9013508368940915}},{"name":"conv5/pointwise_filter","shape":[1,1,256,512],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.030007277283014035,"min":-3.8709387695088107}},{"name":"conv5/bias","shape":[512],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.008402082966823203,"min":-1.4871686851277068}},{"name":"conv8/filters","shape":[1,1,512,25],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.028336129469030042,"min":-4.675461362389957}},{"name":"conv8/bias","shape":[25],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.002268134028303857,"min":-0.41053225912299807}}],"paths":["tiny_face_detector_model-shard1"]}]
--------------------------------------------------------------------------------
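Note: each *-weights_manifest.json above pairs with the binary shard file(s) listed in its "paths" entry; face-api.js fetches both from /static/json and restores the quantized uint8 weights to float32 using the TF.js affine rule value = scale * q + min. The Python sketch below is only an illustration of that decoding for local copies of a manifest and its shards (the file names are assumptions; in this app the decoding happens in the browser):

import json
import numpy as np

# Minimal sketch, assuming local copies of a manifest and its shard(s).
with open("face_expression_model-weights_manifest.json") as f:
    manifest = json.load(f)[0]

# Shards under "paths" are concatenated into one raw byte buffer
# (face_recognition_model, for example, spans two shards).
buf = b"".join(open(p, "rb").read() for p in manifest["paths"])

offset = 0
tensors = {}
for w in manifest["weights"]:
    n = int(np.prod(w["shape"]))
    q = w.get("quantization")
    if q and q["dtype"] == "uint8":
        raw = np.frombuffer(buf, dtype=np.uint8, count=n, offset=offset)
        offset += n  # one byte per quantized value
        # TF.js affine dequantization: value = scale * q + min
        tensors[w["name"]] = (q["scale"] * raw + q["min"]).astype(np.float32).reshape(w["shape"])
    else:
        raw = np.frombuffer(buf, dtype=np.float32, count=n, offset=offset)
        offset += 4 * n  # four bytes per float32 value
        tensors[w["name"]] = raw.reshape(w["shape"])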
/Django Application/templates/base.html:
--------------------------------------------------------------------------------
1 | {% load static %}
2 | <!DOCTYPE html>
3 | <html lang="en">
4 | <head>
5 |     <meta charset="utf-8">
6 |     <meta name="viewport" content="width=device-width, initial-scale=1">
7 |     <!-- The original markup was lost in extraction; the stylesheet and script
8 |          references below are assumptions based on the repo's static/ directory. -->
9 |     <link rel="stylesheet" href="{% static 'bootstrap/bootstrap.min.css' %}">
10 |     <link rel="stylesheet" href="{% static 'css/styles.css' %}">
11 |     <title>Deepfake Detection</title>
12 | </head>
13 | <body>
14 |     {% include 'nav-bar.html' %}
15 |     {% block content %}
16 |     {% endblock %}
17 |     <script src="{% static 'js/jquery-3.5.0.min.js' %}"></script>
18 |     <script src="{% static 'js/popper.min.js' %}"></script>
19 |     <script src="{% static 'js/script.js' %}"></script>
20 |     {% block js_cripts %}
21 |     {% endblock %}
22 |     {% include 'footer.html' %}
23 | </body>
24 | </html>
--------------------------------------------------------------------------------
/Django Application/templates/footer.html:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/Django Application/templates/nav-bar.html:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/Django Application/uploaded_images/Readme.txt:
--------------------------------------------------------------------------------
1 | All uploaded images will be stored here
2 |
--------------------------------------------------------------------------------
/Django Application/uploaded_videos/Readme.txt:
--------------------------------------------------------------------------------
1 | All uploaded videos will be stored here
2 |
--------------------------------------------------------------------------------
/Documentation/B1_poster .pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/abhijithjadhav/Deepfake_detection_using_deep_learning/534f0e9915754b3a21eb1a838925f60d90f2866c/Documentation/B1_poster .pdf
--------------------------------------------------------------------------------
/Documentation/ESE presentation.pptx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/abhijithjadhav/Deepfake_detection_using_deep_learning/534f0e9915754b3a21eb1a838925f60d90f2866c/Documentation/ESE presentation.pptx
--------------------------------------------------------------------------------
/Documentation/IJSRDV8I10860.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/abhijithjadhav/Deepfake_detection_using_deep_learning/534f0e9915754b3a21eb1a838925f60d90f2866c/Documentation/IJSRDV8I10860.pdf
--------------------------------------------------------------------------------
/Documentation/Project Report.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/abhijithjadhav/Deepfake_detection_using_deep_learning/534f0e9915754b3a21eb1a838925f60d90f2866c/Documentation/Project Report.pdf
--------------------------------------------------------------------------------
/Documentation/README.md:
--------------------------------------------------------------------------------
1 | # Please find some of the reference blogs on Medium
2 |
3 |
4 | - [Deepfake Detection using Deep Learning Code Walkthrough (Part 1)](https://abhijithjadhav.medium.com/deepfake-detection-using-deep-learning-code-walkthrough-part-1-b5e43d45e59c?source=rss-d84b6adcd7dc------2)
5 | - [Multiple Images in Single Container Docker setup for Nvidia Cuda](https://abhijithjadhav.medium.com/multiple-images-in-single-container-docker-setup-for-nvidia-cuda-2f7d5548c2b0?source=rss-d84b6adcd7dc------2)
6 | - [Dockerise deepfake detection Django application using Nvidia Cuda](https://abhijithjadhav.medium.com/dockerise-deepfake-detection-django-application-using-nvidia-cuda-40cdda3b6d38?source=rss-d84b6adcd7dc------2)
7 | - [Keep your mouse moving to avoid going away on Microsoft Teams](https://abhijithjadhav.medium.com/keep-your-mouse-moving-to-avoid-going-away-on-microsoft-teams-8a7801313990?source=rss-d84b6adcd7dc------2)
8 | - [Deepfake Video Detection Using Long Short-Term Memory](https://abhijithjadhav.medium.com/deepfake-video-detection-using-long-short-term-memory-df3674f83ecc?source=rss-d84b6adcd7dc------2)
9 |
10 | 🔖 [More Blogs](https://medium.com/@abhijithjadhav)
11 |
--------------------------------------------------------------------------------
/Model Creation/Helpers/Create_csv_from_glob.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import glob\n",
10 | "import pandas as pd"
11 | ]
12 | },
13 | {
14 | "cell_type": "code",
15 | "execution_count": 30,
16 | "metadata": {},
17 | "outputs": [],
18 | "source": [
19 | "video_files = glob.glob(\"F:/College/BE/Final year project/Dataset/Celeb-DF-v2/YouTube-real/*.mp4\")"
20 | ]
21 | },
22 | {
23 | "cell_type": "code",
24 | "execution_count": 31,
25 | "metadata": {},
26 | "outputs": [],
27 | "source": [
28 | "names =[]\n",
29 | "for i in video_files:\n",
30 | " names.append(i.split(\"\\\\\")[-1])"
31 | ]
32 | },
33 | {
34 | "cell_type": "code",
35 | "execution_count": 32,
36 | "metadata": {},
37 | "outputs": [
38 | {
39 | "data": {
40 | "text/html": [
41 | "\n",
42 | "\n",
55 | "
\n",
56 | " \n",
57 | " \n",
58 | " | \n",
59 | " 0 | \n",
60 | "
\n",
61 | " \n",
62 | " \n",
63 | " \n",
64 | " 0 | \n",
65 | " 00000.mp4 | \n",
66 | "
\n",
67 | " \n",
68 | " 1 | \n",
69 | " 00001.mp4 | \n",
70 | "
\n",
71 | " \n",
72 | " 2 | \n",
73 | " 00002.mp4 | \n",
74 | "
\n",
75 | " \n",
76 | " 3 | \n",
77 | " 00003.mp4 | \n",
78 | "
\n",
79 | " \n",
80 | " 4 | \n",
81 | " 00004.mp4 | \n",
82 | "
\n",
83 | " \n",
84 | " 5 | \n",
85 | " 00005.mp4 | \n",
86 | "
\n",
87 | " \n",
88 | " 6 | \n",
89 | " 00006.mp4 | \n",
90 | "
\n",
91 | " \n",
92 | " 7 | \n",
93 | " 00007.mp4 | \n",
94 | "
\n",
95 | " \n",
96 | " 8 | \n",
97 | " 00008.mp4 | \n",
98 | "
\n",
99 | " \n",
100 | " 9 | \n",
101 | " 00009.mp4 | \n",
102 | "
\n",
103 | " \n",
104 | " 10 | \n",
105 | " 00010.mp4 | \n",
106 | "
\n",
107 | " \n",
108 | " 11 | \n",
109 | " 00011.mp4 | \n",
110 | "
\n",
111 | " \n",
112 | " 12 | \n",
113 | " 00012.mp4 | \n",
114 | "
\n",
115 | " \n",
116 | " 13 | \n",
117 | " 00013.mp4 | \n",
118 | "
\n",
119 | " \n",
120 | " 14 | \n",
121 | " 00014.mp4 | \n",
122 | "
\n",
123 | " \n",
124 | " 15 | \n",
125 | " 00015.mp4 | \n",
126 | "
\n",
127 | " \n",
128 | " 16 | \n",
129 | " 00016.mp4 | \n",
130 | "
\n",
131 | " \n",
132 | " 17 | \n",
133 | " 00017.mp4 | \n",
134 | "
\n",
135 | " \n",
136 | " 18 | \n",
137 | " 00018.mp4 | \n",
138 | "
\n",
139 | " \n",
140 | " 19 | \n",
141 | " 00019.mp4 | \n",
142 | "
\n",
143 | " \n",
144 | " 20 | \n",
145 | " 00020.mp4 | \n",
146 | "
\n",
147 | " \n",
148 | " 21 | \n",
149 | " 00021.mp4 | \n",
150 | "
\n",
151 | " \n",
152 | " 22 | \n",
153 | " 00022.mp4 | \n",
154 | "
\n",
155 | " \n",
156 | " 23 | \n",
157 | " 00023.mp4 | \n",
158 | "
\n",
159 | " \n",
160 | " 24 | \n",
161 | " 00024.mp4 | \n",
162 | "
\n",
163 | " \n",
164 | " 25 | \n",
165 | " 00025.mp4 | \n",
166 | "
\n",
167 | " \n",
168 | " 26 | \n",
169 | " 00026.mp4 | \n",
170 | "
\n",
171 | " \n",
172 | " 27 | \n",
173 | " 00027.mp4 | \n",
174 | "
\n",
175 | " \n",
176 | " 28 | \n",
177 | " 00028.mp4 | \n",
178 | "
\n",
179 | " \n",
180 | " 29 | \n",
181 | " 00029.mp4 | \n",
182 | "
\n",
183 | " \n",
184 | " ... | \n",
185 | " ... | \n",
186 | "
\n",
187 | " \n",
188 | " 270 | \n",
189 | " 00270.mp4 | \n",
190 | "
\n",
191 | " \n",
192 | " 271 | \n",
193 | " 00271.mp4 | \n",
194 | "
\n",
195 | " \n",
196 | " 272 | \n",
197 | " 00272.mp4 | \n",
198 | "
\n",
199 | " \n",
200 | " 273 | \n",
201 | " 00273.mp4 | \n",
202 | "
\n",
203 | " \n",
204 | " 274 | \n",
205 | " 00274.mp4 | \n",
206 | "
\n",
207 | " \n",
208 | " 275 | \n",
209 | " 00275.mp4 | \n",
210 | "
\n",
211 | " \n",
212 | " 276 | \n",
213 | " 00276.mp4 | \n",
214 | "
\n",
215 | " \n",
216 | " 277 | \n",
217 | " 00277.mp4 | \n",
218 | "
\n",
219 | " \n",
220 | " 278 | \n",
221 | " 00278.mp4 | \n",
222 | "
\n",
223 | " \n",
224 | " 279 | \n",
225 | " 00279.mp4 | \n",
226 | "
\n",
227 | " \n",
228 | " 280 | \n",
229 | " 00280.mp4 | \n",
230 | "
\n",
231 | " \n",
232 | " 281 | \n",
233 | " 00281.mp4 | \n",
234 | "
\n",
235 | " \n",
236 | " 282 | \n",
237 | " 00282.mp4 | \n",
238 | "
\n",
239 | " \n",
240 | " 283 | \n",
241 | " 00283.mp4 | \n",
242 | "
\n",
243 | " \n",
244 | " 284 | \n",
245 | " 00284.mp4 | \n",
246 | "
\n",
247 | " \n",
248 | " 285 | \n",
249 | " 00285.mp4 | \n",
250 | "
\n",
251 | " \n",
252 | " 286 | \n",
253 | " 00286.mp4 | \n",
254 | "
\n",
255 | " \n",
256 | " 287 | \n",
257 | " 00287.mp4 | \n",
258 | "
\n",
259 | " \n",
260 | " 288 | \n",
261 | " 00288.mp4 | \n",
262 | "
\n",
263 | " \n",
264 | " 289 | \n",
265 | " 00289.mp4 | \n",
266 | "
\n",
267 | " \n",
268 | " 290 | \n",
269 | " 00290.mp4 | \n",
270 | "
\n",
271 | " \n",
272 | " 291 | \n",
273 | " 00291.mp4 | \n",
274 | "
\n",
275 | " \n",
276 | " 292 | \n",
277 | " 00292.mp4 | \n",
278 | "
\n",
279 | " \n",
280 | " 293 | \n",
281 | " 00293.mp4 | \n",
282 | "
\n",
283 | " \n",
284 | " 294 | \n",
285 | " 00294.mp4 | \n",
286 | "
\n",
287 | " \n",
288 | " 295 | \n",
289 | " 00295.mp4 | \n",
290 | "
\n",
291 | " \n",
292 | " 296 | \n",
293 | " 00296.mp4 | \n",
294 | "
\n",
295 | " \n",
296 | " 297 | \n",
297 | " 00297.mp4 | \n",
298 | "
\n",
299 | " \n",
300 | " 298 | \n",
301 | " 00298.mp4 | \n",
302 | "
\n",
303 | " \n",
304 | " 299 | \n",
305 | " 00299.mp4 | \n",
306 | "
\n",
307 | " \n",
308 | "
\n",
309 | "
300 rows × 1 columns
\n",
310 | "
"
311 | ],
312 | "text/plain": [
313 | " 0\n",
314 | "0 00000.mp4\n",
315 | "1 00001.mp4\n",
316 | "2 00002.mp4\n",
317 | "3 00003.mp4\n",
318 | "4 00004.mp4\n",
319 | "5 00005.mp4\n",
320 | "6 00006.mp4\n",
321 | "7 00007.mp4\n",
322 | "8 00008.mp4\n",
323 | "9 00009.mp4\n",
324 | "10 00010.mp4\n",
325 | "11 00011.mp4\n",
326 | "12 00012.mp4\n",
327 | "13 00013.mp4\n",
328 | "14 00014.mp4\n",
329 | "15 00015.mp4\n",
330 | "16 00016.mp4\n",
331 | "17 00017.mp4\n",
332 | "18 00018.mp4\n",
333 | "19 00019.mp4\n",
334 | "20 00020.mp4\n",
335 | "21 00021.mp4\n",
336 | "22 00022.mp4\n",
337 | "23 00023.mp4\n",
338 | "24 00024.mp4\n",
339 | "25 00025.mp4\n",
340 | "26 00026.mp4\n",
341 | "27 00027.mp4\n",
342 | "28 00028.mp4\n",
343 | "29 00029.mp4\n",
344 | ".. ...\n",
345 | "270 00270.mp4\n",
346 | "271 00271.mp4\n",
347 | "272 00272.mp4\n",
348 | "273 00273.mp4\n",
349 | "274 00274.mp4\n",
350 | "275 00275.mp4\n",
351 | "276 00276.mp4\n",
352 | "277 00277.mp4\n",
353 | "278 00278.mp4\n",
354 | "279 00279.mp4\n",
355 | "280 00280.mp4\n",
356 | "281 00281.mp4\n",
357 | "282 00282.mp4\n",
358 | "283 00283.mp4\n",
359 | "284 00284.mp4\n",
360 | "285 00285.mp4\n",
361 | "286 00286.mp4\n",
362 | "287 00287.mp4\n",
363 | "288 00288.mp4\n",
364 | "289 00289.mp4\n",
365 | "290 00290.mp4\n",
366 | "291 00291.mp4\n",
367 | "292 00292.mp4\n",
368 | "293 00293.mp4\n",
369 | "294 00294.mp4\n",
370 | "295 00295.mp4\n",
371 | "296 00296.mp4\n",
372 | "297 00297.mp4\n",
373 | "298 00298.mp4\n",
374 | "299 00299.mp4\n",
375 | "\n",
376 | "[300 rows x 1 columns]"
377 | ]
378 | },
379 | "execution_count": 32,
380 | "metadata": {},
381 | "output_type": "execute_result"
382 | }
383 | ],
384 | "source": [
385 | "df = pd.DataFrame(names)\n",
386 | "df"
387 | ]
388 | },
389 | {
390 | "cell_type": "code",
391 | "execution_count": 33,
392 | "metadata": {},
393 | "outputs": [],
394 | "source": [
395 | "df.to_csv(\"F:/College/BE/Final year project/Dataset/Celeb-DF-v2/real_youtube.csv\")"
396 | ]
397 | }
398 | ],
399 | "metadata": {
400 | "kernelspec": {
401 | "display_name": "Python 3",
402 | "language": "python",
403 | "name": "python3"
404 | },
405 | "language_info": {
406 | "codemirror_mode": {
407 | "name": "ipython",
408 | "version": 3
409 | },
410 | "file_extension": ".py",
411 | "mimetype": "text/x-python",
412 | "name": "python",
413 | "nbconvert_exporter": "python",
414 | "pygments_lexer": "ipython3",
415 | "version": "3.6.5"
416 | }
417 | },
418 | "nbformat": 4,
419 | "nbformat_minor": 2
420 | }
421 |
--------------------------------------------------------------------------------
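The notebook above globs the Celeb-DF-v2 videos, strips the directory prefix from each path, and writes the bare file names to a CSV. A standalone equivalent is sketched below (the paths are the author's; adjust them to your dataset location). Path.name replaces the Windows-specific split("\\"), so the same script also works on Linux/macOS:

from pathlib import Path
import pandas as pd

# Collect the video file names (directory prefix dropped in a portable way).
dataset_dir = Path("F:/College/BE/Final year project/Dataset/Celeb-DF-v2/YouTube-real")
names = [p.name for p in dataset_dir.glob("*.mp4")]

# Write one file name per row, matching the notebook's CSV output.
pd.DataFrame(names).to_csv(dataset_dir.parent / "real_youtube.csv")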
/Model Creation/Helpers/Remove_audio_altered_files.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import glob\n",
10 | "import pandas as pd"
11 | ]
12 | },
13 | {
14 | "cell_type": "code",
15 | "execution_count": 158,
16 | "metadata": {},
17 | "outputs": [],
18 | "source": [
19 | "path = 'F:/College/BE/Final year project/Dataset/Kaggle dataset/dfdc_train_part_8/'\n",
20 | "video_files = glob.glob(path + \"*.mp4\")"
21 | ]
22 | },
23 | {
24 | "cell_type": "code",
25 | "execution_count": 159,
26 | "metadata": {},
27 | "outputs": [
28 | {
29 | "data": {
30 | "text/plain": [
31 | "0"
32 | ]
33 | },
34 | "execution_count": 159,
35 | "metadata": {},
36 | "output_type": "execute_result"
37 | }
38 | ],
39 | "source": [
40 | "len(video_files)"
41 | ]
42 | },
43 | {
44 | "cell_type": "code",
45 | "execution_count": 160,
46 | "metadata": {},
47 | "outputs": [],
48 | "source": [
49 | "names =[]\n",
50 | "for i in video_files:\n",
51 | " names.append(i.split(\"\\\\\")[-1])"
52 | ]
53 | },
54 | {
55 | "cell_type": "code",
56 | "execution_count": 151,
57 | "metadata": {},
58 | "outputs": [
59 | {
60 | "data": {
61 | "text/plain": [
62 | "2473"
63 | ]
64 | },
65 | "execution_count": 151,
66 | "metadata": {},
67 | "output_type": "execute_result"
68 | }
69 | ],
70 | "source": [
71 | "len(names)"
72 | ]
73 | },
74 | {
75 | "cell_type": "code",
76 | "execution_count": 152,
77 | "metadata": {},
78 | "outputs": [],
79 | "source": [
80 | "media_altered = pd.read_csv(\"F:/College/BE/Final year project/Dataset/Kaggle dataset/metadata_audio_altered.csv\")"
81 | ]
82 | },
83 | {
84 | "cell_type": "code",
85 | "execution_count": 153,
86 | "metadata": {},
87 | "outputs": [],
88 | "source": [
89 | "common = []\n",
90 | "for i in names:\n",
91 | " if i in altered:\n",
92 | " common.append(i)"
93 | ]
94 | },
95 | {
96 | "cell_type": "code",
97 | "execution_count": 154,
98 | "metadata": {},
99 | "outputs": [
100 | {
101 | "data": {
102 | "text/plain": [
103 | "127"
104 | ]
105 | },
106 | "execution_count": 154,
107 | "metadata": {},
108 | "output_type": "execute_result"
109 | }
110 | ],
111 | "source": [
112 | "len(common)"
113 | ]
114 | },
115 | {
116 | "cell_type": "code",
117 | "execution_count": 155,
118 | "metadata": {},
119 | "outputs": [
120 | {
121 | "data": {
122 | "text/plain": [
123 | "2473"
124 | ]
125 | },
126 | "execution_count": 155,
127 | "metadata": {},
128 | "output_type": "execute_result"
129 | }
130 | ],
131 | "source": [
132 | "len(names)"
133 | ]
134 | },
135 | {
136 | "cell_type": "code",
137 | "execution_count": 156,
138 | "metadata": {},
139 | "outputs": [],
140 | "source": [
141 | "import os\n",
142 | "for i in common:\n",
143 | " file =path+i\n",
144 | " if os.path.exists(file):\n",
145 | " os.remove(path+i)\n",
146 | " else:\n",
147 | " print(\"The file does not exist\")\n",
148 | "\n"
149 | ]
150 | },
151 | {
152 | "cell_type": "code",
153 | "execution_count": 157,
154 | "metadata": {},
155 | "outputs": [
156 | {
157 | "data": {
158 | "text/plain": [
159 | "2346"
160 | ]
161 | },
162 | "execution_count": 157,
163 | "metadata": {},
164 | "output_type": "execute_result"
165 | }
166 | ],
167 | "source": [
168 | "new = glob.glob(path + \"*.mp4\")\n",
169 | "len(new)"
170 | ]
171 | },
172 | {
173 | "cell_type": "code",
174 | "execution_count": null,
175 | "metadata": {},
176 | "outputs": [],
177 | "source": []
178 | },
179 | {
180 | "cell_type": "code",
181 | "execution_count": null,
182 | "metadata": {},
183 | "outputs": [],
184 | "source": []
185 | }
186 | ],
187 | "metadata": {
188 | "kernelspec": {
189 | "display_name": "Python 3",
190 | "language": "python",
191 | "name": "python3"
192 | },
193 | "language_info": {
194 | "codemirror_mode": {
195 | "name": "ipython",
196 | "version": 3
197 | },
198 | "file_extension": ".py",
199 | "mimetype": "text/x-python",
200 | "name": "python",
201 | "nbconvert_exporter": "python",
202 | "pygments_lexer": "ipython3",
203 | "version": "3.6.5"
204 | }
205 | },
206 | "nbformat": 4,
207 | "nbformat_minor": 2
208 | }
209 |
--------------------------------------------------------------------------------
/Model Creation/Helpers/for_Balancing_data.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 10,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import json\n",
10 | "import glob\n",
11 | "import numpy as np\n",
12 | "import cv2\n",
13 | "import copy\n",
14 | "import pandas as pd"
15 | ]
16 | },
17 | {
18 | "cell_type": "code",
19 | "execution_count": 71,
20 | "metadata": {},
21 | "outputs": [],
22 | "source": [
23 | "\n",
24 | "path_to_json='F:/College/BE/Final year project/Dataset/Kaggle dataset/Gobal_metadata.json'\n",
25 | "path_to_json = 'F:/College/BE/Final year project/Dataset/Kaggle dataset/deepfake-detection-challenge/train_sample_videos/metadata.json'\n",
26 | "\n",
27 | "\n",
28 | "\n",
29 | "#path_to_json1 = '/content/dfdc_train_part_0/metadata.json'\n",
30 | "with open(path_to_json) as f:\n",
31 | " labels = json.load(f)"
32 | ]
33 | },
34 | {
35 | "cell_type": "code",
36 | "execution_count": 72,
37 | "metadata": {},
38 | "outputs": [
39 | {
40 | "name": "stdout",
41 | "output_type": "stream",
42 | "text": [
43 | "77\n",
44 | "abarnvbtwb.mp4 OR aelfnikyqj.mp4 OR afoovlsmtx.mp4 OR agrmhtjdlk.mp4 OR ahqqqilsxt.mp4 OR ajqslcypsw.mp4 OR anpuvshzoo.mp4 OR asaxgevnnp.mp4 OR atkdltyyen.mp4 OR atvmxvwyns.mp4 OR avmjormvsx.mp4 OR axntxmycwd.mp4 OR aybgughjxh.mp4 OR aybumesmpk.mp4 OR aytzyidmgs.mp4 OR bddjdhzfze.mp4 OR bdnaqemxmr.mp4 OR beboztfcme.mp4 OR bejhvclboh.mp4 OR beyebyhrph.mp4\n",
45 | "bffwsjxghk.mp4 OR bgvhtpzknn.mp4 OR bgwmmujlmc.mp4 OR bilnggbxgu.mp4 OR bmjzrlszhi.mp4 OR bpapbctoao.mp4 OR brwrlczjvi.mp4 OR bulkxhhknf.mp4 OR bwhlgysghg.mp4 OR bwipwzzxxu.mp4 OR bxzakyopjf.mp4 OR bzythlfnhq.mp4 OR caifxvsozs.mp4 OR ccfoszqabv.mp4 OR cfxkpiweqt.mp4 OR chtapglbcj.mp4 OR chviwxsfhg.mp4 OR ciyoudyhly.mp4 OR cizlkenljw.mp4 OR ckjaibzfxa.mp4\n",
46 | "ckkuyewywx.mp4 OR clrycekyst.mp4 OR cmbzllswnl.mp4 OR cobjrlugvp.mp4 OR cpjxareypw.mp4 OR cppdvdejkc.mp4 OR cprhtltsjp.mp4 OR crezycjqyk.mp4 OR cyxlcuyznd.mp4 OR dakiztgtnw.mp4 OR dbnygxtwek.mp4 OR dbtbbhakdv.mp4 OR ddepeddixj.mp4 OR dhcndnuwta.mp4 OR dhxctgyoqj.mp4 OR djxdyjopjd.mp4 OR dkuayagnmc.mp4 OR dkzvdrzcnr.mp4 OR dlpoieqvfb.mp4 OR drcyabprvt.mp4\n"
47 | ]
48 | },
49 | {
50 | "ename": "TypeError",
51 | "evalue": "join() takes exactly one argument (0 given)",
52 | "output_type": "error",
53 | "traceback": [
54 | "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
55 | "\u001b[1;31mTypeError\u001b[0m Traceback (most recent call last)",
56 | "\u001b[1;32m\u001b[0m in \u001b[0;36m\u001b[1;34m()\u001b[0m\n\u001b[0;32m 11\u001b[0m \u001b[0mprint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\" OR \"\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mjoin\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mlist_of_real\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mj\u001b[0m\u001b[1;33m:\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 12\u001b[0m \u001b[0mj\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mi\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 13\u001b[1;33m \u001b[1;34m\" OR \"\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mjoin\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;31m#search the output by outpu string\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
57 | "\u001b[1;31mTypeError\u001b[0m: join() takes exactly one argument (0 given)"
58 | ]
59 | }
60 | ],
61 | "source": [
62 | "df = pd.DataFrame(labels).T\n",
63 | "df.sort_values('label')\n",
64 | "df.loc[df['label'] == 'REAL']\n",
65 | "read_df = pd.DataFrame(df.loc[df['label'] == 'REAL'])\n",
66 | "read_df.index.values\n",
67 | "\n",
68 | "list_of_real = list(read_df.index.values)\n",
69 | "j = 0\n",
70 | "print(len(list_of_real))\n",
71 | "for i in range(20,len(list_of_real),20):\n",
72 | " print(\" OR \".join(list_of_real[j:i]))\n",
73 | " j = i\n",
74 | "\" OR \".join() #search the output by outpu string"
75 | ]
76 | },
77 | {
78 | "cell_type": "code",
79 | "execution_count": 73,
80 | "metadata": {},
81 | "outputs": [
82 | {
83 | "name": "stdout",
84 | "output_type": "stream",
85 | "text": [
86 | "323\n",
87 | "aagfhgtpmv.mp4 OR aapnvogymq.mp4 OR abofeumbvv.mp4 OR abqwwspghj.mp4 OR acifjvzvpm.mp4 OR acqfdwsrhi.mp4 OR acxnxvbsxk.mp4 OR acxwigylke.mp4 OR aczrgyricp.mp4 OR adhsbajydo.mp4 OR adohikbdaz.mp4 OR adylbeequz.mp4 OR aelzhcnwgf.mp4 OR aettqgevhz.mp4 OR aevrfsexku.mp4 OR agdkmztvby.mp4 OR agqphdxmwt.mp4 OR ahbweevwpv.mp4 OR ahdbuwqxit.mp4 OR ahfazfbntc.mp4\n",
88 | "aipfdnwpoo.mp4 OR ajwpjhrbcv.mp4 OR aklqzsddfl.mp4 OR aknbdpmgua.mp4 OR aknmpoonls.mp4 OR akvmwkdyuv.mp4 OR akxoopqjqz.mp4 OR akzbnazxtz.mp4 OR aladcziidp.mp4 OR alaijyygdv.mp4 OR alninxcyhg.mp4 OR altziddtxi.mp4 OR alvgwypubw.mp4 OR amaivqofda.mp4 OR amowujxmzc.mp4 OR andaxzscny.mp4 OR aneclqfpbt.mp4 OR aorjvbyxhw.mp4 OR apatcsqejh.mp4 OR apgjqzkoma.mp4\n",
89 | "apogckdfrz.mp4 OR aqpnvjhuzw.mp4 OR arkroixhey.mp4 OR arlmiizoob.mp4 OR arrhsnjqku.mp4 OR asdpeebotb.mp4 OR aslsvlvpth.mp4 OR asmpfjfzif.mp4 OR asvcrfdpnq.mp4 OR atxvxouljq.mp4 OR atyntldecu.mp4 OR atzdznmder.mp4 OR aufmsmnoye.mp4 OR augtsuxpzc.mp4 OR avfitoutyn.mp4 OR avgiuextiz.mp4 OR avibnnhwhp.mp4 OR avnqydkqjj.mp4 OR avssvvsdhz.mp4 OR avtycwsgyb.mp4\n",
90 | "avvdgsennp.mp4 OR avywawptfc.mp4 OR awhmfnnjih.mp4 OR awnwkrqibf.mp4 OR awukslzjra.mp4 OR axczxisdtb.mp4 OR axoygtekut.mp4 OR axwgcsyphv.mp4 OR axwovszumc.mp4 OR ayqvfdhslr.mp4 OR azpuxunqyo.mp4 OR azsmewqghg.mp4 OR bahdpoesir.mp4 OR bbhpvrmbse.mp4 OR bbhtdfuqxq.mp4 OR bbvgxeczei.mp4 OR bchnbulevv.mp4 OR bctvsmddgq.mp4 OR bdbhekrrwo.mp4 OR bdgipnyobr.mp4\n",
91 | "bdxuhamuqx.mp4 OR benmsfzfaz.mp4 OR bgaogsjehq.mp4 OR bggsurpgpr.mp4 OR bghphrsfxf.mp4 OR bgmlwsoamc.mp4 OR bguwlyazau.mp4 OR bhaaboftbc.mp4 OR bhbdugnurr.mp4 OR bhpwpydzpo.mp4 OR bhsluedavd.mp4 OR bjjbwsqjir.mp4 OR bjkmjilrxp.mp4 OR bjsmaqefoi.mp4 OR bkmdzhfzfh.mp4 OR bkvetcojbt.mp4 OR bkwxhglwct.mp4 OR blpchvmhxx.mp4 OR blzydqdfem.mp4 OR bmbbkwmxqj.mp4\n",
92 | "bmehkyanbj.mp4 OR bmhvktyiwp.mp4 OR bmioepcpsx.mp4 OR bmjmjmbglm.mp4 OR bnbuonyoje.mp4 OR bndybcqhfr.mp4 OR bnjcdrfuov.mp4 OR bntlodcfeg.mp4 OR bofqajtwve.mp4 OR boovltmuwi.mp4 OR bopqhhalml.mp4 OR bourlmzsio.mp4 OR bpwzipqtxf.mp4 OR bpxckdzddv.mp4 OR bqdjzqhcft.mp4 OR bqeiblbxtl.mp4 OR bqhtpqmmqp.mp4 OR bqkdbcqjvb.mp4 OR bqnymlsayl.mp4 OR bqqpbzjgup.mp4\n",
93 | "bqtuuwzdtr.mp4 OR brhalypwoo.mp4 OR brvqtabyxj.mp4 OR bseamdrpbj.mp4 OR bsfmwclnqy.mp4 OR bsqgziaylx.mp4 OR btiysiskpf.mp4 OR btjlfpzbdu.mp4 OR btjwbtsgln.mp4 OR btmsngnqhv.mp4 OR btohlidmru.mp4 OR btugrnoton.mp4 OR btunxncpjh.mp4 OR btxlttbpkj.mp4 OR bvgwelbeof.mp4 OR bvzjkezkms.mp4 OR bweezhfpzp.mp4 OR bwuwstvsbw.mp4 OR bydaidkpdp.mp4 OR byfenovjnf.mp4\n",
94 | "byijojkdba.mp4 OR byofowlkki.mp4 OR byqzyxifza.mp4 OR byunigvnay.mp4 OR byyqectxqa.mp4 OR bzmdrafeex.mp4 OR caqbrkogkb.mp4 OR cbbibzcoih.mp4 OR cbltdtxglo.mp4 OR ccmonzqfrz.mp4 OR cdaxixbosp.mp4 OR cdbsbdymzd.mp4 OR cdphtzqrvp.mp4 OR cdyakrxkia.mp4 OR cepxysienc.mp4 OR cettndmvzl.mp4 OR ceymbecxnj.mp4 OR cferslmfwh.mp4 OR cffffbcywc.mp4 OR cfyduhpbps.mp4\n",
95 | "cglxirfaey.mp4 OR cgvrgibpfo.mp4 OR chzieimrwu.mp4 OR ckbdwedgmc.mp4 OR cknyxaqouy.mp4 OR cksanfsjhc.mp4 OR clihsshdkq.mp4 OR cmxcfkrjiv.mp4 OR cnilkgvfei.mp4 OR coadfnerlk.mp4 OR covdcysmbi.mp4 OR cqfugiqupm.mp4 OR cqhngvpgyi.mp4 OR cqrskwiqng.mp4 OR crktehraph.mp4 OR crzfebnfgb.mp4 OR cthdnahrkh.mp4 OR ctpqeykqdp.mp4 OR cttqtsjvgn.mp4 OR ctzmavwror.mp4\n",
96 | "curpwogllm.mp4 OR cuzrgrbvil.mp4 OR cvaksbpssm.mp4 OR cwbacdwrzo.mp4 OR cwqlvzefpg.mp4 OR cwrtyzndpx.mp4 OR cwsbspfzck.mp4 OR cwwandrkus.mp4 OR cxfujlvsuw.mp4 OR cxrfacemmq.mp4 OR cxttmymlbn.mp4 OR cyboodqqyr.mp4 OR cycacemkmt.mp4 OR cyclgfjdrv.mp4 OR czfunozvwp.mp4 OR czkdanyadc.mp4 OR czmqpxrqoh.mp4 OR dafhtipaml.mp4 OR dakqwktlbi.mp4 OR dbhoxkblzx.mp4\n",
97 | "dbhrpizyeq.mp4 OR dboxtiehng.mp4 OR dbzcqmxzaj.mp4 OR dbzpcjntve.mp4 OR dcamvmuors.mp4 OR dcuiiorugd.mp4 OR ddhfabwpuz.mp4 OR ddjggcasdw.mp4 OR ddpvuimigj.mp4 OR ddqccgmtka.mp4 OR degpbqvcay.mp4 OR deywhkarol.mp4 OR deyyistcrd.mp4 OR dfbpceeaox.mp4 OR dgmevclvzy.mp4 OR dgxrqjdomn.mp4 OR dgzklxjmix.mp4 OR dhcselezer.mp4 OR dhevettufk.mp4 OR dhjmzhrcav.mp4\n",
98 | "dhkwmjxwrn.mp4 OR dhoqofwoxa.mp4 OR diomeixhrg.mp4 OR diopzaywor.mp4 OR diqraixiov.mp4 OR diuzrpqjli.mp4 OR djvtbgwdcc.mp4 OR djvutyvaio.mp4 OR dkdwxmtpuo.mp4 OR dkhlttuvmx.mp4 OR dkrvorliqc.mp4 OR dkwjwbwgey.mp4 OR dlrsbscitn.mp4 OR dnexlwbcxq.mp4 OR dnhvalzvrt.mp4 OR dntkzzzcdh.mp4 OR dnyvfblxpm.mp4 OR doanjploai.mp4 OR dofusvhnib.mp4 OR dozyddhild.mp4\n",
99 | "dptbnjnkdg.mp4 OR dptrzdvwpg.mp4 OR dqnyszdong.mp4 OR dqppxmoqdl.mp4 OR dqqtjcryjv.mp4 OR dqswpjoepo.mp4 OR dqzreruvje.mp4 OR drgjzlxzxj.mp4 OR drsakwyvqv.mp4 OR drtbksnpol.mp4 OR dsdoseflas.mp4 OR dsgpbgsrdm.mp4 OR dsndhujjjb.mp4 OR dtbpmdqvao.mp4 OR dtocdfbwca.mp4 OR dubiroskqn.mp4 OR dulanfulol.mp4 OR duvyaxbzvp.mp4 OR duzuusuajr.mp4 OR dvakowbgbt.mp4\n",
100 | "dvumqqhoac.mp4 OR dwediigjit.mp4 OR dxuliowugt.mp4 OR dxuplhwvig.mp4 OR dzieklokdr.mp4 OR dzqwgqewhu.mp4 OR dzvyfiarrq.mp4 OR dzwkmcwkwl.mp4 OR eahlqmfvtj.mp4 OR eajlrktemq.mp4 OR ebchwmwayp.mp4 OR ebebgmtlcu.mp4 OR ebeknhudxq.mp4 OR ebkzwjgjhq.mp4 OR ebywfrmhtd.mp4 OR ecnihjlfyt.mp4 OR ecuvtoltue.mp4 OR ecwaxgutkc.mp4 OR eczrseixwq.mp4 OR eebrkicpry.mp4\n",
101 | "eebserckhh.mp4 OR eejswgycjc.mp4 OR eekozbeafq.mp4 OR eepezmygaq.mp4 OR eeyhxisdfh.mp4 OR efdyrflcpg.mp4 OR egbbcxcuqy.mp4 OR ehbnclaukr.mp4 OR ehdkmxgtxh.mp4 OR ehevsxtecd.mp4 OR ehfiekigla.mp4 OR ehieahnhte.mp4 OR eiriyukqqy.mp4 OR eivxffliio.mp4 OR eiwopxzjfn.mp4 OR eixwxvxbbn.mp4 OR ejkqesyvam.mp4 OR ekhacizpah.mp4 OR ekkdjkirzq.mp4 OR elginszwtk.mp4\n",
102 | "elvvackpjh.mp4 OR emaalmsonj.mp4 OR emfbhytfhc.mp4 OR emgjphonqb.mp4 OR ensyyivobf.mp4 OR eoewqcpbgt.mp4 OR eprybmbpba.mp4 OR epymyyiblu.mp4 OR eqjscdagiv.mp4 OR eqvuznuwsa.mp4 OR erqgqacbqe.mp4 OR errocgcham.mp4 OR esckbnkkvb.mp4 OR esgftaficx.mp4 OR esnntzzajv.mp4 OR esxrvsgpvb.mp4 OR esyhwdfnxs.mp4 OR esyrimvzsa.mp4 OR etdcqxabww.mp4 OR etejaapnxh.mp4\n"
103 | ]
104 | },
105 | {
106 | "ename": "TypeError",
107 | "evalue": "join() takes exactly one argument (0 given)",
108 | "output_type": "error",
109 | "traceback": [
110 | "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
111 | "\u001b[1;31mTypeError\u001b[0m Traceback (most recent call last)",
112 | "\u001b[1;32m\u001b[0m in \u001b[0;36m\u001b[1;34m()\u001b[0m\n\u001b[0;32m 9\u001b[0m \u001b[0mprint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\" OR \"\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mjoin\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mlist_of_fake\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mj\u001b[0m\u001b[1;33m:\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 10\u001b[0m \u001b[0mj\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mi\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 11\u001b[1;33m \u001b[1;34m\" OR \"\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mjoin\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;31m#search the output by outpu string\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
113 | "\u001b[1;31mTypeError\u001b[0m: join() takes exactly one argument (0 given)"
114 | ]
115 | }
116 | ],
117 | "source": [
118 | "df = pd.DataFrame(labels).T\n",
119 | "read_df = pd.DataFrame(df.loc[df['label'] == 'FAKE'])\n",
120 | "read_df.index.values\n",
121 | "\n",
122 | "list_of_fake = list(read_df.index.values)\n",
123 | "j = 0\n",
124 | "print(len(list_of_fake))\n",
125 | "for i in range(20,len(list_of_fake),20):\n",
126 | " print(\" OR \".join(list_of_fake[j:i]))\n",
127 | " j = i\n",
128 | "\" OR \".join() #search the output by outpu string"
129 | ]
130 | },
131 | {
132 | "cell_type": "code",
133 | "execution_count": null,
134 | "metadata": {},
135 | "outputs": [],
136 | "source": []
137 | }
138 | ],
139 | "metadata": {
140 | "kernelspec": {
141 | "display_name": "Python 3",
142 | "language": "python",
143 | "name": "python3"
144 | },
145 | "language_info": {
146 | "codemirror_mode": {
147 | "name": "ipython",
148 | "version": 3
149 | },
150 | "file_extension": ".py",
151 | "mimetype": "text/x-python",
152 | "name": "python",
153 | "nbconvert_exporter": "python",
154 | "pygments_lexer": "ipython3",
155 | "version": "3.6.5"
156 | }
157 | },
158 | "nbformat": 4,
159 | "nbformat_minor": 2
160 | }
161 |
--------------------------------------------------------------------------------
/Model Creation/Helpers/label_json_to_csv.py:
--------------------------------------------------------------------------------
1 |
2 | # coding: utf-8
3 |
4 | # In[1]:
5 |
6 |
7 | import pandas as pd
8 |
9 |
10 | # In[40]:
11 |
12 |
13 | for i in range(8):
14 |     path_json = r"F:\College\BE\Final year project\FF_Dataset\Kaggle dataset\dfdc_train_part_" + str(i) + r"\metadata.json"  # raw strings keep the Windows backslashes literal
15 |     path_csv = r"F:\College\BE\Final year project\FF_Dataset\Kaggle dataset\dfdc_train_part_" + str(i) + r"\metadata.csv"
16 |     print(path_csv)
17 |     print(path_json)
18 |     read_json = pd.read_json(path_json)
19 |     df = pd.DataFrame(read_json)
20 |     df_2 = pd.DataFrame(df.transpose())  # transpose so each video becomes a row
21 |     df_2.to_csv(path_csv)
22 |     read_csv = pd.read_csv(path_csv)
23 |     read_csv.columns = ["URI", "label", "original", "split"]  # name the columns, including the former index column
24 |     read_csv.to_csv(path_csv, index=False)
25 |     print(read_csv.head(5))
26 |
27 |
--------------------------------------------------------------------------------
/Model Creation/Predict.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "nbformat": 4,
3 | "nbformat_minor": 0,
4 | "metadata": {
5 | "colab": {
6 | "name": "Predict.ipynb",
7 | "provenance": [],
8 | "collapsed_sections": []
9 | },
10 | "kernelspec": {
11 | "name": "python3",
12 | "display_name": "Python 3"
13 | },
14 | "accelerator": "GPU"
15 | },
16 | "cells": [
17 | {
18 | "cell_type": "code",
19 | "metadata": {
20 | "id": "ddyQ8HE8zeYj",
21 | "colab_type": "code",
22 | "colab": {}
23 | },
24 | "source": [
25 | "#before running this please change the RUNTIME to GPU (Runtime -> Change runtime type -> set harware accelarotor as GPU)\n",
26 | "#Mount our google drive\n",
27 | "from google.colab import drive\n",
28 | "drive.mount('/content/drive')"
29 | ],
30 | "execution_count": null,
31 | "outputs": []
32 | },
33 | {
34 | "cell_type": "code",
35 | "metadata": {
36 | "id": "3vj15Vde0Gxs",
37 | "colab_type": "code",
38 | "colab": {}
39 | },
40 | "source": [
41 | "#import libraries\n",
42 | "!pip3 install face_recognition\n",
43 | "\n",
44 | "import torch\n",
45 | "import torchvision\n",
46 | "from torchvision import transforms\n",
47 | "from torch.utils.data import DataLoader\n",
48 | "from torch.utils.data.dataset import Dataset\n",
49 | "import os\n",
50 | "import numpy as np\n",
51 | "import cv2\n",
52 | "import matplotlib.pyplot as plt\n",
53 | "import face_recognition"
54 | ],
55 | "execution_count": null,
56 | "outputs": []
57 | },
58 | {
59 | "cell_type": "code",
60 | "metadata": {
61 | "id": "C3r-9rMc1DmO",
62 | "colab_type": "code",
63 | "colab": {}
64 | },
65 | "source": [
66 | "#import libraries\n",
67 | "import torch\n",
68 | "from torch.autograd import Variable\n",
69 | "import time\n",
70 | "import os\n",
71 | "import sys\n",
72 | "import os\n",
73 | "from torch import nn\n",
74 | "from torchvision import models"
75 | ],
76 | "execution_count": null,
77 | "outputs": []
78 | },
79 | {
80 | "cell_type": "code",
81 | "metadata": {
82 | "id": "g1G0IoVy2Pc7",
83 | "colab_type": "code",
84 | "colab": {}
85 | },
86 | "source": [
87 | "#Model with feature visualization\n",
88 | "from torch import nn\n",
89 | "from torchvision import models\n",
90 | "class Model(nn.Module):\n",
91 | " def __init__(self, num_classes,latent_dim= 2048, lstm_layers=1 , hidden_dim = 2048, bidirectional = False):\n",
92 | " super(Model, self).__init__()\n",
93 | " model = models.resnext50_32x4d(pretrained = True)\n",
94 | " self.model = nn.Sequential(*list(model.children())[:-2])\n",
95 | " self.lstm = nn.LSTM(latent_dim,hidden_dim, lstm_layers, bidirectional)\n",
96 | " self.relu = nn.LeakyReLU()\n",
97 | " self.dp = nn.Dropout(0.4)\n",
98 | " self.linear1 = nn.Linear(2048,num_classes)\n",
99 | " self.avgpool = nn.AdaptiveAvgPool2d(1)\n",
100 | " def forward(self, x):\n",
101 | " batch_size,seq_length, c, h, w = x.shape\n",
102 | " x = x.view(batch_size * seq_length, c, h, w)\n",
103 | " fmap = self.model(x)\n",
104 | " x = self.avgpool(fmap)\n",
105 | " x = x.view(batch_size,seq_length,2048)\n",
106 | " x_lstm,_ = self.lstm(x,None)\n",
107 | " return fmap,self.dp(self.linear1(x_lstm[:,-1,:]))"
108 | ],
109 | "execution_count": null,
110 | "outputs": []
111 | },
112 | {
113 | "cell_type": "code",
114 | "metadata": {
115 | "id": "avpp16KLze7T",
116 | "colab_type": "code",
117 | "colab": {}
118 | },
119 | "source": [
120 | "im_size = 112\n",
121 | "mean=[0.485, 0.456, 0.406]\n",
122 | "std=[0.229, 0.224, 0.225]\n",
123 | "sm = nn.Softmax()\n",
124 | "inv_normalize = transforms.Normalize(mean=-1*np.divide(mean,std),std=np.divide([1,1,1],std))\n",
125 | "def im_convert(tensor):\n",
126 | " \"\"\" Display a tensor as an image. \"\"\"\n",
127 | " image = tensor.to(\"cpu\").clone().detach()\n",
128 | " image = image.squeeze()\n",
129 | " image = inv_normalize(image)\n",
130 | " image = image.numpy()\n",
131 | " image = image.transpose(1,2,0)\n",
132 | " image = image.clip(0, 1)\n",
133 | " cv2.imwrite('./2.png',image*255)\n",
134 | " return image\n",
135 | "\n",
136 | "def predict(model,img,path = './'):\n",
137 | " fmap,logits = model(img.to('cuda'))\n",
138 | " params = list(model.parameters())\n",
139 | " weight_softmax = model.linear1.weight.detach().cpu().numpy()\n",
140 | " logits = sm(logits)\n",
141 | " _,prediction = torch.max(logits,1)\n",
142 | " confidence = logits[:,int(prediction.item())].item()*100\n",
143 | " print('confidence of prediction:',logits[:,int(prediction.item())].item()*100)\n",
144 | " idx = np.argmax(logits.detach().cpu().numpy())\n",
145 | " bz, nc, h, w = fmap.shape\n",
146 | " out = np.dot(fmap[-1].detach().cpu().numpy().reshape((nc, h*w)).T,weight_softmax[idx,:].T)\n",
147 | " predict = out.reshape(h,w)\n",
148 | " predict = predict - np.min(predict)\n",
149 | " predict_img = predict / np.max(predict)\n",
150 | " predict_img = np.uint8(255*predict_img)\n",
151 | " out = cv2.resize(predict_img, (im_size,im_size))\n",
152 | " heatmap = cv2.applyColorMap(out, cv2.COLORMAP_JET)\n",
153 | " img = im_convert(img[:,-1,:,:,:])\n",
154 | " result = heatmap * 0.5 + img*0.8*255\n",
155 | " cv2.imwrite('/content/1.png',result)\n",
156 | " result1 = heatmap * 0.5/255 + img*0.8\n",
157 | " r,g,b = cv2.split(result1)\n",
158 | " result1 = cv2.merge((r,g,b))\n",
159 | " plt.imshow(result1)\n",
160 | " plt.show()\n",
161 | " return [int(prediction.item()),confidence]\n",
162 | "#img = train_data[100][0].unsqueeze(0)\n",
163 | "#predict(model,img)"
164 | ],
165 | "execution_count": null,
166 | "outputs": []
167 | },
168 | {
169 | "cell_type": "code",
170 | "metadata": {
171 | "id": "asSbpP8fzlFj",
172 | "colab_type": "code",
173 | "colab": {}
174 | },
175 | "source": [
176 | "#!pip3 install face_recognition\n",
177 | "import torch\n",
178 | "import torchvision\n",
179 | "from torchvision import transforms\n",
180 | "from torch.utils.data import DataLoader\n",
181 | "from torch.utils.data.dataset import Dataset\n",
182 | "import os\n",
183 | "import numpy as np\n",
184 | "import cv2\n",
185 | "import matplotlib.pyplot as plt\n",
186 | "import face_recognition\n",
187 | "class validation_dataset(Dataset):\n",
188 | " def __init__(self,video_names,sequence_length = 60,transform = None):\n",
189 | " self.video_names = video_names\n",
190 | " self.transform = transform\n",
191 | " self.count = sequence_length\n",
192 | " def __len__(self):\n",
193 | " return len(self.video_names)\n",
194 | " def __getitem__(self,idx):\n",
195 | " video_path = self.video_names[idx]\n",
196 | " frames = []\n",
197 | " a = int(100/self.count)\n",
198 | " first_frame = np.random.randint(0,a) \n",
199 | " for i,frame in enumerate(self.frame_extract(video_path)):\n",
200 | " #if(i % a == first_frame):\n",
201 | " faces = face_recognition.face_locations(frame)\n",
202 | " try:\n",
203 | " top,right,bottom,left = faces[0]\n",
204 | " frame = frame[top:bottom,left:right,:]\n",
205 | " except:\n",
206 | " pass\n",
207 | " frames.append(self.transform(frame))\n",
208 | " if(len(frames) == self.count):\n",
209 | " break\n",
210 | " #print(\"no of frames\",len(frames))\n",
211 | " frames = torch.stack(frames)\n",
212 | " frames = frames[:self.count]\n",
213 | " return frames.unsqueeze(0)\n",
214 | " def frame_extract(self,path):\n",
215 | " vidObj = cv2.VideoCapture(path) \n",
216 | " success = 1\n",
217 | " while success:\n",
218 | " success, image = vidObj.read()\n",
219 | " if success:\n",
220 | " yield image\n",
221 | "def im_plot(tensor):\n",
222 | " image = tensor.cpu().numpy().transpose(1,2,0)\n",
223 | " b,g,r = cv2.split(image)\n",
224 | " image = cv2.merge((r,g,b))\n",
225 | " image = image*[0.22803, 0.22145, 0.216989] + [0.43216, 0.394666, 0.37645]\n",
226 | " image = image*255.0\n",
227 | " plt.imshow(image.astype(int))\n",
228 | " plt.show()"
229 | ],
230 | "execution_count": null,
231 | "outputs": []
232 | },
233 | {
234 | "cell_type": "code",
235 | "metadata": {
236 | "id": "J8YkC-vwzrkE",
237 | "colab_type": "code",
238 | "colab": {}
239 | },
240 | "source": [
241 | "#Code for making prediction\n",
242 | "im_size = 112\n",
243 | "mean=[0.485, 0.456, 0.406]\n",
244 | "std=[0.229, 0.224, 0.225]\n",
245 | "\n",
246 | "train_transforms = transforms.Compose([\n",
247 | " transforms.ToPILImage(),\n",
248 | " transforms.Resize((im_size,im_size)),\n",
249 | " transforms.ToTensor(),\n",
250 | " transforms.Normalize(mean,std)])\n",
251 | "path_to_videos = ['/content/drive/My Drive/Balanced_Face_only_data/aagfhgtpmv.mp4',\n",
252 | " '/content/drive/My Drive/Balanced_Face_only_data/aczrgyricp.mp4',\n",
253 | " '/content/drive/My Drive/Balanced_Face_only_data/agdkmztvby.mp4',\n",
254 | " '/content/drive/My Drive/Balanced_Face_only_data/abarnvbtwb.mp4']\n",
255 | "\n",
256 | "path_to_videos = ['/content/drive/My Drive/Youtube_Face_only_data/000_003.mp4',\n",
257 | " '/content/drive/My Drive/Youtube_Face_only_data/000.mp4',\n",
258 | " '/content/drive/My Drive/Youtube_Face_only_data/002_006.mp4',\n",
259 | " '/content/drive/My Drive/Youtube_Face_only_data/002.mp4'\n",
260 | " \n",
261 | "\n",
262 | "]\n",
263 | "\n",
264 | "path_to_videos= [\"/content/drive/My Drive/DFDC_REAL_Face_only_data/aabqyygbaa.mp4\"]\n",
265 | "\n",
266 | "video_dataset = validation_dataset(path_to_videos,sequence_length = 20,transform = train_transforms)\n",
267 | "model = Model(2).cuda()\n",
268 | "path_to_model = '/content/drive/My Drive/Models/model_87_acc_20_frames_final_data.pt'\n",
269 | "model.load_state_dict(torch.load(path_to_model))\n",
270 | "model.eval()\n",
271 | "for i in range(0,len(path_to_videos)):\n",
272 | " print(path_to_videos[i])\n",
273 | " prediction = predict(model,video_dataset[i],'./')\n",
274 | " if prediction[0] == 1:\n",
275 | " print(\"REAL\")\n",
276 | " else:\n",
277 | " print(\"FAKE\")"
278 | ],
279 | "execution_count": null,
280 | "outputs": []
281 | },
282 | {
283 | "cell_type": "code",
284 | "metadata": {
285 | "id": "YYV2DyoqBWJu",
286 | "colab_type": "code",
287 | "colab": {}
288 | },
289 | "source": [
290 | "#Optional : If you want to pass full frame for prediction instead of face cropped frame\n",
291 | "#code for full frame processing\n",
292 | "class validation_dataset(Dataset):\n",
293 | " def __init__(self,video_names,sequence_length = 60,transform = None):\n",
294 | " self.video_names = video_names\n",
295 | " self.transform = transform\n",
296 | " self.count = sequence_length\n",
297 | " def __len__(self):\n",
298 | " return len(self.video_names)\n",
299 | " def __getitem__(self,idx):\n",
300 | " video_path = self.video_names[idx]\n",
301 | " frames = []\n",
302 | " a = int(100/self.count)\n",
303 | " first_frame = np.random.randint(0,a) \n",
304 | " for i,frame in enumerate(self.frame_extract(video_path)):\n",
305 | " frames.append(self.transform(frame))\n",
306 | " if(len(frames) == self.count):\n",
307 | " break\n",
308 | " frames = torch.stack(frames)\n",
309 | " frames = frames[:self.count]\n",
310 | " return frames.unsqueeze(0)\n",
311 | " def frame_extract(self,path):\n",
312 | " vidObj = cv2.VideoCapture(path) \n",
313 | " success = 1\n",
314 | " while success:\n",
315 | " success, image = vidObj.read()\n",
316 | " if success:\n",
317 | " yield image"
318 | ],
319 | "execution_count": null,
320 | "outputs": []
321 | }
322 | ]
323 | }
--------------------------------------------------------------------------------
/Model Creation/Readme.md:
--------------------------------------------------------------------------------
1 | # Model Creation
2 | - You will be able to preprocess the dataset, train a PyTorch model of your own, and predict on new, unseen data with your model.
3 |
4 |
5 | ### Note: We recommend using [Google Colab](https://colab.research.google.com/) for running the notebooks in this directory.
6 |
7 |
8 | ## Dataset
9 | Some of the datasets we used are listed below:
10 | - [FaceForensics++](https://github.com/ondyari/FaceForensics)
11 | - [Celeb-DF](https://github.com/yuezunli/celeb-deepfakeforensics)
12 | - [Deepfake Detection Challenge](https://www.kaggle.com/c/deepfake-detection-challenge/data)
13 | ## Preprocessing
14 | - Load the dataset
15 | - Split each video into frames
16 | - Crop the face from each frame
17 | - Save the face-cropped video (a sketch of the whole pipeline follows the list)
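
A minimal, per-frame sketch of this pipeline (`preprocessing.ipynb` does the same thing, but detects faces in batches of 4 frames for speed; the paths below are placeholders):

```
import cv2
import face_recognition

def frame_extract(path):
    # yield the frames of a video one at a time
    vidObj = cv2.VideoCapture(path)
    success = True
    while success:
        success, image = vidObj.read()
        if success:
            yield image

def create_face_video(in_path, out_path, max_frames=150, size=(112, 112)):
    # write a new video containing only the cropped face from each frame
    out = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*'MJPG'), 30, size)
    for idx, frame in enumerate(frame_extract(in_path)):
        if idx > max_frames:
            break
        faces = face_recognition.face_locations(frame)
        if faces:
            top, right, bottom, left = faces[0]
            out.write(cv2.resize(frame[top:bottom, left:right, :], size))
    out.release()

create_face_video('/content/Real videos/example.mp4',
                  '/content/FF_REAL_Face_only_data/example.mp4')
```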
18 | ## Model and train
19 | - Load the preprocessed videos and their labels from a CSV file
20 | - Create a PyTorch model using transfer learning with ResNext50 and LSTM (see the sketch after this list)
21 | - Split the data into train and test sets
22 | - Train the model
23 | - Test the model
24 | - Save the model as a `.pt` file
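
For reference, the architecture created in `Model_and_train_csv.ipynb` (and reused in `Predict.ipynb`) is essentially the class below; the dummy input at the end is only there to illustrate the expected tensor shape:

```
import torch
from torch import nn
from torchvision import models

class Model(nn.Module):
    def __init__(self, num_classes, latent_dim=2048, lstm_layers=1,
                 hidden_dim=2048, bidirectional=False):
        super(Model, self).__init__()
        model = models.resnext50_32x4d(pretrained=True)
        # keep the convolutional backbone, drop ResNext's pooling and fc head
        self.model = nn.Sequential(*list(model.children())[:-2])
        # nb: the 4th positional argument of nn.LSTM is bias, so this sets
        # bias=False; kept as-is to stay compatible with the released checkpoints
        self.lstm = nn.LSTM(latent_dim, hidden_dim, lstm_layers, bidirectional)
        self.dp = nn.Dropout(0.4)
        self.linear1 = nn.Linear(2048, num_classes)
        self.avgpool = nn.AdaptiveAvgPool2d(1)

    def forward(self, x):
        # x: (batch, sequence_length, channels, height, width)
        batch_size, seq_length, c, h, w = x.shape
        x = x.view(batch_size * seq_length, c, h, w)
        fmap = self.model(x)            # per-frame feature maps
        x = self.avgpool(fmap)          # one 2048-d feature vector per frame
        x = x.view(batch_size, seq_length, 2048)
        x_lstm, _ = self.lstm(x, None)  # temporal modelling across the frames
        return fmap, self.dp(self.linear1(x_lstm[:, -1, :]))

model = Model(num_classes=2)
dummy = torch.randn(1, 20, 3, 112, 112)  # one video: 20 frames of 112x112 RGB
fmap, logits = model(dummy)
```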
25 | ## Predict
26 | - Load the saved PyTorch model
27 | - Predict the output based on the trained weights (a minimal example follows)
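
A minimal sketch of that flow, following `Predict.ipynb` (`Model` and `validation_dataset` are the classes defined in that notebook; the checkpoint and video paths are placeholders):

```
import torch
from torchvision import transforms

im_size = 112
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
transform = transforms.Compose([
    transforms.ToPILImage(),
    transforms.Resize((im_size, im_size)),
    transforms.ToTensor(),
    transforms.Normalize(mean, std)])

# Model and validation_dataset are defined in Predict.ipynb
model = Model(2).cuda()
model.load_state_dict(torch.load('model_87_acc_20_frames_final_data.pt'))
model.eval()

dataset = validation_dataset(['/content/example.mp4'],
                             sequence_length=20, transform=transform)
with torch.no_grad():
    _, logits = model(dataset[0].cuda())
print('REAL' if torch.argmax(logits, dim=1).item() == 1 else 'FAKE')
```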
28 |
29 | ## Helpers
30 | - Code in the Helpers directory can help with some important tasks like:
31 |     - Converting the JSON label file to a CSV label file (see the sketch below)
32 |     - Copying files from one directory to another
33 |     - Removing audio-altered files from the Deepfake Detection Challenge dataset
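
For example, the JSON-to-CSV conversion in `Helpers/label_json_to_csv.py` boils down to (file names are placeholders):

```
import pandas as pd

# transpose the metadata so each video becomes a row, then name the columns
df = pd.read_json('metadata.json').transpose()
df.to_csv('metadata.csv')
labels = pd.read_csv('metadata.csv')
labels.columns = ["URI", "label", "original", "split"]
labels.to_csv('metadata.csv', index=False)
```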
34 | ## Helpful Links
35 | - Preprocessed data
36 | - [Celeb-DF Fake processed videos](https://drive.google.com/drive/folders/1SxCb_Wr7N4Wsc-uvjUl0i-6PpwYmwN65?usp=sharing)
37 | - [Celeb-DF Real processed videos](https://drive.google.com/drive/folders/1g97v9JoD3pCKA2TxHe8ZLRe4buX2siCQ?usp=sharing)
38 | - [FaceForensics++ Real and fake processed videos](https://drive.google.com/drive/folders/1VIIWRLs6VBXRYKODgeOU7i6votLPPxT0?usp=sharing)
39 | - [DFDC Fake processed videos](https://drive.google.com/drive/folders/1yz3DBeFJvZ_QzWsyY7EwBNm7fx4MiOfF?usp=sharing)
40 | - [DFDC Real processed videos](https://drive.google.com/drive/folders/1wN3ZOd0WihthEeH__Lmj_ENhoXJN6U11?usp=sharing)
41 |
42 | **Note:** Labels for all the above preprocessed data are under `/labels/Gobal_metadata.csv`
43 |
44 | - Trained Models
45 | - You can just download our [trained models](https://drive.google.com/drive/folders/1UX8jXUXyEjhLLZ38tcgOwGsZ6XFSLDJ-?usp=sharing) and run the predict file for prediction.
46 |
47 | ***If you need any help regarding the project, please contact us. We will be happy to help.***
48 |
--------------------------------------------------------------------------------
/Model Creation/preprocessing.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "nbformat": 4,
3 | "nbformat_minor": 0,
4 | "metadata": {
5 | "colab": {
6 | "name": "preprocessing.ipynb",
7 | "provenance": [],
8 | "collapsed_sections": []
9 | },
10 | "kernelspec": {
11 | "name": "python3",
12 | "display_name": "Python 3"
13 | },
14 | "accelerator": "GPU"
15 | },
16 | "cells": [
17 | {
18 | "cell_type": "markdown",
19 | "metadata": {
20 | "id": "nMI7CYxcyiUB",
21 | "colab_type": "text"
22 | },
23 | "source": [
24 | "Before running the file Upload all your data set on your goole drive in a zip format"
25 | ]
26 | },
27 | {
28 | "cell_type": "code",
29 | "metadata": {
30 | "id": "YjtnZQkTu6tX",
31 | "colab_type": "code",
32 | "colab": {}
33 | },
34 | "source": [
35 | "#Mount our google drive\n",
36 | "from google.colab import drive\n",
37 | "drive.mount('/content/drive')"
38 | ],
39 | "execution_count": null,
40 | "outputs": []
41 | },
42 | {
43 | "cell_type": "code",
44 | "metadata": {
45 | "id": "f4y_fGlmur4v",
46 | "colab_type": "code",
47 | "colab": {}
48 | },
49 | "source": [
50 | "#before running this please change the RUNTIME to GPU (Runtime -> Change runtime type -> set harware accelarotor as GPU)\n",
51 | "#download and unzip the data from google drive Colab environment\n",
52 | "from google_drive_downloader import GoogleDriveDownloader as gdd\n",
53 | "#use only file id of the link\n",
54 | "#Note: Below link is just an example, Not an actual link. Actual Links are in ReadMe file\n",
55 | "#https://drive.google.com/file/d/1ubvKLzBDe5i1acxgGUK6ObeNBYCKUS07/view?usp=sharing\n",
56 | "url = '1ubvKLzBDe5i1acxgGUK6ObeNBYCKUS07'\n",
57 | "gdd.download_file_from_google_drive(file_id = url,dest_path='./data.zip',unzip=True)"
58 | ],
59 | "execution_count": null,
60 | "outputs": []
61 | },
62 | {
63 | "cell_type": "code",
64 | "metadata": {
65 | "id": "1f40EeRuvAkO",
66 | "colab_type": "code",
67 | "colab": {}
68 | },
69 | "source": [
70 | "#To get the average frame count \n",
71 | "import json\n",
72 | "import glob\n",
73 | "import numpy as np\n",
74 | "import cv2\n",
75 | "import copy\n",
76 | "#change the path accordingly\n",
77 | "video_files = glob.glob('/content/Real videos/*.mp4')\n",
78 | "#video_files1 = glob.glob('/content/dfdc_train_part_0/*.mp4')\n",
79 | "#video_files += video_files1\n",
80 | "frame_count = []\n",
81 | "for video_file in video_files:\n",
82 | " cap = cv2.VideoCapture(video_file)\n",
83 | " if(int(cap.get(cv2.CAP_PROP_FRAME_COUNT))<150):\n",
84 | " video_files.remove(video_file)\n",
85 | " continue\n",
86 | " frame_count.append(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)))\n",
87 | "print(\"frames\" , frame_count)\n",
88 | "print(\"Total number of videos: \" , len(frame_count))\n",
89 | "print('Average frame per video:',np.mean(frame_count))"
90 | ],
91 | "execution_count": null,
92 | "outputs": []
93 | },
94 | {
95 | "cell_type": "code",
96 | "metadata": {
97 | "id": "U92Ovn3JvV52",
98 | "colab_type": "code",
99 | "colab": {}
100 | },
101 | "source": [
102 | "# to extract frame\n",
103 | "def frame_extract(path):\n",
104 | " vidObj = cv2.VideoCapture(path) \n",
105 | " success = 1\n",
106 | " while success:\n",
107 | " success, image = vidObj.read()\n",
108 | " if success:\n",
109 | " yield image\n",
110 | "!pip3 install face_recognition\n",
111 | "!mkdir '/content/drive/My Drive/FF_REAL_Face_only_data'\n",
112 | "import torch\n",
113 | "import torchvision\n",
114 | "from torchvision import transforms\n",
115 | "from torch.utils.data import DataLoader\n",
116 | "from torch.utils.data.dataset import Dataset\n",
117 | "import os\n",
118 | "import numpy as np\n",
119 | "import cv2\n",
120 | "import matplotlib.pyplot as plt\n",
121 | "import face_recognition\n",
122 | "from tqdm.autonotebook import tqdm\n",
123 | "# process the frames\n",
124 | "def create_face_videos(path_list,out_dir):\n",
125 | " already_present_count = glob.glob(out_dir+'*.mp4')\n",
126 | " print(\"No of videos already present \" , len(already_present_count))\n",
127 | " for path in tqdm(path_list):\n",
128 | " out_path = os.path.join(out_dir,path.split('/')[-1])\n",
129 | " file_exists = glob.glob(out_path)\n",
130 | " if(len(file_exists) != 0):\n",
131 | " print(\"File Already exists: \" , out_path)\n",
132 | " continue\n",
133 | " frames = []\n",
134 | " flag = 0\n",
135 | " face_all = []\n",
136 | " frames1 = []\n",
137 | " out = cv2.VideoWriter(out_path,cv2.VideoWriter_fourcc('M','J','P','G'), 30, (112,112))\n",
138 | " for idx,frame in enumerate(frame_extract(path)):\n",
139 | " #if(idx % 3 == 0):\n",
140 | " if(idx <= 150):\n",
141 | " frames.append(frame)\n",
142 | " if(len(frames) == 4):\n",
143 | " faces = face_recognition.batch_face_locations(frames)\n",
144 | " for i,face in enumerate(faces):\n",
145 | " if(len(face) != 0):\n",
146 | " top,right,bottom,left = face[0]\n",
147 | " try:\n",
148 | " out.write(cv2.resize(frames[i][top:bottom,left:right,:],(112,112)))\n",
149 | " except:\n",
150 | " pass\n",
151 | " frames = []\n",
152 | " try:\n",
153 | " del top,right,bottom,left\n",
154 | " except:\n",
155 | " pass\n",
156 | " out.release()"
157 | ],
158 | "execution_count": null,
159 | "outputs": []
160 | },
161 | {
162 | "cell_type": "code",
163 | "metadata": {
164 | "id": "sF5qiWGLvei-",
165 | "colab_type": "code",
166 | "colab": {}
167 | },
168 | "source": [
169 | "create_face_videos(video_files,'/content/drive/My Drive/FF_REAL_Face_only_data/')"
170 | ],
171 | "execution_count": null,
172 | "outputs": []
173 | }
174 | ]
175 | }
176 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Deepfake detection using Deep Learning (ResNext and LSTM)
2 |
3 | ## Give a Star ⭐ to the repo
4 |
5 |
6 |
7 | ## Latest Update
8 | #### We have dockerised the [Django Application](https://github.com/abhijitjadhav1998/Deepfake_detection_using_deep_learning/tree/master/Django%20Application), so you can now spin up a container within seconds without worrying about dependencies.
9 |
10 |
11 | ## 1. Introduction
12 | This project aims to detect video deepfakes using deep learning techniques like ResNext and LSTM. We have achieved deepfake detection by using transfer learning, where a pretrained ResNext CNN is used to obtain frame-level feature vectors and an LSTM layer is then trained on those features. For more details, follow the [documentation](https://github.com/abhijitjadhav1998/Deepfake_detection_using_deep_learning/tree/master/Documentation).
13 |
14 | You can also watch [this YouTube video](https://www.youtube.com/watch?v=_q16aJTXVRE) to get a better intuition about the project.
15 | You can watch [this playlist](https://www.youtube.com/watch?v=quJ8Rv84oA0&list=PLNIj0dkfMA1FsD5xR4IEc8vdwr66_WExl) for a step-by-step installation guide.
16 |
17 | You can read more about the project on
18 |
19 |
20 | ## 2. Directory Structure
21 | For ease of understanding, the project is structured in the format below:
22 | ```
23 | Deepfake_detection_using_deep_learning
24 | |
25 | |--- Django Application
26 | |--- Model Creation
27 | |--- Documentation
28 | ```
29 | 1. Django Application
30 |    - This directory contains the Django application of our work, where a user can upload a video and submit it to the model for prediction. The trained model performs the prediction and the result is displayed on the screen.
31 | 2. Model Creation
32 |    - This directory contains the step-by-step process of creating and training a deepfake detection model using our approach.
33 | 3. Documentation
34 |    - This directory contains all the documentation created during the project.
35 |
36 | ## 3. System Architecture
37 |
38 |
39 |
40 |
41 | ## 4. Demo
42 | ### You can watch the [YouTube video](https://www.youtube.com/watch?v=_q16aJTXVRE&t=823s) for a demo
43 |
44 |
45 |
46 |
47 |
48 | ## 5. Our Results
49 |
50 | | Model Name | No. of videos | No. of frames (per video) | Accuracy (%) |
51 | |------------|--------------|--------------|----------|
52 | |model_84_acc_10_frames_final_data.pt |6000 |10 |84.21461|
53 | |model_87_acc_20_frames_final_data.pt | 6000 |20 |87.79160|
54 | |model_89_acc_40_frames_final_data.pt | 6000| 40 |89.34681|
55 | |model_90_acc_60_frames_final_data.pt | 6000| 60 |90.59097 |
56 | |model_91_acc_80_frames_final_data.pt | 6000 | 80 | 91.49818 |
57 | |model_93_acc_100_frames_final_data.pt| 6000 | 100 | 93.58794|
58 |
59 | ## 6. Contributors
60 |
61 |
62 |
63 |
64 |
72 |
73 |
74 |
75 |
76 |
77 |
78 |
79 |
80 |
81 |
82 |
83 |
84 |
85 | #### 6.1 Offline Contributors
86 | 1. Jay Patel
87 | 2. Hitendra Patil
88 | 3. Abhishek Patange
89 |
90 | ## 7. License
91 |
92 | [](https://www.gnu.org/licenses/gpl-3.0)
93 |
94 | ## 8. We welcome Open Source Contributions.
95 | ### Below are some changes that can be applied to the project. New ideas will be appreciated.
96 | - [ ] Deploying the application on a free cloud platform
97 | - [ ] Creating an open source API for detection
98 | - [ ] Batch processing of the entire video instead of only the first 'x' frames
99 | - [ ] Optimizing the code for faster execution
100 | #### Completed
101 | - [X] Dockerizing the app
102 | - [X] Enabling the project to run on non-CUDA computers, i.e. on CPU-only machines or AMD GPUs
103 |
104 | ## 9. Don't forget to Star ⭐ this repo 😉
105 |
--------------------------------------------------------------------------------
/github_assets/System Architecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/abhijithjadhav/Deepfake_detection_using_deep_learning/534f0e9915754b3a21eb1a838925f60d90f2866c/github_assets/System Architecture.png
--------------------------------------------------------------------------------
/github_assets/fake gif.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/abhijithjadhav/Deepfake_detection_using_deep_learning/534f0e9915754b3a21eb1a838925f60d90f2866c/github_assets/fake gif.gif
--------------------------------------------------------------------------------
/github_assets/fakegif.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/abhijithjadhav/Deepfake_detection_using_deep_learning/534f0e9915754b3a21eb1a838925f60d90f2866c/github_assets/fakegif.gif
--------------------------------------------------------------------------------