├── .idea
│   ├── .gitignore
│   ├── Explainable_Efficient_ ViT_DFD.iml
│   ├── deployment.xml
│   ├── inspectionProfiles
│   │   ├── Project_Default.xml
│   │   └── profiles_settings.xml
│   ├── misc.xml
│   ├── modules.xml
│   ├── runConfigurations
│   │   ├── explain_model.xml
│   │   ├── test_model.xml
│   │   ├── test_model10_Face2Face.xml
│   │   ├── test_model10_FaceShifter.xml
│   │   ├── test_model10_FaceSwap.xml
│   │   ├── test_model10_NeuralTextures.xml
│   │   ├── test_model10_Original.xml
│   │   ├── train_8.xml
│   │   └── train_model.xml
│   └── vcs.xml
├── README.md
├── deep_fakes_explain
│   └── models
│       └── efficientnetB0_checkpoint89_All
├── environment.yml
├── explain_model
│   ├── baselines
│   │   └── EfficientViT
│   │       ├── evit_model10.py
│   │       ├── explained_architecture.yaml
│   │       ├── transforms
│   │       │   └── albu.py
│   │       └── utils.py
│   ├── explain_model.py
│   ├── samples_list_All_efficientnetB0_checkpoint89_All_refac.csv
│   └── samples_list_few_efficientnetB0_checkpoint89_All_refac.csv
├── model_test_train
│   ├── Copy_selected_images
│   ├── configs
│   │   └── explained_architecture.yaml
│   ├── deepfakes_dataset.py
│   ├── evit_model10.py
│   ├── results
│   │   └── tests
│   │       ├── All_efficientnetB0_checkpoint89_All_labels.txt
│   │       ├── Deepfakes_efficientnetB0_checkpoint89_All_labels.txt
│   │       ├── Face2Face_efficientnetB0_checkpoint89_All_labels.txt
│   │       ├── FaceShifter_efficientnetB0_checkpoint89_All_labels.txt
│   │       ├── FaceSwap_efficientnetB0_checkpoint89_All_labels.txt
│   │       ├── NeuralTextures_efficientnetB0_checkpoint89_All_labels.txt
│   │       ├── efficientnetB0_checkpoint89_All_All_acc94.0099833610649_loss0.42291456_f10.9631901840490797.jpg
│   │       ├── efficientnetB0_checkpoint89_All_Deepfakes_acc93.0_loss0.5777812_f10.9292929292929293.jpg
│   │       ├── efficientnetB0_checkpoint89_All_Face2Face_acc96.0_loss0.56776446_f10.9607843137254902.jpg
│   │       ├── efficientnetB0_checkpoint89_All_FaceShifter_acc96.0_loss0.5703947_f10.9607843137254902.jpg
│   │       ├── efficientnetB0_checkpoint89_All_FaceSwap_acc96.5_loss0.56512296_f10.9658536585365853.jpg
│   │       ├── efficientnetB0_checkpoint89_All_NeuralTextures_acc88.55_loss0.5907205_f10.87958.jpg
│   │       ├── samples_list_All_efficientnetB0_checkpoint89_All.csv
│   │       ├── samples_list_All_efficientnetB0_checkpoint89_All_refac.csv
│   │       ├── samples_list_Deepfakes_efficientnetB0_checkpoint89_All.csv
│   │       ├── samples_list_Face2Face_efficientnetB0_checkpoint89_All.csv
│   │       ├── samples_list_FaceShifter_efficientnetB0_checkpoint89_All.csv
│   │       ├── samples_list_FaceSwap_efficientnetB0_checkpoint89_All.csv
│   │       └── samples_list_NeuralTextures_efficientnetB0_checkpoint89_All.csv
│   ├── test_model.py
│   ├── train_model.py
│   ├── transforms
│   │   └── albu.py
│   └── utils.py
└── preprocessing
    ├── detect_faces.py
    ├── extract_crops.py
    ├── face_detector.py
    └── utils.py
/.idea/.gitignore:
--------------------------------------------------------------------------------
1 | # Default ignored files
2 | /shelf/
3 | /workspace.xml
4 | # Editor-based HTTP Client requests
5 | /httpRequests/
6 | # Datasource local storage ignored files
7 | /dataSources/
8 | /dataSources.local.xml
9 |
--------------------------------------------------------------------------------
/.idea/Explainable_Efficient_ ViT_DFD.iml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/deployment.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/inspectionProfiles/Project_Default.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/inspectionProfiles/profiles_settings.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/misc.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/runConfigurations/explain_model.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/runConfigurations/test_model.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/runConfigurations/test_model10_Face2Face.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/runConfigurations/test_model10_FaceShifter.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/runConfigurations/test_model10_FaceSwap.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/runConfigurations/test_model10_NeuralTextures.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/runConfigurations/test_model10_Original.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/runConfigurations/train_8.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/runConfigurations/train_model.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Explainable Attention Based Deepfake Detector
2 | The project consists of a video deepfake detector based on a hybrid EfficientNet CNN and Vision Transformer architecture. The model's inference results can be analyzed and explained by rendering a heatmap visualization, computed from a relevancy map derived from the attention layers of the Transformer and overlaid on the input face image.
3 |
4 | 
5 |
6 | In addition, the project enables re-training and testing the model's performance and explainability with new parameters.
7 |
8 | ## How to Install
9 | - [ ] Clone the repository and move into it:
10 | ```
11 | git clone https://github.com/noame12/Explainable_Attention_Based_Deepfake_Detector.git
12 | cd Explainable_Attention_Based_Deepfake_Detector
13 | ```
14 | - [ ] Setup Python environment using Conda:
15 | ```
16 | conda env create --file environment.yml
17 | conda activate explain_deepfakes
18 | export PYTHONPATH=.
19 | ```
20 | ## Run the Explanation process on the deepfake detection model
21 | **System requirements:**
22 | To run the explainability process on more than 5 face images, a machine with a Tesla T4 (or stronger) GPU is required.
23 |
24 | - [ ] Move to model explanation directory:
25 | ```
26 | cd explain_model
27 | ```
28 |
29 | - [ ] Create an input directory for the input face images:
30 | ```
31 | mkdir examples
32 | ```
33 | - [ ] Download the face images from the [samples](https://drive.google.com/drive/folders/1-JtWGMyd7YaTa56R6uYpjvwmUyW5q-zN?usp=sharing) drive into the newly created local _'examples'_ directory.
34 |
35 | The samples drive contains 600 face images extracted from 600 test videos. It consists of 100 images for each of the five deepfake methods – Face2Face, FaceShifter, FaceSwap, NeuralTextures and Deepfakes – as well as 100 untouched real (aka Original) face images.
36 |
37 | An exhaustive list of the face image files for running the explainability method is provided in the [samples_list_All_efficientnetB0_checkpoint89_All_refac.csv](https://github.com/noame12/Explainable_Attention_Based_Deepfake_Detector/blob/master/explain_model/samples_list_All_efficientnetB0_checkpoint89_All_refac.csv) file in the _'explain_model'_ directory. To run the process on a subset of the samples, extract a customized list from the exhaustive list.
38 |
39 | **!Note:** Make sure to keep the same .csv file name or update the name in the _explain_model.py_ file (line 111) prior to running the _explain_model.py_ module.
40 |
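If you only need a subset, one possible way to cut the exhaustive list down is sketched below; the output file name is illustrative, so either reuse the expected .csv name or update it in _explain_model.py_ as noted above.
```
# keep the CSV header plus the first 20 sample rows (adjust the count as needed)
head -n 21 samples_list_All_efficientnetB0_checkpoint89_All_refac.csv > samples_list_custom.csv
```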
41 | - [ ] Run the explanation visualization process:
42 | ```
43 | python explain_model.py
44 | ```
45 |
46 | The output of the explanation process can be viewed in the _‘explanation’_ directory (created automatically).
47 | 
48 |
49 |
50 | The results of the explainability process, run in advance on all the examples, can be seen in the [visualization results drive](https://drive.google.com/drive/folders/1fxi-ilXykkq-RXwbNRtrwdicxKROrHae?usp=sharing).
51 |
52 |
53 |
54 | ## Test the deepfake detection model
55 | The test module enables testing the performance of the deepfake detector.
56 | The input data to the model is the test (or verification) dataset of face images extracted from the fake and real video sequences.
57 | The test process generates four outputs:
58 | - Accuracy, AUC (Area Under Curve) and F1 scores of the classifier
59 | - ROC diagram
60 | - A .txt file with the classification results for each video sequence
61 | - A .csv list of face image files – one sample per video.
62 |
63 | **System requirements:**
64 | To run the test process, a machine with **two** Tesla T4 (or stronger) GPUs is required.
65 |
66 |
67 | 
68 |
69 | ### Get the data
70 | - [ ] Download and extract the dataset:
71 | [FaceForensic++](https://github.com/ondyari/FaceForensics/blob/master/dataset/)
72 |
73 | The videos should be downloaded into the _'deep_fakes_explain/dataset'_ directory.
74 |
75 | ### Preprocess the data
76 | To perform deepfake detection it is first necessary to identify and extract the faces from all the videos in the dataset.
77 |
78 | - [ ] Detect the faces inside the videos:
79 | ```
80 | cd preprocessing
81 | ```
82 | ```
83 | python detect_faces.py --data_path deep_fakes_explain/dataset --dataset FACEFORENSICS
84 | ```
85 | **!Note:** The default dataset for the detect_faces.py module is DFDC, therefore it is important to specify the --dataset parameter as described above.
86 |
87 | The detected face boxes (coordinates) will be saved inside the "deep_fakes_explain/dataset/boxes" folder.
88 | 
89 |
90 |
91 | - [ ] Extract the detected faces obtaining the images:
92 | ```
93 | python extract_crops.py --data_path deep_fakes_explain/dataset --output_path deep_fakes_explain/dataset/training_set
94 | --dataset FACEFORENSICS
95 | ```
96 | Repeat detection and extraction for all the different parts of your dataset. The --output_path parameter above is set to the training_set directory; you should repeat the process for the validation_set and test_set directories as well (a scripted example follows the folder structure below).
97 | The folders’ structure should look as follows:
98 | 
99 |
100 | Each (fake) method directory contains a directory for every video. Each video directory contains all the face extraction files for that video, in .png format.
101 |
102 | ```
103 | - training_set
104 | - Deepfakes
105 | - video_name_0
106 | 0_0.png
107 | 1_0.png
108 | ...
109 | N_0.png
110 | ...
111 | - video_name_K
112 | 0_0.png
113 | 1_0.png
114 | ...
115 | M_0.png
116 | - Face2Face
117 | - FaceShifter
118 | - FaceSwap
119 | - NeuralTextures
120 | - Original
121 | - validation_set
122 | ...
123 | ...
124 | ...
125 | - test_set
126 | ...
127 | ...
128 | ```
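A minimal sketch for scripting the extraction step over the three splits, assuming extract_crops.py accepts the same flags for each split as shown above:
```
for split in training_set validation_set test_set; do
  python extract_crops.py --data_path deep_fakes_explain/dataset \
      --output_path deep_fakes_explain/dataset/$split --dataset FACEFORENSICS
done
```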
129 | ### Test the model
130 | - [ ] Move into the test module folder:
131 | ```
132 | cd model_test_train
133 | ```
134 | - [ ] Run the following command to evaluate the deepfake detector model, providing the pre-trained model path and the configuration file available in the configs directory:
135 | ```
136 | python test_model.py --model_path ../deep_fakes_explain/models/efficientnetB0_checkpoint89_All --config configs/explained_architecture.yaml
137 | ```
138 | By default, the command will test on all datasets (All), but you can customize the following parameters:
139 | - --dataset: Which dataset to use (Deepfakes|Face2Face|FaceShifter|FaceSwap|NeuralTextures|Original|All)
140 | - --workers: Number of data loader workers (default: 16)
141 | - --frames_per_video: Number of equidistant frames for each video (default: 20)
142 | - --batch_size: Prediction Batch Size (default: 12)
143 |
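For example, a possible invocation that evaluates only the Face2Face subset with more frames per video and a smaller batch size (the parameter values here are only illustrative):
```
python test_model.py --model_path ../deep_fakes_explain/models/efficientnetB0_checkpoint89_All \
    --config configs/explained_architecture.yaml --dataset Face2Face --frames_per_video 30 --batch_size 8
```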
144 | The results of the test process are saved in the _'results/tests'_ directory.
145 |
146 | ## Train the model
147 | The train module enables re-training the model with different parameters. Re-training may be desired to verify or test ideas for improving model performance or explainability.
148 |
149 | To evaluate a customized model trained from scratch with a different architecture, you need to edit the configs/explained_architecture.yaml file.
150 |
151 | **System requirements:**
152 | A machine with **two** Tesla T4 (or stronger) GPUs, a CPU with 16 vCPUs, and 100 GB of RAM is required.
153 |
154 | To train the model using my architecture configuration:
155 | - [ ] Verify that you are in the _‘model_test_train’_ directory
156 | - [ ] Run the train module:
157 | ```
158 | python train_model.py --config configs/explained_architecture.yaml
159 | ```
160 | By default, the command will train on all method datasets (All), but you can customize the following parameters:
161 | - --num_epochs: Number of training epochs (default: 100)
162 | - --workers: Number of data loader workers (default: 16)
163 | - --resume: Path to latest checkpoint (default: none)
164 | - --dataset: Which dataset to use (Deepfakes|Face2Face|FaceShifter|FaceSwap|NeuralTextures|All) (default: All)
165 | - --max_videos: Maximum number of videos to use for training (default: all)
166 | - --patience: How many epochs to wait before early stopping when the validation loss does not improve (default: 5)
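For example, a possible invocation that trains only on the Deepfakes method with a shorter schedule (the parameter values here are only illustrative):
```
python train_model.py --config configs/explained_architecture.yaml --dataset Deepfakes --num_epochs 50 --patience 10
```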
167 |
168 | ## Credits
169 | - The Deepfake Detector implementation is based on the [Hybrid EfficientNet Vision Transformer](https://github.com/davide-coccomini/Combining-EfficientNet-and-Vision-Transformers-for-Video-Deepfake-Detection) implementation.
170 | - The explainability method is based on the [Transformer MM Explainability](https://github.com/hila-chefer/Transformer-MM-Explainability) implementation.
171 |
--------------------------------------------------------------------------------
/deep_fakes_explain/models/efficientnetB0_checkpoint89_All:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/noame12/Explainable_Attention_Based_Deepfake_Detector/96ad106528ca46da1f456a886739d9f38960027f/deep_fakes_explain/models/efficientnetB0_checkpoint89_All
--------------------------------------------------------------------------------
/environment.yml:
--------------------------------------------------------------------------------
1 | name: explain_deepfakes
2 | channels:
3 | - defaults
4 | dependencies:
5 | - defaults/linux-64::_libgcc_mutex==0.1=main
6 | - defaults/linux-64::blas==1.0=mkl
7 | - ca-certificates
8 | - defaults/linux-64::intel-openmp==2020.2=254
9 | - defaults/linux-64::ld_impl_linux-64==2.33.1=h53a641e_7
10 | - defaults/linux-64::libgfortran4==7.5.0=ha8ba4b0_17
11 | - defaults/linux-64::libstdcxx-ng==9.1.0=hdf63c60_0
12 | - defaults/linux-64::pandoc==2.12=h06a4308_0
13 | - defaults/linux-64::libgcc-ng==9.1.0=hdf63c60_0
14 | - defaults/linux-64::libgfortran-ng==7.5.0=ha8ba4b0_17
15 | - defaults/linux-64::mkl==2020.2=256
16 | - conda-forge/label/gcc7/linux-64::bzip2==1.0.6=h14c3975_1002
17 | - defaults/linux-64::cudatoolkit==10.2.89=hfd86e86_1
18 | - conda-forge/label/gcc7/linux-64::graphite2==1.3.13=hf484d3e_1000
19 | - conda-forge/label/gcc7/linux-64::icu==58.2=hf484d3e_1000
20 | - conda-forge/label/gcc7/linux-64::jpeg==9c=h14c3975_1001
21 | - defaults/linux-64::libffi==3.3=he6710b0_2
22 | - conda-forge/label/gcc7/linux-64::libglu==9.0.0=hf484d3e_1000
23 | - conda-forge/label/gcc7/linux-64::libiconv==1.15=h14c3975_1004
24 | - defaults/linux-64::libopus==1.3.1=h7b6447c_0
25 | - defaults/linux-64::libsodium==1.0.18=h7b6447c_0
26 | - conda-forge/label/gcc7/linux-64::libuuid==2.32.1=h14c3975_1000
27 | - defaults/linux-64::libuv==1.40.0=h7b6447c_0
28 | - defaults/linux-64::libvpx==1.7.0=h439df22_0
29 | - defaults/linux-64::libxcb==1.14=h7b6447c_0
30 | - defaults/linux-64::ncurses==6.2=he6710b0_1
31 | - openssl
32 | - defaults/linux-64::pcre==8.45=h295c915_0
33 | - defaults/linux-64::pixman==0.40.0=h7f8727e_1
34 | - conda-forge/label/gcc7/linux-64::xorg-inputproto==2.3.2=h14c3975_1002
35 | - conda-forge/label/gcc7/linux-64::xorg-kbproto==1.0.7=h14c3975_1002
36 | - conda-forge/label/gcc7/linux-64::xorg-libxau==1.0.8=h14c3975_1006
37 | - conda-forge/label/gcc7/linux-64::xorg-xextproto==7.3.0=h14c3975_1002
38 | - conda-forge/label/gcc7/linux-64::xorg-xproto==7.0.31=h14c3975_1007
39 | - defaults/linux-64::xz==5.2.5=h7b6447c_0
40 | - conda-forge/linux-64::yaml==0.2.5=h516909a_0
41 | - defaults/linux-64::zlib==1.2.11=h7b6447c_3
42 | - defaults/linux-64::glib==2.69.1=h4ff587b_1
43 | - defaults/linux-64::hdf5==1.10.2=hba1933b_1
44 | - defaults/linux-64::libpng==1.6.37=hbc83047_0
45 | - conda-forge/label/gcc7/linux-64::libtiff==4.0.10=h648cc4a_1001
46 | - conda-forge/label/gcc7/linux-64::libxml2==2.9.8=h143f9aa_1005
47 | - defaults/linux-64::readline==8.1=h27cfd23_0
48 | - defaults/linux-64::tk==8.6.10=hbc83047_0
49 | - conda-forge/label/gcc7/linux-64::xorg-fixesproto==5.0=h14c3975_1002
50 | - conda-forge/label/gcc7/linux-64::xorg-libx11==1.6.6=h14c3975_1000
51 | - defaults/linux-64::zeromq==4.3.4=h2531618_0
52 | - defaults/linux-64::freetype==2.11.0=h70c0345_0
53 | - defaults/linux-64::sqlite==3.35.4=hdfb4753_0
54 | - conda-forge/label/gcc7/linux-64::xorg-libxext==1.3.3=h14c3975_1004
55 | - conda-forge/label/gcc7/linux-64::xorg-libxfixes==5.0.3=h14c3975_1004
56 | - defaults/linux-64::ffmpeg==4.0=hcdf2ecd_0
57 | - conda-forge/label/gcc7/linux-64::fontconfig==2.13.1=h2176d3f_1000
58 | - defaults/linux-64::python==3.7.10=hdb3f193_0
59 | - conda-forge/label/gcc7/linux-64::xorg-libxi==1.7.9=h14c3975_1002
60 | - defaults/linux-64::async_generator==1.10=py37h28b3542_0
61 | - defaults/noarch::attrs==20.3.0=pyhd3eb1b0_0
62 | - defaults/noarch::backcall==0.2.0=pyhd3eb1b0_0
63 | - defaults/linux-64::cairo==1.16.0=hf32fb01_1
64 | - defaults/noarch::decorator==5.1.0=pyhd3eb1b0_0
65 | - defaults/noarch::defusedxml==0.7.1=pyhd3eb1b0_0
66 | - defaults/linux-64::entrypoints==0.3=py37_0
67 | - conda-forge/label/gcc7/linux-64::freeglut==3.0.0=hf484d3e_1005
68 | - defaults/noarch::ipython_genutils==0.2.0=pyhd3eb1b0_1
69 | - conda-forge/noarch::keras==2.7.0=pyhd8ed1ab_0
70 | - defaults/linux-64::markupsafe==1.1.1=py37h14c3975_1
71 | - defaults/linux-64::mistune==0.8.4=py37h14c3975_1001
72 | - defaults/noarch::nest-asyncio==1.5.1=pyhd3eb1b0_0
73 | - defaults/linux-64::ninja==1.10.2=py37hd09550d_3
74 | - defaults/linux-64::pandocfilters==1.4.3=py37h06a4308_1
75 | - defaults/noarch::parso==0.8.2=pyhd3eb1b0_0
76 | - defaults/noarch::pickleshare==0.7.5=pyhd3eb1b0_1003
77 | - defaults/noarch::prometheus_client==0.10.0=pyhd3eb1b0_0
78 | - defaults/noarch::ptyprocess==0.7.0=pyhd3eb1b0_2
79 | - defaults/noarch::pycparser==2.20=py_2
80 | - defaults/noarch::pyparsing==2.4.7=pyhd3eb1b0_0
81 | - defaults/linux-64::pyrsistent==0.17.3=py37h7b6447c_0
82 | - conda-forge/linux-64::python_abi==3.7=2_cp37m
83 | - defaults/linux-64::pyzmq==20.0.0=py37h2531618_1
84 | - defaults/noarch::send2trash==1.5.0=pyhd3eb1b0_1
85 | - defaults/linux-64::six==1.15.0=py37h06a4308_0
86 | - defaults/noarch::testpath==0.4.4=pyhd3eb1b0_0
87 | - defaults/linux-64::tornado==6.1=py37h27cfd23_0
88 | - defaults/noarch::typing_extensions==3.7.4.3=pyha847dfd_0
89 | - defaults/noarch::wcwidth==0.2.5=py_0
90 | - defaults/linux-64::webencodings==0.5.1=py37_1
91 | - defaults/noarch::wheel==0.36.2=pyhd3eb1b0_0
92 | - defaults/noarch::zipp==3.4.1=pyhd3eb1b0_0
93 | - certifi
94 | - defaults/linux-64::cffi==1.14.5=py37h261ae71_0
95 | - defaults/linux-64::harfbuzz==1.8.8=hffaf4a1_0
96 | - defaults/linux-64::importlib-metadata==3.10.0=py37h06a4308_0
97 | - defaults/linux-64::jasper==2.0.14=hd8c5072_2
98 | - defaults/linux-64::jedi==0.17.0=py37_0
99 | - defaults/linux-64::mkl-service==2.3.0=py37he8ac12f_0
100 | - defaults/noarch::packaging==20.9=pyhd3eb1b0_0
101 | - defaults/noarch::pexpect==4.8.0=pyhd3eb1b0_3
102 | - defaults/noarch::prompt-toolkit==3.0.17=pyh06a4308_0
103 | - defaults/noarch::python-dateutil==2.8.1=pyhd3eb1b0_0
104 | - conda-forge/linux-64::pyyaml==5.3.1=py37hb5d75c8_1
105 | - defaults/linux-64::terminado==0.9.4=py37h06a4308_0
106 | - defaults/noarch::traitlets==5.0.5=pyhd3eb1b0_0
107 | - defaults/linux-64::argon2-cffi==20.1.0=py37h27cfd23_1
108 | - defaults/noarch::importlib_metadata==3.10.0=hd3eb1b0_0
109 | - defaults/linux-64::jupyter_core==4.7.1=py37h06a4308_0
110 | - defaults/linux-64::libopencv==3.4.2=hb342d67_1
111 | - defaults/linux-64::numpy-base==1.19.2=py37hfa32c7d_0
112 | - defaults/linux-64::setuptools==52.0.0=py37h06a4308_0
113 | - defaults/noarch::bleach==3.3.0=pyhd3eb1b0_0
114 | - defaults/noarch::jinja2==2.11.3=pyhd3eb1b0_0
115 | - defaults/noarch::jsonschema==3.2.0=py_2
116 | - defaults/noarch::jupyter_client==6.1.12=pyhd3eb1b0_0
117 | - pip
118 | - defaults/noarch::pygments==2.8.1=pyhd3eb1b0_0
119 | - defaults/linux-64::ipython==7.22.0=py37hb070fc8_0
120 | - defaults/noarch::jupyterlab_pygments==0.1.2=py_0
121 | - defaults/noarch::nbformat==5.1.3=pyhd3eb1b0_0
122 | - defaults/linux-64::ipykernel==5.3.4=py37h5ca1d4c_0
123 | - defaults/noarch::nbclient==0.5.3=pyhd3eb1b0_0
124 | - defaults/linux-64::nbconvert==6.0.7=py37_0
125 | - defaults/linux-64::notebook==6.3.0=py37h06a4308_0
126 | - defaults/linux-64::nb_conda_kernels==2.3.1=py37h06a4308_0
127 | - nb_conda
128 | - conda-forge/linux-64::h5py==2.8.0=py37h3010b51_1003
129 | - defaults/linux-64::mkl_fft==1.3.0=py37h54f3939_0
130 | - defaults/linux-64::mkl_random==1.1.1=py37h0573a6f_0
131 | - defaults/linux-64::numpy==1.19.2=py37h54aff64_0
132 | - defaults/linux-64::py-opencv==3.4.2=py37hb342d67_1
133 | - pytorch/linux-64::pytorch==1.8.1=py3.7_cuda10.2_cudnn7.6.5_0
134 | - defaults/linux-64::scipy==1.6.2=py37h91f5cce_0
135 | - defaults/linux-64::opencv==3.4.2=py37h6fd60c2_1
136 | - conda-forge/noarch::mtcnn==0.1.1=pyhd8ed1ab_0
137 | - jupyter
138 | - scikit-image
139 | - python-utils
140 | - transformers
141 | prefix: /home/eshel_noam/miniconda3/envs/explain_deepfakes
142 |
--------------------------------------------------------------------------------
/explain_model/baselines/EfficientViT/evit_model10.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn
3 | from einops import rearrange
4 | from efficientnet_pytorch import EfficientNet
5 | import cv2
6 | import re
7 | import numpy as np
8 | from torch import einsum
9 | from random import randint
10 |
11 |
12 | class Residual(nn.Module):
13 | def __init__(self, fn):
14 | super().__init__()
15 | self.fn = fn
16 |
17 | def forward(self, x, **kwargs):
18 | return self.fn(x, **kwargs) + x
19 |
20 | class PreNorm(nn.Module):
21 | def __init__(self, dim, fn):
22 | super().__init__()
23 | self.norm = nn.LayerNorm(dim)
24 | self.fn = fn
25 |
26 | def forward(self, x, **kwargs):
27 | return self.fn(self.norm(x), **kwargs)
28 |
29 | class FeedForward(nn.Module):
30 | def __init__(self, dim, hidden_dim, dropout = 0.):
31 | super().__init__()
32 | self.net = nn.Sequential(
33 | nn.Linear(dim, hidden_dim),
34 | nn.GELU(),
35 | nn.Dropout(dropout),
36 | nn.Linear(hidden_dim, dim),
37 | nn.Dropout(dropout)
38 | )
39 | def forward(self, x):
40 | return self.net(x)
41 |
42 | class Attention(nn.Module):
43 | def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.):
44 | super().__init__()
45 | inner_dim = dim_head * heads
46 | project_out = not (heads == 1 and dim_head == dim)
47 |
48 | self.heads = heads
49 | self.scale = dim_head ** -0.5
50 |
51 | self.attend = nn.Softmax(dim = -1)
52 | self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
53 |
54 | self.to_out = nn.Sequential(
55 | nn.Linear(inner_dim, dim),
56 | nn.Dropout(dropout)
57 | ) if project_out else nn.Identity()
58 |
59 | #FIXME: the following methods (up to line 75) were added to enable tapping into the attention maps and gradients
60 | # for calculation of the Relevancy
61 |
62 | self.attn_gradients = None
63 | self.attention_map = None
64 |
65 |
66 | def save_attn_gradients(self, attn_gradients):
67 | self.attn_gradients = attn_gradients
68 |
69 | def get_attn_gradients(self):
70 | return self.attn_gradients
71 |
72 | def save_attention_map(self, attention_map):
73 | self.attention_map = attention_map
74 |
75 | def get_attention_map(self):
76 | return self.attention_map
77 |
78 | def forward(self, x, register_hook=False):
79 | b, n, _, h = *x.shape, self.heads
80 | qkv = self.to_qkv(x).chunk(3, dim = -1)
81 | q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv)
82 |
83 | dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
84 |
85 | attn = self.attend(dots)
86 |
87 | out = einsum('b h i j, b h j d -> b h i d', attn, v)
88 |
89 | self.save_attention_map(attn) #FIXME: added to save the attention map for relevance computation
90 | if register_hook:
91 | attn.register_hook(self.save_attn_gradients)
92 |
93 | out = rearrange(out, 'b h n d -> b n (h d)')
94 | return self.to_out(out)
95 |
96 | #FIXME: the Block class was added to facilitate the model explanation
97 | class Block(nn.Module):
98 | def __init__(self, dim, heads, dim_head, drop_out, mlp_dim, norm_layer=nn.LayerNorm):
99 | super().__init__()
100 | self.norm1 = norm_layer(dim)
101 | self.attn = Attention(dim, heads=heads, dim_head=dim_head, dropout=drop_out)
102 | self.norm2 = norm_layer(dim)
103 | self.mlp = FeedForward(dim=dim,hidden_dim=mlp_dim,dropout=0)
104 |
105 | def forward(self, x, register_hook=False):
106 | x = x + self.attn(self.norm1(x), register_hook=register_hook)
107 | x = x +self.mlp(self.norm2(x))
108 | return x
109 |
110 |
111 |
112 |
113 | class Transformer(nn.Module):
114 | def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout = 0.):
115 | super().__init__()
116 |
117 | self.blocks =nn.ModuleList([Block(dim =dim, heads = heads, dim_head = dim_head, drop_out = dropout, mlp_dim=mlp_dim)
118 | for i in range(depth)]) #FIXME: an alternative definition of the layers using blocks
119 |
120 |
121 | # self.layers = nn.ModuleList([])
122 | # for _ in range(depth):
123 | # self.layers.append(nn.ModuleList([
124 | # PreNorm(dim, Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout)),
125 | # PreNorm(dim, FeedForward(dim=dim, hidden_dim=mlp_dim, dropout = 0))
126 | # ]))
127 |
128 | def forward(self, x, register_hook=False): #FIXME: changed the forward to use the structure of blocks
129 | for blk in self.blocks:
130 | x = blk(x,register_hook=register_hook)
131 |
132 | # def forward(self, x):
133 | # for attn, ff in self.layers:
134 | # x = attn(x) + x
135 | # x = ff(x) + x
136 | return x
137 |
138 | class EfficientViT(nn.Module):
139 | def __init__(self, config, channels=512, selected_efficient_net = 0):
140 | super().__init__()
141 |
142 | image_size = config['model']['image-size']
143 | patch_size = config['model']['patch-size']
144 | num_classes = config['model']['num-classes']
145 | dim = config['model']['dim']
146 | depth = config['model']['depth']
147 | heads = config['model']['heads']
148 | mlp_dim = config['model']['mlp-dim']
149 | emb_dim = config['model']['emb-dim']
150 | dim_head = config['model']['dim-head']
151 | dropout = config['model']['dropout']
152 | emb_dropout = config['model']['emb-dropout']
153 |
154 | assert image_size % patch_size == 0, 'image dimensions must be divisible by the patch size'
155 |
156 | self.selected_efficient_net = selected_efficient_net
157 |
158 | if selected_efficient_net == 0:
159 | self.efficient_net = EfficientNet.from_pretrained('efficientnet-b0')
160 | else:
161 | self.efficient_net = EfficientNet.from_pretrained('efficientnet-b7')
162 | checkpoint = torch.load("weights/final_999_DeepFakeClassifier_tf_efficientnet_b7_ns_0_23", map_location="cpu")
163 | state_dict = checkpoint.get("state_dict", checkpoint)
164 | self.efficient_net.load_state_dict({re.sub("^module.", "", k): v for k, v in state_dict.items()}, strict=False)
165 |
166 | for i in range(0, len(self.efficient_net._blocks)):
167 | for index, param in enumerate(self.efficient_net._blocks[i].parameters()):
168 | if i >= len(self.efficient_net._blocks)-3:
169 | param.requires_grad = True
170 | else:
171 | param.requires_grad = False
172 |
173 |
174 | self.num_patches = (image_size // patch_size) ** 2 #FIXME: corrected the formula
175 | patch_dim = channels * patch_size ** 2
176 | self.emb_dim = emb_dim
177 | self.patch_size = patch_size
178 | efficientnet_output_size = channels * patch_size ** 2
179 | self.pos_embedding = nn.Parameter(torch.randn(1, self.num_patches + 1, emb_dim))
180 | # FIXME: Change the patch_to_embedding from simple linear projection that degenerated the input to the transformer
181 | # to one token, to support 1024 tokens
182 | self.patch_to_embedding = nn.Conv1d(in_channels=1, out_channels=self.num_patches, kernel_size= dim, stride=dim)
183 | self.cls_token = nn.Parameter(torch.randn(1, 1, self.emb_dim))
184 | self.dropout = nn.Dropout(emb_dropout)
185 | self.transformer = Transformer(dim = emb_dim, depth = depth, heads =heads, dim_head = dim_head, mlp_dim = mlp_dim, dropout =dropout)
186 |
187 | self.to_cls_token = nn.Identity()
188 |
189 | self.mlp_head = nn.Sequential(
190 | nn.Linear(emb_dim, mlp_dim),
191 | nn.ReLU(),
192 | nn.Linear(mlp_dim, num_classes) )
193 |
194 | def forward(self, img, mask=None, register_hook=False):
195 | p = self.patch_size
196 | x = self.efficient_net.extract_features(img) # 1280x7x7
197 |
198 | y = rearrange(x, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = p, p2 = p)
199 | y = self.patch_to_embedding(y) #FIXME: changed the patch_to_embedding above
200 |
201 | cls_tokens = self.cls_token.expand(x.shape[0], -1, -1) # FIXME: corrected to align with new patch embedding
202 | x = torch.cat((cls_tokens, y), 1)
203 | shape=x.shape[0]
204 | x += self.pos_embedding[0:shape]
205 | x = self.dropout(x)
206 | x = self.transformer(x, register_hook)
207 | x = self.to_cls_token(x[:, 0])
208 |
209 | return self.mlp_head(x)
210 |
211 |
--------------------------------------------------------------------------------
/explain_model/baselines/EfficientViT/explained_architecture.yaml:
--------------------------------------------------------------------------------
1 | training:
2 | lr: 0.01
3 | weight-decay: 0.0000001
4 | bs: 16
5 | scheduler: 'steplr'
6 | gamma: 0.1
7 | step-size: 15
8 | rebalancing_fake: 0.3
9 | rebalancing_real: 1
10 | frames-per-video: 20 # Equidistant frames
11 |
12 | model:
13 | image-size: 224
14 | patch-size: 7
15 | num-classes: 1
16 | dim: 1280
17 | depth: 6
18 | dim-head: 64
19 | heads: 8
20 | mlp-dim: 2048
21 | emb-dim: 49
22 | dropout: 0.15
23 | emb-dropout: 0.15
24 |
--------------------------------------------------------------------------------
/explain_model/baselines/EfficientViT/transforms/albu.py:
--------------------------------------------------------------------------------
1 | import random
2 |
3 | import cv2
4 | import numpy as np
5 | from albumentations import DualTransform, ImageOnlyTransform
6 | from albumentations.augmentations.functional import crop
7 |
8 |
9 | def isotropically_resize_image(img, size, interpolation_down=cv2.INTER_AREA, interpolation_up=cv2.INTER_CUBIC):
10 | h, w = img.shape[:2]
11 |
12 | if max(w, h) == size:
13 | return img
14 | if w > h:
15 | scale = size / w
16 | h = h * scale
17 | w = size
18 | else:
19 | scale = size / h
20 | w = w * scale
21 | h = size
22 | interpolation = interpolation_up if scale > 1 else interpolation_down
23 |
24 | img = img.astype('uint8')
25 | resized = cv2.resize(img, (int(w), int(h)), interpolation=interpolation)
26 | return resized
27 |
28 |
29 | class IsotropicResize(DualTransform):
30 | def __init__(self, max_side, interpolation_down=cv2.INTER_AREA, interpolation_up=cv2.INTER_CUBIC,
31 | always_apply=False, p=1):
32 | super(IsotropicResize, self).__init__(always_apply, p)
33 | self.max_side = max_side
34 | self.interpolation_down = interpolation_down
35 | self.interpolation_up = interpolation_up
36 |
37 | def apply(self, img, interpolation_down=cv2.INTER_AREA, interpolation_up=cv2.INTER_CUBIC, **params):
38 | return isotropically_resize_image(img, size=self.max_side, interpolation_down=interpolation_down,
39 | interpolation_up=interpolation_up)
40 |
41 | def apply_to_mask(self, img, **params):
42 | return self.apply(img, interpolation_down=cv2.INTER_NEAREST, interpolation_up=cv2.INTER_NEAREST, **params)
43 |
44 | def get_transform_init_args_names(self):
45 | return ("max_side", "interpolation_down", "interpolation_up")
46 |
47 |
48 | class Resize4xAndBack(ImageOnlyTransform):
49 | def __init__(self, always_apply=False, p=0.5):
50 | super(Resize4xAndBack, self).__init__(always_apply, p)
51 |
52 | def apply(self, img, **params):
53 | h, w = img.shape[:2]
54 | scale = random.choice([2, 4])
55 | img = cv2.resize(img, (w // scale, h // scale), interpolation=cv2.INTER_AREA)
56 | img = cv2.resize(img, (w, h),
57 | interpolation=random.choice([cv2.INTER_CUBIC, cv2.INTER_LINEAR, cv2.INTER_NEAREST]))
58 | return img
59 |
60 | class RandomSizedCropNonEmptyMaskIfExists(DualTransform):
61 |
62 | def __init__(self, min_max_height, w2h_ratio=[0.7, 1.3], always_apply=False, p=0.5):
63 | super(RandomSizedCropNonEmptyMaskIfExists, self).__init__(always_apply, p)
64 |
65 | self.min_max_height = min_max_height
66 | self.w2h_ratio = w2h_ratio
67 |
68 | def apply(self, img, x_min=0, x_max=0, y_min=0, y_max=0, **params):
69 | cropped = crop(img, x_min, y_min, x_max, y_max)
70 | return cropped
71 |
72 | @property
73 | def targets_as_params(self):
74 | return ["mask"]
75 |
76 | def get_params_dependent_on_targets(self, params):
77 | mask = params["mask"]
78 | mask_height, mask_width = mask.shape[:2]
79 | crop_height = int(mask_height * random.uniform(self.min_max_height[0], self.min_max_height[1]))
80 | w2h_ratio = random.uniform(*self.w2h_ratio)
81 | crop_width = min(int(crop_height * w2h_ratio), mask_width - 1)
82 | if mask.sum() == 0:
83 | x_min = random.randint(0, mask_width - crop_width + 1)
84 | y_min = random.randint(0, mask_height - crop_height + 1)
85 | else:
86 | mask = mask.sum(axis=-1) if mask.ndim == 3 else mask
87 | non_zero_yx = np.argwhere(mask)
88 | y, x = random.choice(non_zero_yx)
89 | x_min = x - random.randint(0, crop_width - 1)
90 | y_min = y - random.randint(0, crop_height - 1)
91 | x_min = np.clip(x_min, 0, mask_width - crop_width)
92 | y_min = np.clip(y_min, 0, mask_height - crop_height)
93 |
94 | x_max = x_min + crop_width  # the crop extends crop_width pixels along the x axis
95 | y_max = y_min + crop_height  # and crop_height pixels along the y axis
96 | y_max = min(mask_height, y_max)
97 | x_max = min(mask_width, x_max)
98 | return {"x_min": x_min, "x_max": x_max, "y_min": y_min, "y_max": y_max}
99 |
100 | def get_transform_init_args_names(self):
101 | return "min_max_height", "height", "width", "w2h_ratio"
--------------------------------------------------------------------------------
/explain_model/baselines/EfficientViT/utils.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | from albumentations import Compose, PadIfNeeded
3 | from transforms.albu import IsotropicResize
4 | import numpy as np
5 | import os
6 | import cv2
7 | import torch
8 | from statistics import mean
9 |
10 | def transform_frame(image, image_size):
11 | transform_pipeline = Compose([
12 | IsotropicResize(max_side=image_size, interpolation_down=cv2.INTER_LINEAR, interpolation_up=cv2.INTER_LINEAR),
13 | PadIfNeeded(min_height=image_size, min_width=image_size, border_mode=cv2.BORDER_REPLICATE)
14 | ]
15 | )
16 | return transform_pipeline(image=image)['image']
17 |
18 |
19 | def resize(image, image_size):
20 | try:
21 | return cv2.resize(image, dsize=(image_size, image_size))
22 | except:
23 | return []
24 |
25 | def custom_round(values):
26 | result = []
27 | for value in values:
28 | if value > 0.55:
29 | result.append(1)
30 | else:
31 | result.append(0)
32 | return np.asarray(result)
33 |
34 | def custom_video_round(preds):
35 | for pred_value in preds:
36 | if pred_value > 0.55:
37 | return pred_value
38 | return mean(preds)
39 |
40 |
41 |
42 | def get_method(video, data_path):
43 | # methods = os.listdir(os.path.join(data_path, "manipulated_sequences"))
44 | # methods.extend(os.listdir(os.path.join(data_path, "original_sequences")))
45 | # methods.append("DFDC")
46 | # methods.append("Original")
47 | methods = os.listdir(os.path.join(data_path,"test_set"))
48 | selected_method = ""
49 | for method in methods:
50 | if method in video:
51 | selected_method = method
52 | break
53 | return selected_method
54 |
55 | def shuffle_dataset(dataset):
56 | import random
57 | random.seed(4)
58 | random.shuffle(dataset)
59 | return dataset
60 |
61 | def get_n_params(model):
62 | pp=0
63 | for p in list(model.parameters()):
64 | nn=1
65 | for s in list(p.size()):
66 | nn = nn*s
67 | pp += nn
68 | return pp
69 |
70 | def check_correct(preds, labels):
71 | preds = preds.cpu()
72 | labels = labels.cpu()
73 | preds = [np.asarray(torch.sigmoid(pred).detach().numpy()).round() for pred in preds]
74 |
75 | correct = 0
76 | positive_class = 0
77 | negative_class = 0
78 | for i in range(len(labels)):
79 | pred = int(preds[i])
80 | if labels[i] == pred:
81 | correct += 1
82 | if pred == 1:
83 | positive_class += 1
84 | else:
85 | negative_class += 1
86 | return correct, positive_class, negative_class
87 |
--------------------------------------------------------------------------------
/explain_model/explain_model.py:
--------------------------------------------------------------------------------
1 |
2 | import cv2
3 | import matplotlib.pyplot as plt
4 | import numpy as np
5 | import torch
6 | # import torchvision.transforms as transforms
7 | # import torch.nn as nn
8 | from torch.nn import AvgPool2d
9 | import yaml
10 | from PIL import Image
11 | import os
12 | import csv
13 | from albumentations import Compose,PadIfNeeded
14 | from baselines.EfficientViT.transforms.albu import IsotropicResize
15 |
16 |
17 |
18 |
19 | # rule 5 from paper
20 | def avg_heads(cam, grad):
21 | cam = cam.reshape(-1, cam.shape[-2], cam.shape[-1])
22 | grad = grad.reshape(-1, grad.shape[-2], grad.shape[-1])
23 | cam = grad * cam
24 | cam = cam.clamp(min=0).mean(dim=0)
25 | return cam
26 |
27 |
28 | # rule 6 from paper
29 | def apply_self_attention_rules(R_ss, cam_ss):
30 | R_ss_addition = torch.matmul(cam_ss, R_ss)
31 | return R_ss_addition
32 |
33 |
34 | def generate_relevance(model, input):
35 | output = model(input, register_hook=True)
36 | model.zero_grad()
37 | output.backward(retain_graph=True)
38 |
39 | num_tokens = model.transformer.blocks[0].attn.get_attention_map().shape[-1]
40 | R = torch.eye(num_tokens, num_tokens).cuda()
41 | # num_layers = 6
42 | # for i, blk in enumerate(model.transformer.blocks):
43 | # if i <= num_layers:
44 | # continue
45 | for blk in model.transformer.blocks:
46 | grad = blk.attn.get_attn_gradients()
47 | # g_view = grad.cpu().numpy()
48 | cam = blk.attn.get_attention_map()
49 | cam = avg_heads(cam, grad)
50 | R += apply_self_attention_rules(R.cuda(), cam.cuda())
51 | return R[0, 1:]
52 |
53 |
54 | from baselines.EfficientViT.evit_model10 import EfficientViT
55 |
56 |
57 | # normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
58 |
59 | def create_base_transform(size): #fixme: added from test evit
60 | return Compose([
61 | IsotropicResize(max_side=size, interpolation_down=cv2.INTER_AREA, interpolation_up=cv2.INTER_CUBIC),
62 | PadIfNeeded(min_height=size, min_width=size, border_mode=cv2.BORDER_CONSTANT),
63 | ])
64 |
65 | # create heatmap from mask on image
66 | def show_cam_on_image(img, mask):
67 | heatmap = cv2.applyColorMap(np.uint8(255 * mask), cv2.COLORMAP_JET)
68 | heatmap = np.float32(heatmap) / 255
69 | cam = heatmap + np.float32(img)
70 | cam = cam / np.max(cam)
71 | return cam
72 |
73 |
74 |
75 | BASE_DIR = '../deep_fakes_explain/'
76 | DATA_DIR = os.path.join(BASE_DIR, "dataset")
77 | TEST_DIR = os.path.join(DATA_DIR, "validation_set")
78 | MODELS_PATH = os.path.join(BASE_DIR, "models")
79 | EXAMPLES_DIR = 'examples'
80 | OUTPUT_DIR = 'explanation'
81 |
82 | # FIXME: Initialize the Efficientnet_ViT Deepfake Detector pretrained model
83 | config = 'baselines/EfficientViT/explained_architecture.yaml'
84 | with open(config, 'r') as ymlfile:
85 | config = yaml.safe_load(ymlfile)
86 | model_weights = os.path.join(MODELS_PATH,'efficientnetB0_checkpoint89_All') #TODO: update with the latest model
87 |
88 | model = EfficientViT(config=config, channels=1280, selected_efficient_net=0)
89 | model.load_state_dict(torch.load(model_weights))
90 | model.eval()
91 | model = model.cuda()
92 |
93 | if not os.path.exists(OUTPUT_DIR):
94 | os.makedirs(OUTPUT_DIR)
95 |
96 | down_sample= AvgPool2d(kernel_size=2)
97 |
98 | def generate_visualization(original_image):
99 | transformer_attribution = generate_relevance(model, original_image.unsqueeze(0).cuda()).detach()
100 | transformer_attribution = transformer_attribution.reshape(1, 1, 32, 32)
101 | transformer_attribution = down_sample(transformer_attribution)
102 | transformer_attribution = torch.nn.functional.interpolate(transformer_attribution, scale_factor=14, mode='bilinear')
103 | transformer_attribution = transformer_attribution.reshape(224, 224).cuda().data.cpu().numpy()
104 | transformer_attribution = (transformer_attribution - transformer_attribution.min()) / (
105 | transformer_attribution.max() - transformer_attribution.min())
106 | image_transformer_attribution = original_image.permute(1, 2, 0).data.cpu().numpy()
107 | image_transformer_attribution = (image_transformer_attribution - image_transformer_attribution.min()) / (
108 | image_transformer_attribution.max() - image_transformer_attribution.min())
109 | vis = show_cam_on_image(image_transformer_attribution, transformer_attribution)
110 | vis = np.uint8(255 * vis)
111 | vis = cv2.cvtColor(np.array(vis), cv2.COLOR_RGB2BGR)
112 | return vis
113 |
114 | #FIXME: added batch visualization based on the samples file generated by the test
115 | with open('samples_list_few_efficientnetB0_checkpoint89_All_refac.csv') as csv_file: #TODO: verify the right file name
116 | csv_reader = csv.DictReader(csv_file, delimiter=',')
117 | line_count = 0
118 | image_filenames_list =[]
119 | videos_preds =[]
120 | labels_list = []
121 | for row in csv_reader:
122 | if line_count==0:
123 | line_count += 1
124 |
125 | image_filenames_list.append(row["example_file_name"])
126 | videos_preds.append(row["video_prob"])
127 | labels_list.append(row["label"])
128 | line_count +=1
129 |
130 | for i, file_name in enumerate(image_filenames_list):
131 | method = file_name.split('_')[0]
132 | full_path = os.path.join(EXAMPLES_DIR, file_name)
133 | image = Image.open(full_path)
134 | transform = create_base_transform(config['model']['image-size'])
135 | t_image = transform(image=cv2.imread(os.path.join(full_path)))['image']
136 |
137 | fig, axs = plt.subplots(1,2)
138 | axs[0].imshow(image);
139 | axs[0].axis('off');
140 |
141 | t_image = torch.tensor(np.asarray(t_image))
142 | t_image = np.transpose(t_image,(2, 0, 1)).float()
143 |
144 | pred_score = torch.sigmoid(model(t_image.unsqueeze(0).cuda()))
145 |
146 | label = 'Fake Image' if labels_list[i] == '1' else 'True Image'
147 | label = 'Label: ' + label
148 |
149 | if pred_score.item() > 0.55 and labels_list[i] == '1':
150 | result = 'TP'
151 | elif pred_score.item() > 0.55 and labels_list[i] == '0':
152 | result = 'FP'
153 | elif pred_score.item() < 0.55 and labels_list[i] == '0':
154 | result = 'TN'
155 | else:
156 | result = 'FN'
157 | video_pred_score = 'Video Pred Score: ' + str(videos_preds[i])[0:5]
158 | frame_pred_score = 'Frame Pred Score: ' + str(pred_score.item())[0:5]
159 | Title = label + '\n' + 'Method: ' + method + '\n' + video_pred_score + '\n' + frame_pred_score + '\nClassification: ' + result
160 |
161 | image_vis = generate_visualization(t_image)
162 |
163 | axs[1].imshow(image_vis);
164 | axs[1].axis('off');
165 |
166 | plt.suptitle(Title,ha='left', size='medium', x=0.4, y=0.92)
167 |
168 | fig.savefig(OUTPUT_DIR +'/vis_norm' + image_filenames_list[i])
169 | plt.close(fig)
170 |
171 |
172 |
173 |
--------------------------------------------------------------------------------
/explain_model/samples_list_few_efficientnetB0_checkpoint89_All_refac.csv:
--------------------------------------------------------------------------------
1 | video_name,label,video_prob,high_low_prob,example_file_name,example_file_name_short
2 | ../deep_fakes_explain/dataset/validation_set/Original/054,0,0.007896773,0.000347124,Original_054_296_0.png,296_0.png
3 | ../deep_fakes_explain/dataset/validation_set/Original/040,0,0.14705108,0.00122538,Original_040_211_0.png,211_0.png
4 | ../deep_fakes_explain/dataset/validation_set/Face2Face/067_025,1,0.9307484,0.9999294,Face2Face_067_025_667_0.png,667_0.png
5 | ../deep_fakes_explain/dataset/validation_set/Face2Face/080_061,1,0.99948466,0.99994504,Face2Face_080_061_220_0.png,220_0.png
6 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/071_054,1,0.9962616,0.9127761,FaceShifter_071_054_216_0.png,216_0.png
7 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/063_041,1,0.6419253,0.9987355,FaceShifter_063_041_917_0.png,917_0.png
8 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/070_057,1,0.99993753,0.5407529,FaceSwap_070_057_81_0.png,81_0.png
9 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/012_026,1,0.9998897,0.99995625,FaceSwap_012_026_202_0.png,202_0.png
10 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/040_997,1,0.9535204,0.999783,NeuralTextures_040_997_216_0.png,216_0.png
11 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/027_009,1,0.7497221,0.37248436,NeuralTextures_027_009_220_0.png,220_0.png
12 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/034_590,1,0.9942188,0.9999249,Deepfakes_034_590_296_0.png,296_0.png
13 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/062_066,1,0.99206984,0.9999113,Deepfakes_062_066_232_0.png,232_0.png
14 |
--------------------------------------------------------------------------------
/model_test_train/Copy_selected_images:
--------------------------------------------------------------------------------
1 | import os
2 | from shutil import copyfile
3 | import csv
4 |
5 | file_name = '/home/eshel_noam/Explainable_Efficient_ViT_DFD/model_test_train/results/tests/samples_list_All_efficientnetB0_checkpoint89_All.csv'
6 | destination_path = "/home/eshel_noam/Explainable_Efficient_ViT_DFD/explain_model/examples"
7 |
8 | with open(file_name) as csv_file: #TODO: verify the right file name
9 | csv_reader = csv.DictReader(csv_file, delimiter=',')
10 | line_count = 0
11 |
12 | for row in csv_reader:
13 | if line_count==0:
14 | line_count += 1
15 |
16 | else:
17 | src_file = os.path.join(row["video_name"],row["example_file_name"])
18 | y = src_file.split('/')
19 | video_name = y[-2]
20 | method_name = y[-3]
21 | img_file_name = method_name +'_' + video_name + '_' + row["example_file_name"]
22 | dst_name = os.path.join(destination_path,img_file_name )
23 | copyfile(src_file,dst_name)
24 | line_count += 1
25 |
--------------------------------------------------------------------------------
/model_test_train/configs/explained_architecture.yaml:
--------------------------------------------------------------------------------
1 | training:
2 | lr: 0.01
3 | weight-decay: 0.0000001
4 | bs: 16
5 | scheduler: 'steplr'
6 | gamma: 0.1
7 | step-size: 15
8 | rebalancing_fake: 0.3
9 | rebalancing_real: 1
10 | frames-per-video: 20 # Equidistant frames
11 |
12 | model:
13 | image-size: 224
14 | patch-size: 7
15 | num-classes: 1
16 | dim: 1280
17 | depth: 6
18 | dim-head: 64
19 | heads: 8
20 | mlp-dim: 2048
21 | emb-dim: 49
22 | dropout: 0.15
23 | emb-dropout: 0.15
24 |
--------------------------------------------------------------------------------
/model_test_train/deepfakes_dataset.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch.utils.data import DataLoader, TensorDataset, Dataset
3 | import cv2
4 | import numpy as np
5 |
6 | import uuid
7 | from albumentations import Compose, RandomBrightnessContrast, \
8 | HorizontalFlip, FancyPCA, HueSaturationValue, OneOf, ToGray, \
9 | ShiftScaleRotate, ImageCompression, PadIfNeeded, GaussNoise, GaussianBlur, Rotate
10 |
11 | from transforms.albu import IsotropicResize
12 |
13 | class DeepFakesDataset(Dataset):
14 | def __init__(self, images, labels, image_size, mode = 'train'):
15 | self.x = images
16 | self.y = torch.from_numpy(labels)
17 | self.image_size = image_size
18 | self.mode = mode
19 | self.n_samples = images.shape[0]
20 |
21 | def create_train_transforms(self, size):
22 | return Compose([
23 | ImageCompression(quality_lower=60, quality_upper=100, p=0.2),
24 | GaussNoise(p=0.3),
25 | #GaussianBlur(blur_limit=3, p=0.05),
26 | HorizontalFlip(),
27 | OneOf([
28 | IsotropicResize(max_side=size, interpolation_down=cv2.INTER_AREA, interpolation_up=cv2.INTER_CUBIC),
29 | IsotropicResize(max_side=size, interpolation_down=cv2.INTER_AREA, interpolation_up=cv2.INTER_LINEAR),
30 | IsotropicResize(max_side=size, interpolation_down=cv2.INTER_LINEAR, interpolation_up=cv2.INTER_LINEAR),
31 | ], p=1),
32 | PadIfNeeded(min_height=size, min_width=size, border_mode=cv2.BORDER_CONSTANT),
33 | OneOf([RandomBrightnessContrast(), FancyPCA(), HueSaturationValue()], p=0.4),
34 | ToGray(p=0.2),
35 | ShiftScaleRotate(shift_limit=0.1, scale_limit=0.2, rotate_limit=5, border_mode=cv2.BORDER_CONSTANT, p=0.5),
36 | ]
37 | )
38 |
39 | def create_val_transform(self, size):
40 | return Compose([
41 | IsotropicResize(max_side=size, interpolation_down=cv2.INTER_AREA, interpolation_up=cv2.INTER_CUBIC),
42 | PadIfNeeded(min_height=size, min_width=size, border_mode=cv2.BORDER_CONSTANT),
43 | ])
44 |
45 | def __getitem__(self, index):
46 | image = np.asarray(self.x[index])
47 |
48 | if self.mode == 'train':
49 | transform = self.create_train_transforms(self.image_size)
50 | else:
51 | transform = self.create_val_transform(self.image_size)
52 |
53 | unique = uuid.uuid4()
54 | #cv2.imwrite("../dataset/augmented_frames/isotropic_augmentation/"+str(unique)+"_"+str(index)+"_original.png", image)
55 |
56 | image = transform(image=image)['image']
57 |
58 | #cv2.imwrite("../dataset/augmented_frames/isotropic_augmentation/"+str(unique)+"_"+str(index)+".png", image)
59 |
60 | return torch.tensor(image).float(), self.y[index]
61 |
62 |
63 | def __len__(self):
64 | return self.n_samples
65 |
66 |
--------------------------------------------------------------------------------
/model_test_train/evit_model10.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn
3 | from einops import rearrange
4 | from efficientnet_pytorch import EfficientNet
5 | import cv2
6 | import re
7 | import numpy as np
8 | from torch import einsum
9 | from random import randint
10 |
11 |
12 | class Residual(nn.Module):
13 | def __init__(self, fn):
14 | super().__init__()
15 | self.fn = fn
16 |
17 | def forward(self, x, **kwargs):
18 | return self.fn(x, **kwargs) + x
19 |
20 | class PreNorm(nn.Module):
21 | def __init__(self, dim, fn):
22 | super().__init__()
23 | self.norm = nn.LayerNorm(dim)
24 | self.fn = fn
25 |
26 | def forward(self, x, **kwargs):
27 | return self.fn(self.norm(x), **kwargs)
28 |
29 | class FeedForward(nn.Module):
30 | def __init__(self, dim, hidden_dim, dropout = 0.):
31 | super().__init__()
32 | self.net = nn.Sequential(
33 | nn.Linear(dim, hidden_dim),
34 | nn.GELU(),
35 | nn.Dropout(dropout),
36 | nn.Linear(hidden_dim, dim),
37 | nn.Dropout(dropout)
38 | )
39 | def forward(self, x):
40 | return self.net(x)
41 |
42 | class Attention(nn.Module):
43 | def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.):
44 | super().__init__()
45 | inner_dim = dim_head * heads
46 | project_out = not (heads == 1 and dim_head == dim)
47 |
48 | self.heads = heads
49 | self.scale = dim_head ** -0.5
50 |
51 | self.attend = nn.Softmax(dim = -1)
52 | self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
53 |
54 | self.to_out = nn.Sequential(
55 | nn.Linear(inner_dim, dim),
56 | nn.Dropout(dropout)
57 | ) if project_out else nn.Identity()
58 |
59 | #FIXME: the following methods (up to line 75) were added to enable tapping into the attention maps and gradients
60 | # for calculation of the Relevancy
61 |
62 | self.attn_gradients = None
63 | self.attention_map = None
64 |
65 |
66 | def save_attn_gradients(self, attn_gradients):
67 | self.attn_gradients = attn_gradients
68 |
69 | def get_attn_gradients(self):
70 | return self.attn_gradients
71 |
72 | def save_attention_map(self, attention_map):
73 | self.attention_map = attention_map
74 |
75 | def get_attention_map(self):
76 | return self.attention_map
77 |
78 | def forward(self, x, register_hook=False):
79 | b, n, _, h = *x.shape, self.heads
80 | qkv = self.to_qkv(x).chunk(3, dim = -1)
81 | q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv)
82 |
83 | dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
84 |
85 | attn = self.attend(dots)
86 |
87 | out = einsum('b h i j, b h j d -> b h i d', attn, v)
88 |
89 | self.save_attention_map(attn) #FIXME: added to save the attention map for relevance computation
90 | if register_hook:
91 | attn.register_hook(self.save_attn_gradients)
92 |
93 | out = rearrange(out, 'b h n d -> b n (h d)')
94 | return self.to_out(out)
95 |
96 | #FIXME: the Block class was added to facilitate the model explanation
97 | class Block(nn.Module):
98 | def __init__(self, dim, heads, dim_head, drop_out, mlp_dim, norm_layer=nn.LayerNorm):
99 | super().__init__()
100 | self.norm1 = norm_layer(dim)
101 | self.attn = Attention(dim, heads=heads, dim_head=dim_head, dropout=drop_out)
102 | self.norm2 = norm_layer(dim)
103 | self.mlp = FeedForward(dim=dim,hidden_dim=mlp_dim,dropout=0)
104 |
105 | def forward(self, x, register_hook=False):
106 | x = x + self.attn(self.norm1(x), register_hook=register_hook)
107 | x = x +self.mlp(self.norm2(x))
108 | return x
109 |
110 |
111 |
112 |
113 | class Transformer(nn.Module):
114 | def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout = 0.):
115 | super().__init__()
116 |
117 | self.blocks =nn.ModuleList([Block(dim =dim, heads = heads, dim_head = dim_head, drop_out = dropout, mlp_dim=mlp_dim)
118 | for i in range(depth)]) #FIXME: an alternative definition of the layers using blocks
119 |
120 |
121 | # self.layers = nn.ModuleList([])
122 | # for _ in range(depth):
123 | # self.layers.append(nn.ModuleList([
124 | # PreNorm(dim, Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout)),
125 | # PreNorm(dim, FeedForward(dim=dim, hidden_dim=mlp_dim, dropout = 0))
126 | # ]))
127 |
128 |     def forward(self, x, register_hook=False): #FIXME: changed forward to use the structure of Block modules
129 |         for blk in self.blocks:
130 |             x = blk(x, register_hook=register_hook)
131 |         return x
132 | 
133 |     # def forward(self, x):
134 |     #     for attn, ff in self.layers:
135 |     #         x = attn(x) + x
136 |     #         x = ff(x) + x
137 |
138 | class EfficientViT(nn.Module):
139 | def __init__(self, config, channels=512, selected_efficient_net = 0):
140 | super().__init__()
141 |
142 | image_size = config['model']['image-size']
143 | patch_size = config['model']['patch-size']
144 | num_classes = config['model']['num-classes']
145 | dim = config['model']['dim']
146 | depth = config['model']['depth']
147 | heads = config['model']['heads']
148 | mlp_dim = config['model']['mlp-dim']
149 | emb_dim = config['model']['emb-dim']
150 | dim_head = config['model']['dim-head']
151 | dropout = config['model']['dropout']
152 | emb_dropout = config['model']['emb-dropout']
153 |
154 | assert image_size % patch_size == 0, 'image dimensions must be divisible by the patch size'
155 |
156 | self.selected_efficient_net = selected_efficient_net
157 |
158 | if selected_efficient_net == 0:
159 | self.efficient_net = EfficientNet.from_pretrained('efficientnet-b0')
160 | else:
161 | self.efficient_net = EfficientNet.from_pretrained('efficientnet-b7')
162 | checkpoint = torch.load("weights/final_999_DeepFakeClassifier_tf_efficientnet_b7_ns_0_23", map_location="cpu")
163 | state_dict = checkpoint.get("state_dict", checkpoint)
164 | self.efficient_net.load_state_dict({re.sub("^module.", "", k): v for k, v in state_dict.items()}, strict=False)
165 |
166 | for i in range(0, len(self.efficient_net._blocks)):
167 | for index, param in enumerate(self.efficient_net._blocks[i].parameters()):
168 | if i >= len(self.efficient_net._blocks)-3:
169 | param.requires_grad = True
170 | else:
171 | param.requires_grad = False
172 |
173 |
174 | self.num_patches = (image_size // patch_size) ** 2 #FIXME: corrected the formula
175 | patch_dim = channels * patch_size ** 2
176 | self.emb_dim = emb_dim
177 | self.patch_size = patch_size
178 | efficientnet_output_size = channels * patch_size ** 2
179 | self.pos_embedding = nn.Parameter(torch.randn(1, self.num_patches + 1, emb_dim))
180 |         # FIXME: changed patch_to_embedding from a simple linear projection, which collapsed the transformer
181 |         # input to a single token, to a Conv1d that produces self.num_patches (e.g. 1024) tokens
182 |         self.patch_to_embedding = nn.Conv1d(in_channels=1, out_channels=self.num_patches, kernel_size=dim, stride=dim)
183 | self.cls_token = nn.Parameter(torch.randn(1, 1, self.emb_dim))
184 | self.dropout = nn.Dropout(emb_dropout)
185 |         self.transformer = Transformer(dim=emb_dim, depth=depth, heads=heads, dim_head=dim_head, mlp_dim=mlp_dim, dropout=dropout)
186 |
187 | self.to_cls_token = nn.Identity()
188 |
189 | self.mlp_head = nn.Sequential(
190 | nn.Linear(emb_dim, mlp_dim),
191 | nn.ReLU(),
192 | nn.Linear(mlp_dim, num_classes) )
193 |
194 | def forward(self, img, mask=None, register_hook=False):
195 | p = self.patch_size
196 | x = self.efficient_net.extract_features(img) # 1280x7x7
197 |
198 | y = rearrange(x, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = p, p2 = p)
199 | y = self.patch_to_embedding(y) #FIXME: changed the patch_to_embedding above
200 |
201 | cls_tokens = self.cls_token.expand(x.shape[0], -1, -1) # FIXME: corrected to align with new patch embedding
202 | x = torch.cat((cls_tokens, y), 1)
203 |         shape = x.shape[0]
204 | x += self.pos_embedding[0:shape]
205 | x = self.dropout(x)
206 | x = self.transformer(x, register_hook)
207 | x = self.to_cls_token(x[:, 0])
208 |
209 | return self.mlp_head(x)
210 |
211 |
--------------------------------------------------------------------------------
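Usage sketch (not a file in the repository): one way the attention hooks kept in evit_model10.py above might be tapped, after a forward and backward pass, to build a rollout-style relevancy map over the patch tokens. The config path, the random input, the backward target, and the gradient-weighted rollout are illustrative assumptions; the project's own relevancy computation presumably lives in explain_model.py.

import torch
import yaml

from evit_model10 import EfficientViT

# Load the same architecture description the train/test scripts use (assumed relative path).
with open("configs/explained_architecture.yaml") as f:
    config = yaml.safe_load(f)

model = EfficientViT(config=config, selected_efficient_net=0)
model.eval()

size = config["model"]["image-size"]
img = torch.randn(1, 3, size, size)           # stand-in for a preprocessed face crop
logits = model(img, register_hook=True)       # register_hook=True makes each Attention save its map and gradients
logits[0].max().backward()                    # backward populates the saved attention gradients

rollout = None
for blk in model.transformer.blocks:
    attn = blk.attn.get_attention_map()       # (1, heads, tokens, tokens)
    grad = blk.attn.get_attn_gradients()
    cam = (grad * attn).clamp(min=0).mean(dim=1)   # gradient-weighted map, averaged over heads
    rollout = cam if rollout is None else torch.bmm(cam, rollout)

relevancy = rollout[0, 0, 1:]                 # CLS-token row over the patch tokens

--------------------------------------------------------------------------------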
/model_test_train/results/tests/efficientnetB0_checkpoint89_All_All_acc94.0099833610649_loss0.42291456_f10.9631901840490797.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/noame12/Explainable_Attention_Based_Deepfake_Detector/96ad106528ca46da1f456a886739d9f38960027f/model_test_train/results/tests/efficientnetB0_checkpoint89_All_All_acc94.0099833610649_loss0.42291456_f10.9631901840490797.jpg
--------------------------------------------------------------------------------
/model_test_train/results/tests/efficientnetB0_checkpoint89_All_Deepfakes_acc93.0_loss0.5777812_f10.9292929292929293.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/noame12/Explainable_Attention_Based_Deepfake_Detector/96ad106528ca46da1f456a886739d9f38960027f/model_test_train/results/tests/efficientnetB0_checkpoint89_All_Deepfakes_acc93.0_loss0.5777812_f10.9292929292929293.jpg
--------------------------------------------------------------------------------
/model_test_train/results/tests/efficientnetB0_checkpoint89_All_Face2Face_acc96.0_loss0.56776446_f10.9607843137254902.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/noame12/Explainable_Attention_Based_Deepfake_Detector/96ad106528ca46da1f456a886739d9f38960027f/model_test_train/results/tests/efficientnetB0_checkpoint89_All_Face2Face_acc96.0_loss0.56776446_f10.9607843137254902.jpg
--------------------------------------------------------------------------------
/model_test_train/results/tests/efficientnetB0_checkpoint89_All_FaceShifter_acc96.0_loss0.5703947_f10.9607843137254902.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/noame12/Explainable_Attention_Based_Deepfake_Detector/96ad106528ca46da1f456a886739d9f38960027f/model_test_train/results/tests/efficientnetB0_checkpoint89_All_FaceShifter_acc96.0_loss0.5703947_f10.9607843137254902.jpg
--------------------------------------------------------------------------------
/model_test_train/results/tests/efficientnetB0_checkpoint89_All_FaceSwap_acc96.5_loss0.56512296_f10.9658536585365853.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/noame12/Explainable_Attention_Based_Deepfake_Detector/96ad106528ca46da1f456a886739d9f38960027f/model_test_train/results/tests/efficientnetB0_checkpoint89_All_FaceSwap_acc96.5_loss0.56512296_f10.9658536585365853.jpg
--------------------------------------------------------------------------------
/model_test_train/results/tests/efficientnetB0_checkpoint89_All_NeuralTextures_acc88.55_loss0.5907205_f10.87958.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/noame12/Explainable_Attention_Based_Deepfake_Detector/96ad106528ca46da1f456a886739d9f38960027f/model_test_train/results/tests/efficientnetB0_checkpoint89_All_NeuralTextures_acc88.55_loss0.5907205_f10.87958.jpg
--------------------------------------------------------------------------------
/model_test_train/results/tests/samples_list_Deepfakes_efficientnetB0_checkpoint89_All.csv:
--------------------------------------------------------------------------------
1 | video_name,label,video_prob,high_low_prob,example_file_name
2 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/082_103,1.0,0.9436879,0.99966335,123_0.png
3 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/012_026,1.0,0.8814115,0.9995864,351_0.png
4 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/037_072,1.0,0.9821345,0.99989283,417_0.png
5 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/000_003,1.0,0.9535594,0.99987745,59_0.png
6 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/098_092,1.0,0.999397,0.9999199,277_0.png
7 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/050_059,1.0,0.75085515,0.996653,305_0.png
8 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/061_080,1.0,0.99567884,0.99989617,81_0.png
9 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/041_063,1.0,0.70468557,0.9992988,123_0.png
10 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/081_087,1.0,0.9990124,0.9999441,917_0.png
11 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/090_086,1.0,0.99695915,0.9999372,226_0.png
12 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/056_996,1.0,0.9995572,0.9999572,251_0.png
13 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/070_057,1.0,0.989082,0.45667502,123_0.png
14 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/048_029,1.0,0.9559824,0.70205104,202_0.png
15 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/065_089,1.0,0.93030244,0.99941194,351_0.png
16 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/023_923,1.0,0.949539,0.99948955,220_0.png
17 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/036_035,1.0,0.69615096,0.99965227,1_0.png
18 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/005_010,1.0,0.61336344,0.015942896,1_0.png
19 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/040_997,1.0,0.88282263,0.99933016,202_0.png
20 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/001_870,1.0,0.86329097,0.5107032,220_0.png
21 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/027_009,1.0,0.93343097,0.61815727,220_0.png
22 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/097_033,1.0,0.23502898,0.019932766,211_0.png
23 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/024_073,1.0,0.46285105,0.018816978,211_0.png
24 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/051_332,1.0,0.9427685,0.99985814,1_0.png
25 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/096_101,1.0,0.6892946,0.9984079,104_0.png
26 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/034_590,1.0,0.9942188,0.9999249,296_0.png
27 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/019_018,1.0,0.9952459,0.99989283,1124_0.png
28 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/049_946,1.0,0.9998709,0.9999435,1_0.png
29 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/062_066,1.0,0.99206984,0.9999113,232_0.png
30 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/044_945,1.0,0.99236757,0.9999416,211_0.png
31 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/075_977,1.0,0.99989253,0.9999491,383_0.png
32 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/026_012,1.0,0.99983275,0.9444504,59_0.png
33 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/087_081,1.0,0.99166346,0.9998976,251_0.png
34 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/094_111,1.0,0.94148314,0.5314232,232_0.png
35 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/042_084,1.0,0.9065502,0.9998846,276_0.png
36 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/067_025,1.0,0.99774307,0.99993,351_0.png
37 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/085_124,1.0,0.98849314,0.99992657,1_0.png
38 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/095_053,1.0,0.43460423,0.010061421,861_0.png
39 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/080_061,1.0,0.45353577,0.031513385,276_0.png
40 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/055_147,1.0,0.8053285,0.998553,123_0.png
41 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/006_002,1.0,0.90882367,0.9997415,211_0.png
42 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/011_805,1.0,0.99980116,0.960052,351_0.png
43 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/064_991,1.0,0.97447485,0.9999019,383_0.png
44 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/068_028,1.0,0.99121875,0.999949,104_0.png
45 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/039_058,1.0,0.8135086,0.99921656,232_0.png
46 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/025_067,1.0,0.78936225,0.9996443,617_0.png
47 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/029_048,1.0,0.96583384,0.9995908,202_0.png
48 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/009_027,1.0,0.8226824,0.9989717,296_0.png
49 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/058_039,1.0,0.9877168,0.9997938,305_0.png
50 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/003_000,1.0,0.6818788,0.9984464,277_0.png
51 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/072_037,1.0,0.9386086,0.9998859,276_0.png
52 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/073_024,1.0,0.998172,0.99988055,407_0.png
53 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/071_054,1.0,0.37591398,0.081180245,1_0.png
54 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/054_071,1.0,0.7225689,0.9998574,251_0.png
55 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/063_041,1.0,0.9423165,0.9997943,81_0.png
56 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/092_098,1.0,0.7888074,0.99774086,249_0.png
57 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/043_110,1.0,0.938266,0.9998698,182_0.png
58 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/053_095,1.0,0.99283284,0.99993265,1_0.png
59 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/091_116,1.0,0.95913345,0.5724267,296_0.png
60 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/078_955,1.0,0.9918315,0.9997458,211_0.png
61 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/045_889,1.0,0.99954957,0.99990916,251_0.png
62 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/010_005,1.0,0.8311966,0.99872476,313_1.png
63 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/047_862,1.0,0.72627974,0.9998479,59_0.png
64 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/038_125,1.0,0.892715,0.9998647,123_0.png
65 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/057_070,1.0,0.9381003,0.9998971,1_0.png
66 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/084_042,1.0,0.99836856,0.99986637,182_0.png
67 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/013_883,1.0,0.97603554,0.999824,202_0.png
68 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/022_489,1.0,0.92829096,0.9997539,249_0.png
69 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/004_982,1.0,0.6581943,0.99926466,251_0.png
70 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/031_163,1.0,0.99751246,0.035525393,220_0.png
71 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/069_961,1.0,0.9777355,0.9999219,305_0.png
72 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/083_213,1.0,0.72450495,0.85740644,152_2.png
73 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/014_790,1.0,0.9904176,0.82237303,582_0.png
74 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/093_121,1.0,0.42213377,0.007973996,222_0.png
75 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/052_108,1.0,0.9443796,0.99963546,305_0.png
76 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/028_068,1.0,0.88471717,0.99987423,123_0.png
77 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/002_006,1.0,0.99930304,0.9999038,603_0.png
78 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/089_065,1.0,0.98011714,0.9998386,688_0.png
79 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/077_100,1.0,0.680912,0.99148434,232_0.png
80 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/032_944,1.0,0.9998762,0.99995244,1_0.png
81 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/035_036,1.0,0.44818807,0.08445022,211_0.png
82 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/007_132,1.0,0.9886145,0.9998708,211_0.png
83 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/015_919,1.0,0.8079767,0.9974642,123_0.png
84 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/008_990,1.0,0.79204935,0.9884094,582_0.png
85 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/033_097,1.0,0.6981413,0.14643341,603_0.png
86 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/088_060,1.0,0.22599004,0.0013168117,81_0.png
87 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/059_050,1.0,0.9999019,0.9999393,1_0.png
88 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/016_209,1.0,0.8071543,0.9996675,276_0.png
89 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/018_019,1.0,0.9924224,0.9999318,407_0.png
90 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/021_312,1.0,0.9330676,0.9998037,81_0.png
91 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/046_904,1.0,0.9876259,0.9999181,123_0.png
92 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/086_090,1.0,0.840102,0.99936444,617_0.png
93 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/060_088,1.0,0.98617125,0.83748746,144_0.png
94 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/066_062,1.0,0.9442674,0.99989974,123_0.png
95 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/030_193,1.0,0.73328876,0.9996208,232_0.png
96 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/017_803,1.0,0.9996643,0.99993336,216_0.png
97 | ../deep_fakes_explain/dataset/validation_set/Original/007,0.0,0.019099837,0.0007881882,144_0.png
98 | ../deep_fakes_explain/dataset/validation_set/Original/028,0.0,0.185574,0.0010462767,123_0.png
99 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/076_079,1.0,0.9679185,0.99980754,216_0.png
100 | ../deep_fakes_explain/dataset/validation_set/Original/040,0.0,0.14705108,0.0012253801,211_0.png
101 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/074_825,1.0,0.88964283,0.99988997,276_0.png
102 | ../deep_fakes_explain/dataset/validation_set/Original/001,0.0,0.022960376,0.00025944033,351_0.png
103 | ../deep_fakes_explain/dataset/validation_set/Original/054,0.0,0.007896773,0.00034712433,296_0.png
104 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/020_344,1.0,0.99798584,0.63896877,305_0.png
105 | ../deep_fakes_explain/dataset/validation_set/Original/049,0.0,0.00958485,0.0003448864,251_0.png
106 | ../deep_fakes_explain/dataset/validation_set/Original/024,0.0,0.075362645,0.00060971914,211_0.png
107 | ../deep_fakes_explain/dataset/validation_set/Original/058,0.0,0.08808818,0.0021179626,351_0.png
108 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/079_076,1.0,0.88091993,0.99967,351_0.png
109 | ../deep_fakes_explain/dataset/validation_set/Original/032,0.0,0.3419103,0.0076987715,222_0.png
110 | ../deep_fakes_explain/dataset/validation_set/Original/064,0.0,0.7133512,0.9995623,232_0.png
111 | ../deep_fakes_explain/dataset/validation_set/Original/059,0.0,0.045393225,0.00040419164,232_0.png
112 | ../deep_fakes_explain/dataset/validation_set/Original/051,0.0,0.00055479753,0.00029249367,296_0.png
113 | ../deep_fakes_explain/dataset/validation_set/Original/065,0.0,0.14039497,0.0008060557,123_0.png
114 | ../deep_fakes_explain/dataset/validation_set/Original/055,0.0,0.4262916,0.0048973216,211_0.png
115 | ../deep_fakes_explain/dataset/validation_set/Original/017,0.0,0.17780475,0.0007817579,351_0.png
116 | ../deep_fakes_explain/dataset/validation_set/Original/083,0.0,0.73791677,0.9039909,426_1.png
117 | ../deep_fakes_explain/dataset/validation_set/Original/078,0.0,0.048326626,0.0010874697,216_0.png
118 | ../deep_fakes_explain/dataset/validation_set/Original/097,0.0,0.27873847,0.009365472,232_0.png
119 | ../deep_fakes_explain/dataset/validation_set/Deepfakes/099_295,1.0,0.9976078,0.99993885,351_0.png
120 | ../deep_fakes_explain/dataset/validation_set/Original/084,0.0,0.13298994,0.0010836126,81_0.png
121 | ../deep_fakes_explain/dataset/validation_set/Original/012,0.0,0.13121295,0.000598288,276_0.png
122 | ../deep_fakes_explain/dataset/validation_set/Original/067,0.0,0.08112078,0.001752552,417_0.png
123 | ../deep_fakes_explain/dataset/validation_set/Original/004,0.0,0.20794038,0.0009891909,182_0.png
124 | ../deep_fakes_explain/dataset/validation_set/Original/096,0.0,0.026038436,0.0003648388,249_0.png
125 | ../deep_fakes_explain/dataset/validation_set/Original/092,0.0,0.0626341,0.00041590809,202_0.png
126 | ../deep_fakes_explain/dataset/validation_set/Original/009,0.0,0.718277,0.9998516,1_0.png
127 | ../deep_fakes_explain/dataset/validation_set/Original/057,0.0,0.007726637,0.00031345533,123_0.png
128 | ../deep_fakes_explain/dataset/validation_set/Original/087,0.0,0.07644729,0.00046623856,232_0.png
129 | ../deep_fakes_explain/dataset/validation_set/Original/061,0.0,0.0658887,0.00070842874,232_0.png
130 | ../deep_fakes_explain/dataset/validation_set/Original/029,0.0,0.26350927,0.012760827,144_0.png
131 | ../deep_fakes_explain/dataset/validation_set/Original/091,0.0,0.16075934,0.030915344,144_0.png
132 | ../deep_fakes_explain/dataset/validation_set/Original/046,0.0,0.038286112,0.0005906091,220_0.png
133 | ../deep_fakes_explain/dataset/validation_set/Original/002,0.0,0.0066675185,0.00038684814,603_0.png
134 | ../deep_fakes_explain/dataset/validation_set/Original/003,0.0,0.0064405273,0.0004076487,110_0.png
135 | ../deep_fakes_explain/dataset/validation_set/Original/086,0.0,0.36419308,0.041170016,667_0.png
136 | ../deep_fakes_explain/dataset/validation_set/Original/034,0.0,0.0520717,0.00039743716,383_0.png
137 | ../deep_fakes_explain/dataset/validation_set/Original/038,0.0,0.05089863,0.0014802099,222_0.png
138 | ../deep_fakes_explain/dataset/validation_set/Original/021,0.0,0.30078745,0.011267794,305_0.png
139 | ../deep_fakes_explain/dataset/validation_set/Original/025,0.0,0.5110241,0.0058552963,81_0.png
140 | ../deep_fakes_explain/dataset/validation_set/Original/047,0.0,0.47321573,0.015360911,144_0.png
141 | ../deep_fakes_explain/dataset/validation_set/Original/015,0.0,0.08380942,0.004673872,296_0.png
142 | ../deep_fakes_explain/dataset/validation_set/Original/085,0.0,0.015574703,0.00019339214,211_0.png
143 | ../deep_fakes_explain/dataset/validation_set/Original/081,0.0,0.04286192,0.00051653024,232_0.png
144 | ../deep_fakes_explain/dataset/validation_set/Original/073,0.0,0.14701243,0.00028651382,603_0.png
145 | ../deep_fakes_explain/dataset/validation_set/Original/008,0.0,0.012466999,0.00033319776,182_0.png
146 | ../deep_fakes_explain/dataset/validation_set/Original/022,0.0,0.30402783,0.005144331,249_0.png
147 | ../deep_fakes_explain/dataset/validation_set/Original/068,0.0,0.27261528,0.0026739545,123_0.png
148 | ../deep_fakes_explain/dataset/validation_set/Original/014,0.0,0.23352577,0.21293849,620_0.png
149 | ../deep_fakes_explain/dataset/validation_set/Original/094,0.0,0.41570425,0.10956628,162_1.png
150 | ../deep_fakes_explain/dataset/validation_set/Original/053,0.0,0.081394464,0.00030341017,211_0.png
151 | ../deep_fakes_explain/dataset/validation_set/Original/005,0.0,0.032066744,0.0016819175,305_1.png
152 | ../deep_fakes_explain/dataset/validation_set/Original/018,0.0,0.2568315,0.0008767645,202_0.png
153 | ../deep_fakes_explain/dataset/validation_set/Original/027,0.0,0.66694665,0.66694665,143_1.png
154 | ../deep_fakes_explain/dataset/validation_set/Original/050,0.0,0.3901606,0.0632311,211_0.png
155 | ../deep_fakes_explain/dataset/validation_set/Original/013,0.0,0.011166614,0.0005450935,123_0.png
156 | ../deep_fakes_explain/dataset/validation_set/Original/066,0.0,0.032823216,0.0015409103,305_0.png
157 | ../deep_fakes_explain/dataset/validation_set/Original/060,0.0,0.23908357,0.03749671,1_0.png
158 | ../deep_fakes_explain/dataset/validation_set/Original/088,0.0,0.025390005,0.0005013456,351_0.png
159 | ../deep_fakes_explain/dataset/validation_set/Original/072,0.0,0.31113556,0.010056663,251_0.png
160 | ../deep_fakes_explain/dataset/validation_set/Original/062,0.0,0.17610374,0.000440764,305_0.png
161 | ../deep_fakes_explain/dataset/validation_set/Original/076,0.0,0.15731053,0.000777189,81_0.png
162 | ../deep_fakes_explain/dataset/validation_set/Original/019,0.0,0.42165923,0.034127377,220_0.png
163 | ../deep_fakes_explain/dataset/validation_set/Original/098,0.0,0.024264248,0.0010942505,216_0.png
164 | ../deep_fakes_explain/dataset/validation_set/Original/041,0.0,0.074100666,0.0005968545,110_0.png
165 | ../deep_fakes_explain/dataset/validation_set/Original/037,0.0,0.026885135,0.00034899914,182_0.png
166 | ../deep_fakes_explain/dataset/validation_set/Original/075,0.0,0.0037737724,0.00022003727,276_0.png
167 | ../deep_fakes_explain/dataset/validation_set/Original/063,0.0,0.16128919,0.0008019851,276_0.png
168 | ../deep_fakes_explain/dataset/validation_set/Original/023,0.0,0.20741005,0.31245244,220_0.png
169 | ../deep_fakes_explain/dataset/validation_set/Original/035,0.0,0.28481936,0.0022529801,182_0.png
170 | ../deep_fakes_explain/dataset/validation_set/Original/016,0.0,0.11497501,0.0067070387,658_1.png
171 | ../deep_fakes_explain/dataset/validation_set/Original/033,0.0,0.03947296,0.012914691,603_0.png
172 | ../deep_fakes_explain/dataset/validation_set/Original/077,0.0,0.40542197,0.0038475338,202_0.png
173 | ../deep_fakes_explain/dataset/validation_set/Original/082,0.0,0.0011867476,0.00029767226,251_0.png
174 | ../deep_fakes_explain/dataset/validation_set/Original/036,0.0,0.33824733,0.018248927,249_0.png
175 | ../deep_fakes_explain/dataset/validation_set/Original/011,0.0,0.9034193,0.99649733,447_1.png
176 | ../deep_fakes_explain/dataset/validation_set/Original/042,0.0,0.018161891,0.00016859183,104_0.png
177 | ../deep_fakes_explain/dataset/validation_set/Original/089,0.0,0.4474884,0.0074283937,296_0.png
178 | ../deep_fakes_explain/dataset/validation_set/Original/079,0.0,0.3315651,0.003137986,305_0.png
179 | ../deep_fakes_explain/dataset/validation_set/Original/056,0.0,0.48727208,0.0068734577,296_0.png
180 | ../deep_fakes_explain/dataset/validation_set/Original/080,0.0,0.32498786,0.0070975744,351_0.png
181 | ../deep_fakes_explain/dataset/validation_set/Original/048,0.0,0.082124576,0.0020456014,104_0.png
182 | ../deep_fakes_explain/dataset/validation_set/Original/010,0.0,0.2605654,0.0004641971,417_0.png
183 | ../deep_fakes_explain/dataset/validation_set/Original/095,0.0,0.37147158,0.31249788,681_1.png
184 | ../deep_fakes_explain/dataset/validation_set/Original/000,0.0,0.022572933,0.0005651052,351_0.png
185 | ../deep_fakes_explain/dataset/validation_set/Original/074,0.0,0.110990286,0.0074868826,216_0.png
186 | ../deep_fakes_explain/dataset/validation_set/Original/026,0.0,0.27066436,0.097405404,123_0.png
187 | ../deep_fakes_explain/dataset/validation_set/Original/006,0.0,0.09833457,0.0009872802,110_0.png
188 | ../deep_fakes_explain/dataset/validation_set/Original/093,0.0,0.20884037,0.0005126162,59_0.png
189 | ../deep_fakes_explain/dataset/validation_set/Original/044,0.0,0.23673336,0.0047577643,216_0.png
190 | ../deep_fakes_explain/dataset/validation_set/Original/052,0.0,0.0016064163,0.0002879839,220_0.png
191 | ../deep_fakes_explain/dataset/validation_set/Original/039,0.0,0.22989762,0.00060701167,351_0.png
192 | ../deep_fakes_explain/dataset/validation_set/Original/031,0.0,0.13467361,0.06395149,53_1.png
193 | ../deep_fakes_explain/dataset/validation_set/Original/090,0.0,0.023865988,0.00050994504,232_0.png
194 | ../deep_fakes_explain/dataset/validation_set/Original/070,0.0,0.15479012,0.06447731,220_0.png
195 | ../deep_fakes_explain/dataset/validation_set/Original/071,0.0,0.3493236,0.08828545,104_0.png
196 | ../deep_fakes_explain/dataset/validation_set/Original/020,0.0,0.21335758,0.113004334,351_0.png
197 | ../deep_fakes_explain/dataset/validation_set/Original/043,0.0,0.7589952,0.9970618,211_0.png
198 | ../deep_fakes_explain/dataset/validation_set/Original/045,0.0,0.12082268,0.0041052303,59_0.png
199 | ../deep_fakes_explain/dataset/validation_set/Original/099,0.0,0.23495859,0.45832977,220_0.png
200 | ../deep_fakes_explain/dataset/validation_set/Original/069,0.0,0.16267096,0.0014619352,744_0.png
201 | ../deep_fakes_explain/dataset/validation_set/Original/030,0.0,0.07430573,0.0006902066,1_0.png
202 |
--------------------------------------------------------------------------------
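A minimal sketch (not a repository file), assuming pandas and scikit-learn, of how the per-video columns above could be re-aggregated: video_prob appears to be the model's per-video fake probability and label the ground truth, so thresholding at an assumed 0.5 approximates the video-level accuracy and F1 encoded in the test result filenames.

import pandas as pd
from sklearn.metrics import accuracy_score, f1_score

df = pd.read_csv("samples_list_Deepfakes_efficientnetB0_checkpoint89_All.csv")
preds = (df["video_prob"] > 0.5).astype(int)   # per-video decision at an assumed 0.5 threshold
labels = df["label"].astype(int)
print(len(df), "videos  acc:", accuracy_score(labels, preds), " f1:", f1_score(labels, preds))

--------------------------------------------------------------------------------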
/model_test_train/results/tests/samples_list_Face2Face_efficientnetB0_checkpoint89_All.csv:
--------------------------------------------------------------------------------
1 | video_name,label,video_prob,high_low_prob,example_file_name
2 | ../deep_fakes_explain/dataset/validation_set/Face2Face/081_087,1.0,0.9993543,0.99993837,216_0.png
3 | ../deep_fakes_explain/dataset/validation_set/Face2Face/041_063,1.0,0.9785679,0.9998685,226_0.png
4 | ../deep_fakes_explain/dataset/validation_set/Face2Face/082_103,1.0,0.99880105,0.9999316,202_0.png
5 | ../deep_fakes_explain/dataset/validation_set/Face2Face/050_059,1.0,0.9996274,0.9999225,643_0.png
6 | ../deep_fakes_explain/dataset/validation_set/Face2Face/037_072,1.0,0.9838988,0.9999373,276_0.png
7 | ../deep_fakes_explain/dataset/validation_set/Face2Face/061_080,1.0,0.9255873,0.9999198,220_0.png
8 | ../deep_fakes_explain/dataset/validation_set/Face2Face/012_026,1.0,0.8670556,0.9997371,1_0.png
9 | ../deep_fakes_explain/dataset/validation_set/Face2Face/000_003,1.0,0.9623454,0.99992347,222_0.png
10 | ../deep_fakes_explain/dataset/validation_set/Face2Face/098_092,1.0,0.9645497,0.9998919,81_0.png
11 | ../deep_fakes_explain/dataset/validation_set/Face2Face/090_086,1.0,0.9772458,0.999933,688_0.png
12 | ../deep_fakes_explain/dataset/validation_set/Face2Face/048_029,1.0,0.909907,0.6639859,383_0.png
13 | ../deep_fakes_explain/dataset/validation_set/Face2Face/070_057,1.0,0.90663224,0.67551965,226_0.png
14 | ../deep_fakes_explain/dataset/validation_set/Face2Face/056_996,1.0,0.99986553,0.99996185,226_0.png
15 | ../deep_fakes_explain/dataset/validation_set/Face2Face/065_089,1.0,0.9185457,0.9993106,351_0.png
16 | ../deep_fakes_explain/dataset/validation_set/Face2Face/023_923,1.0,0.99589556,0.42728955,220_0.png
17 | ../deep_fakes_explain/dataset/validation_set/Face2Face/005_010,1.0,0.9925329,0.006407785,1_0.png
18 | ../deep_fakes_explain/dataset/validation_set/Face2Face/036_035,1.0,0.99947065,0.99995065,249_0.png
19 | ../deep_fakes_explain/dataset/validation_set/Face2Face/001_870,1.0,0.96458685,0.99994004,220_0.png
20 | ../deep_fakes_explain/dataset/validation_set/Face2Face/024_073,1.0,0.39732262,0.0006280147,603_0.png
21 | ../deep_fakes_explain/dataset/validation_set/Face2Face/027_009,1.0,0.9897515,0.999923,582_0.png
22 | ../deep_fakes_explain/dataset/validation_set/Face2Face/040_997,1.0,0.9998502,0.99995434,1_0.png
23 | ../deep_fakes_explain/dataset/validation_set/Face2Face/097_033,1.0,0.9551423,0.9998647,620_0.png
24 | ../deep_fakes_explain/dataset/validation_set/Face2Face/019_018,1.0,0.99992466,0.9999577,182_0.png
25 | ../deep_fakes_explain/dataset/validation_set/Face2Face/075_977,1.0,0.9580013,0.9999063,641_0.png
26 | ../deep_fakes_explain/dataset/validation_set/Face2Face/051_332,1.0,0.9986202,0.9999442,305_0.png
27 | ../deep_fakes_explain/dataset/validation_set/Face2Face/096_101,1.0,0.9994446,0.9999447,620_0.png
28 | ../deep_fakes_explain/dataset/validation_set/Face2Face/049_946,1.0,0.9476474,0.99992645,220_0.png
29 | ../deep_fakes_explain/dataset/validation_set/Face2Face/034_590,1.0,0.9996547,0.9999559,144_0.png
30 | ../deep_fakes_explain/dataset/validation_set/Face2Face/026_012,1.0,0.8377428,0.8800669,81_0.png
31 | ../deep_fakes_explain/dataset/validation_set/Face2Face/044_945,1.0,0.745279,0.9999286,81_0.png
32 | ../deep_fakes_explain/dataset/validation_set/Face2Face/062_066,1.0,0.79223937,0.9997038,276_0.png
33 | ../deep_fakes_explain/dataset/validation_set/Face2Face/094_111,1.0,0.99995136,0.73587495,182_0.png
34 | ../deep_fakes_explain/dataset/validation_set/Face2Face/087_081,1.0,0.99978065,0.9999491,305_0.png
35 | ../deep_fakes_explain/dataset/validation_set/Face2Face/064_991,1.0,0.97923034,0.9999517,202_0.png
36 | ../deep_fakes_explain/dataset/validation_set/Face2Face/067_025,1.0,0.9307484,0.9999294,667_0.png
37 | ../deep_fakes_explain/dataset/validation_set/Face2Face/085_124,1.0,0.9565018,0.9999294,220_0.png
38 | ../deep_fakes_explain/dataset/validation_set/Face2Face/055_147,1.0,0.9998345,0.9999577,81_0.png
39 | ../deep_fakes_explain/dataset/validation_set/Face2Face/042_084,1.0,0.8931527,0.9999585,123_0.png
40 | ../deep_fakes_explain/dataset/validation_set/Face2Face/006_002,1.0,0.9607054,0.9998627,232_0.png
41 | ../deep_fakes_explain/dataset/validation_set/Face2Face/011_805,1.0,0.9998499,0.97060835,232_0.png
42 | ../deep_fakes_explain/dataset/validation_set/Face2Face/080_061,1.0,0.99948466,0.99994504,220_0.png
43 | ../deep_fakes_explain/dataset/validation_set/Face2Face/095_053,1.0,0.9571021,0.9999449,222_0.png
44 | ../deep_fakes_explain/dataset/validation_set/Face2Face/068_028,1.0,0.99881285,0.9999567,104_0.png
45 | ../deep_fakes_explain/dataset/validation_set/Face2Face/025_067,1.0,0.9999549,0.9999645,305_0.png
46 | ../deep_fakes_explain/dataset/validation_set/Face2Face/039_058,1.0,0.9993393,0.99995434,1_0.png
47 | ../deep_fakes_explain/dataset/validation_set/Face2Face/029_048,1.0,0.99988043,0.9999552,144_0.png
48 | ../deep_fakes_explain/dataset/validation_set/Face2Face/058_039,1.0,0.6618835,0.9985274,211_0.png
49 | ../deep_fakes_explain/dataset/validation_set/Face2Face/072_037,1.0,0.99994135,0.9999608,232_0.png
50 | ../deep_fakes_explain/dataset/validation_set/Face2Face/003_000,1.0,0.98691386,0.9999585,123_0.png
51 | ../deep_fakes_explain/dataset/validation_set/Face2Face/009_027,1.0,0.9995243,0.9999378,123_0.png
52 | ../deep_fakes_explain/dataset/validation_set/Face2Face/091_116,1.0,0.9263687,0.443654,305_0.png
53 | ../deep_fakes_explain/dataset/validation_set/Face2Face/071_054,1.0,0.9229421,0.99842525,216_0.png
54 | ../deep_fakes_explain/dataset/validation_set/Face2Face/054_071,1.0,0.8988191,0.99991596,144_0.png
55 | ../deep_fakes_explain/dataset/validation_set/Face2Face/063_041,1.0,0.9855728,0.99993086,251_0.png
56 | ../deep_fakes_explain/dataset/validation_set/Face2Face/043_110,1.0,0.99658346,0.99994266,305_0.png
57 | ../deep_fakes_explain/dataset/validation_set/Face2Face/073_024,1.0,0.7073568,0.9968912,296_0.png
58 | ../deep_fakes_explain/dataset/validation_set/Face2Face/092_098,1.0,0.9132754,0.9997074,251_0.png
59 | ../deep_fakes_explain/dataset/validation_set/Face2Face/053_095,1.0,0.95272654,0.9999354,232_0.png
60 | ../deep_fakes_explain/dataset/validation_set/Face2Face/078_955,1.0,0.6326073,0.99972624,211_0.png
61 | ../deep_fakes_explain/dataset/validation_set/Face2Face/045_889,1.0,0.9978971,0.99993753,110_0.png
62 | ../deep_fakes_explain/dataset/validation_set/Face2Face/010_005,1.0,0.9844553,0.9938273,313_1.png
63 | ../deep_fakes_explain/dataset/validation_set/Face2Face/038_125,1.0,0.9837858,0.9999347,182_0.png
64 | ../deep_fakes_explain/dataset/validation_set/Face2Face/047_862,1.0,0.98652285,0.9999256,383_0.png
65 | ../deep_fakes_explain/dataset/validation_set/Face2Face/057_070,1.0,0.9968749,0.9999571,296_0.png
66 | ../deep_fakes_explain/dataset/validation_set/Face2Face/013_883,1.0,0.94762415,0.9999434,211_0.png
67 | ../deep_fakes_explain/dataset/validation_set/Face2Face/084_042,1.0,0.9537098,0.9999478,211_0.png
68 | ../deep_fakes_explain/dataset/validation_set/Face2Face/069_961,1.0,0.9533931,0.9999589,249_0.png
69 | ../deep_fakes_explain/dataset/validation_set/Face2Face/052_108,1.0,0.9889684,0.9998665,226_0.png
70 | ../deep_fakes_explain/dataset/validation_set/Face2Face/004_982,1.0,0.940309,0.9999083,220_0.png
71 | ../deep_fakes_explain/dataset/validation_set/Face2Face/022_489,1.0,0.99994355,0.16492836,276_0.png
72 | ../deep_fakes_explain/dataset/validation_set/Face2Face/031_163,1.0,0.9930687,0.05967842,220_0.png
73 | ../deep_fakes_explain/dataset/validation_set/Face2Face/014_790,1.0,0.9978879,0.77111256,276_0.png
74 | ../deep_fakes_explain/dataset/validation_set/Face2Face/083_213,1.0,0.95257866,0.6739122,1_0.png
75 | ../deep_fakes_explain/dataset/validation_set/Face2Face/002_006,1.0,0.6107345,0.99982196,1_0.png
76 | ../deep_fakes_explain/dataset/validation_set/Face2Face/093_121,1.0,0.9959386,0.99994946,104_0.png
77 | ../deep_fakes_explain/dataset/validation_set/Face2Face/028_068,1.0,0.9999571,0.9999685,232_0.png
78 | ../deep_fakes_explain/dataset/validation_set/Face2Face/035_036,1.0,0.8200731,0.99929035,123_0.png
79 | ../deep_fakes_explain/dataset/validation_set/Face2Face/077_100,1.0,0.9571096,0.99994695,351_0.png
80 | ../deep_fakes_explain/dataset/validation_set/Face2Face/032_944,1.0,0.999952,0.9999682,251_0.png
81 | ../deep_fakes_explain/dataset/validation_set/Face2Face/089_065,1.0,0.9998328,0.99995697,220_0.png
82 | ../deep_fakes_explain/dataset/validation_set/Face2Face/015_919,1.0,0.97387993,0.9999285,144_0.png
83 | ../deep_fakes_explain/dataset/validation_set/Face2Face/033_097,1.0,0.56243753,0.08828699,305_0.png
84 | ../deep_fakes_explain/dataset/validation_set/Face2Face/007_132,1.0,0.61656225,0.99854493,144_0.png
85 | ../deep_fakes_explain/dataset/validation_set/Face2Face/008_990,1.0,0.71367234,0.99984443,249_0.png
86 | ../deep_fakes_explain/dataset/validation_set/Face2Face/016_209,1.0,0.3383803,0.0062941923,128_1.png
87 | ../deep_fakes_explain/dataset/validation_set/Face2Face/088_060,1.0,0.99993736,0.999961,182_0.png
88 | ../deep_fakes_explain/dataset/validation_set/Face2Face/086_090,1.0,0.9973912,0.9999443,81_0.png
89 | ../deep_fakes_explain/dataset/validation_set/Face2Face/021_312,1.0,0.997966,0.9999267,226_0.png
90 | ../deep_fakes_explain/dataset/validation_set/Face2Face/018_019,1.0,0.9987734,0.999954,296_0.png
91 | ../deep_fakes_explain/dataset/validation_set/Face2Face/059_050,1.0,0.9339062,0.99986315,211_0.png
92 | ../deep_fakes_explain/dataset/validation_set/Face2Face/030_193,1.0,0.9914908,0.9999573,1_0.png
93 | ../deep_fakes_explain/dataset/validation_set/Face2Face/066_062,1.0,0.96665937,0.9999548,220_0.png
94 | ../deep_fakes_explain/dataset/validation_set/Face2Face/060_088,1.0,0.96369356,0.95529467,617_0.png
95 | ../deep_fakes_explain/dataset/validation_set/Face2Face/046_904,1.0,0.99995464,0.9999651,211_0.png
96 | ../deep_fakes_explain/dataset/validation_set/Face2Face/017_803,1.0,0.9923334,0.99993336,59_0.png
97 | ../deep_fakes_explain/dataset/validation_set/Original/007,0.0,0.019099837,0.0007881882,144_0.png
98 | ../deep_fakes_explain/dataset/validation_set/Original/040,0.0,0.14705108,0.0012253801,211_0.png
99 | ../deep_fakes_explain/dataset/validation_set/Original/028,0.0,0.185574,0.0010462767,123_0.png
100 | ../deep_fakes_explain/dataset/validation_set/Face2Face/074_825,1.0,0.97397864,0.16826889,220_0.png
101 | ../deep_fakes_explain/dataset/validation_set/Face2Face/076_079,1.0,0.99995416,0.99997044,226_0.png
102 | ../deep_fakes_explain/dataset/validation_set/Original/001,0.0,0.022960376,0.00025944033,351_0.png
103 | ../deep_fakes_explain/dataset/validation_set/Original/054,0.0,0.007896773,0.00034712433,296_0.png
104 | ../deep_fakes_explain/dataset/validation_set/Face2Face/079_076,1.0,0.99361354,0.99995303,305_0.png
105 | ../deep_fakes_explain/dataset/validation_set/Original/049,0.0,0.00958485,0.0003448864,251_0.png
106 | ../deep_fakes_explain/dataset/validation_set/Face2Face/020_344,1.0,0.9683021,0.6952674,617_0.png
107 | ../deep_fakes_explain/dataset/validation_set/Original/058,0.0,0.08808818,0.0021179626,351_0.png
108 | ../deep_fakes_explain/dataset/validation_set/Original/064,0.0,0.7133512,0.9995623,232_0.png
109 | ../deep_fakes_explain/dataset/validation_set/Original/024,0.0,0.075362645,0.00060971914,211_0.png
110 | ../deep_fakes_explain/dataset/validation_set/Original/051,0.0,0.00055479753,0.00029249367,296_0.png
111 | ../deep_fakes_explain/dataset/validation_set/Original/059,0.0,0.045393225,0.00040419164,232_0.png
112 | ../deep_fakes_explain/dataset/validation_set/Original/065,0.0,0.14039497,0.0008060557,123_0.png
113 | ../deep_fakes_explain/dataset/validation_set/Original/032,0.0,0.3419103,0.0076987715,222_0.png
114 | ../deep_fakes_explain/dataset/validation_set/Original/004,0.0,0.20794038,0.0009891909,182_0.png
115 | ../deep_fakes_explain/dataset/validation_set/Original/017,0.0,0.17780475,0.0007817579,351_0.png
116 | ../deep_fakes_explain/dataset/validation_set/Original/097,0.0,0.27873847,0.009365472,232_0.png
117 | ../deep_fakes_explain/dataset/validation_set/Original/055,0.0,0.4262916,0.0048973216,211_0.png
118 | ../deep_fakes_explain/dataset/validation_set/Original/078,0.0,0.048326626,0.0010874697,216_0.png
119 | ../deep_fakes_explain/dataset/validation_set/Original/083,0.0,0.73791677,0.9039909,426_1.png
120 | ../deep_fakes_explain/dataset/validation_set/Original/067,0.0,0.08112078,0.001752552,417_0.png
121 | ../deep_fakes_explain/dataset/validation_set/Original/012,0.0,0.13121295,0.000598288,276_0.png
122 | ../deep_fakes_explain/dataset/validation_set/Face2Face/099_295,1.0,0.9568769,0.9999285,351_0.png
123 | ../deep_fakes_explain/dataset/validation_set/Original/009,0.0,0.718277,0.9998516,1_0.png
124 | ../deep_fakes_explain/dataset/validation_set/Original/096,0.0,0.026038436,0.0003648388,249_0.png
125 | ../deep_fakes_explain/dataset/validation_set/Original/084,0.0,0.13298994,0.0010836126,81_0.png
126 | ../deep_fakes_explain/dataset/validation_set/Original/092,0.0,0.0626341,0.00041590809,202_0.png
127 | ../deep_fakes_explain/dataset/validation_set/Original/091,0.0,0.16075934,0.030915344,144_0.png
128 | ../deep_fakes_explain/dataset/validation_set/Original/046,0.0,0.038286112,0.0005906091,220_0.png
129 | ../deep_fakes_explain/dataset/validation_set/Original/087,0.0,0.07644729,0.00046623856,232_0.png
130 | ../deep_fakes_explain/dataset/validation_set/Original/034,0.0,0.0520717,0.00039743716,383_0.png
131 | ../deep_fakes_explain/dataset/validation_set/Original/029,0.0,0.26350927,0.012760827,144_0.png
132 | ../deep_fakes_explain/dataset/validation_set/Original/057,0.0,0.007726637,0.00031345533,123_0.png
133 | ../deep_fakes_explain/dataset/validation_set/Original/061,0.0,0.0658887,0.00070842874,232_0.png
134 | ../deep_fakes_explain/dataset/validation_set/Original/003,0.0,0.0064405273,0.0004076487,110_0.png
135 | ../deep_fakes_explain/dataset/validation_set/Original/002,0.0,0.0066675185,0.00038684814,603_0.png
136 | ../deep_fakes_explain/dataset/validation_set/Original/086,0.0,0.36419308,0.041170016,667_0.png
137 | ../deep_fakes_explain/dataset/validation_set/Original/021,0.0,0.30078745,0.011267794,305_0.png
138 | ../deep_fakes_explain/dataset/validation_set/Original/038,0.0,0.05089863,0.0014802099,222_0.png
139 | ../deep_fakes_explain/dataset/validation_set/Original/015,0.0,0.08380942,0.004673872,296_0.png
140 | ../deep_fakes_explain/dataset/validation_set/Original/047,0.0,0.47321573,0.015360911,144_0.png
141 | ../deep_fakes_explain/dataset/validation_set/Original/073,0.0,0.14701243,0.00028651382,603_0.png
142 | ../deep_fakes_explain/dataset/validation_set/Original/085,0.0,0.015574703,0.00019339214,211_0.png
143 | ../deep_fakes_explain/dataset/validation_set/Original/081,0.0,0.04286192,0.00051653024,232_0.png
144 | ../deep_fakes_explain/dataset/validation_set/Original/008,0.0,0.012466999,0.00033319776,182_0.png
145 | ../deep_fakes_explain/dataset/validation_set/Original/025,0.0,0.5110241,0.0058552963,81_0.png
146 | ../deep_fakes_explain/dataset/validation_set/Original/022,0.0,0.30402783,0.005144331,249_0.png
147 | ../deep_fakes_explain/dataset/validation_set/Original/094,0.0,0.41570425,0.10956628,162_1.png
148 | ../deep_fakes_explain/dataset/validation_set/Original/005,0.0,0.032066744,0.0016819175,305_1.png
149 | ../deep_fakes_explain/dataset/validation_set/Original/053,0.0,0.081394464,0.00030341017,211_0.png
150 | ../deep_fakes_explain/dataset/validation_set/Original/014,0.0,0.23352577,0.21293849,620_0.png
151 | ../deep_fakes_explain/dataset/validation_set/Original/068,0.0,0.27261528,0.0026739545,123_0.png
152 | ../deep_fakes_explain/dataset/validation_set/Original/013,0.0,0.011166614,0.0005450935,123_0.png
153 | ../deep_fakes_explain/dataset/validation_set/Original/018,0.0,0.2568315,0.0008767645,202_0.png
154 | ../deep_fakes_explain/dataset/validation_set/Original/027,0.0,0.66694665,0.66694665,143_1.png
155 | ../deep_fakes_explain/dataset/validation_set/Original/066,0.0,0.032823216,0.0015409103,305_0.png
156 | ../deep_fakes_explain/dataset/validation_set/Original/060,0.0,0.23908357,0.03749671,1_0.png
157 | ../deep_fakes_explain/dataset/validation_set/Original/072,0.0,0.31113556,0.010056663,251_0.png
158 | ../deep_fakes_explain/dataset/validation_set/Original/050,0.0,0.3901606,0.0632311,211_0.png
159 | ../deep_fakes_explain/dataset/validation_set/Original/019,0.0,0.42165923,0.034127377,220_0.png
160 | ../deep_fakes_explain/dataset/validation_set/Original/062,0.0,0.17610374,0.000440764,305_0.png
161 | ../deep_fakes_explain/dataset/validation_set/Original/088,0.0,0.025390005,0.0005013456,351_0.png
162 | ../deep_fakes_explain/dataset/validation_set/Original/076,0.0,0.15731053,0.000777189,81_0.png
163 | ../deep_fakes_explain/dataset/validation_set/Original/098,0.0,0.024264248,0.0010942505,216_0.png
164 | ../deep_fakes_explain/dataset/validation_set/Original/041,0.0,0.074100666,0.0005968545,110_0.png
165 | ../deep_fakes_explain/dataset/validation_set/Original/023,0.0,0.20741005,0.31245244,220_0.png
166 | ../deep_fakes_explain/dataset/validation_set/Original/037,0.0,0.026885135,0.00034899914,182_0.png
167 | ../deep_fakes_explain/dataset/validation_set/Original/035,0.0,0.28481936,0.0022529801,182_0.png
168 | ../deep_fakes_explain/dataset/validation_set/Original/063,0.0,0.16128919,0.0008019851,276_0.png
169 | ../deep_fakes_explain/dataset/validation_set/Original/075,0.0,0.0037737724,0.00022003727,276_0.png
170 | ../deep_fakes_explain/dataset/validation_set/Original/033,0.0,0.03947296,0.012914691,603_0.png
171 | ../deep_fakes_explain/dataset/validation_set/Original/077,0.0,0.40542197,0.0038475338,202_0.png
172 | ../deep_fakes_explain/dataset/validation_set/Original/016,0.0,0.11497501,0.0067070387,658_1.png
173 | ../deep_fakes_explain/dataset/validation_set/Original/082,0.0,0.0011867476,0.00029767226,251_0.png
174 | ../deep_fakes_explain/dataset/validation_set/Original/042,0.0,0.018161891,0.00016859183,104_0.png
175 | ../deep_fakes_explain/dataset/validation_set/Original/036,0.0,0.33824733,0.018248927,249_0.png
176 | ../deep_fakes_explain/dataset/validation_set/Original/011,0.0,0.9034193,0.99649733,447_1.png
177 | ../deep_fakes_explain/dataset/validation_set/Original/079,0.0,0.3315651,0.003137986,305_0.png
178 | ../deep_fakes_explain/dataset/validation_set/Original/089,0.0,0.4474884,0.0074283937,296_0.png
179 | ../deep_fakes_explain/dataset/validation_set/Original/080,0.0,0.32498786,0.0070975744,351_0.png
180 | ../deep_fakes_explain/dataset/validation_set/Original/056,0.0,0.48727208,0.0068734577,296_0.png
181 | ../deep_fakes_explain/dataset/validation_set/Original/048,0.0,0.082124576,0.0020456014,104_0.png
182 | ../deep_fakes_explain/dataset/validation_set/Original/095,0.0,0.37147158,0.31249788,681_1.png
183 | ../deep_fakes_explain/dataset/validation_set/Original/010,0.0,0.2605654,0.0004641971,417_0.png
184 | ../deep_fakes_explain/dataset/validation_set/Original/000,0.0,0.022572933,0.0005651052,351_0.png
185 | ../deep_fakes_explain/dataset/validation_set/Original/074,0.0,0.110990286,0.0074868826,216_0.png
186 | ../deep_fakes_explain/dataset/validation_set/Original/006,0.0,0.09833457,0.0009872802,110_0.png
187 | ../deep_fakes_explain/dataset/validation_set/Original/026,0.0,0.27066436,0.097405404,123_0.png
188 | ../deep_fakes_explain/dataset/validation_set/Original/039,0.0,0.22989762,0.00060701167,351_0.png
189 | ../deep_fakes_explain/dataset/validation_set/Original/093,0.0,0.20884037,0.0005126162,59_0.png
190 | ../deep_fakes_explain/dataset/validation_set/Original/070,0.0,0.15479012,0.06447731,220_0.png
191 | ../deep_fakes_explain/dataset/validation_set/Original/044,0.0,0.23673336,0.0047577643,216_0.png
192 | ../deep_fakes_explain/dataset/validation_set/Original/052,0.0,0.0016064163,0.0002879839,220_0.png
193 | ../deep_fakes_explain/dataset/validation_set/Original/090,0.0,0.023865988,0.00050994504,232_0.png
194 | ../deep_fakes_explain/dataset/validation_set/Original/031,0.0,0.13467361,0.06395149,53_1.png
195 | ../deep_fakes_explain/dataset/validation_set/Original/020,0.0,0.21335758,0.113004334,351_0.png
196 | ../deep_fakes_explain/dataset/validation_set/Original/071,0.0,0.3493236,0.08828545,104_0.png
197 | ../deep_fakes_explain/dataset/validation_set/Original/043,0.0,0.7589952,0.9970618,211_0.png
198 | ../deep_fakes_explain/dataset/validation_set/Original/099,0.0,0.23495859,0.45832977,220_0.png
199 | ../deep_fakes_explain/dataset/validation_set/Original/045,0.0,0.12082268,0.0041052303,59_0.png
200 | ../deep_fakes_explain/dataset/validation_set/Original/069,0.0,0.16267096,0.0014619352,744_0.png
201 | ../deep_fakes_explain/dataset/validation_set/Original/030,0.0,0.07430573,0.0006902066,1_0.png
202 |
--------------------------------------------------------------------------------
/model_test_train/results/tests/samples_list_FaceShifter_efficientnetB0_checkpoint89_All.csv:
--------------------------------------------------------------------------------
1 | video_name,label,video_prob,high_low_prob,example_file_name
2 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/082_103,1.0,0.90958136,0.99988747,296_0.png
3 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/037_072,1.0,0.99850005,0.9999167,276_0.png
4 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/000_003,1.0,0.93593514,0.99990785,104_0.png
5 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/081_087,1.0,0.8721388,0.999863,861_0.png
6 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/061_080,1.0,0.774152,0.99912316,202_0.png
7 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/041_063,1.0,0.999887,0.9999428,59_0.png
8 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/098_092,1.0,0.99518865,0.99992967,222_0.png
9 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/050_059,1.0,0.99992085,0.9999434,1_0.png
10 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/012_026,1.0,0.99480116,0.9999324,1_0.png
11 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/070_057,1.0,0.96991533,0.21109247,220_0.png
12 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/056_996,1.0,0.99991053,0.9999454,305_0.png
13 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/065_089,1.0,0.99931306,0.99991953,211_0.png
14 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/090_086,1.0,0.9974495,0.9999504,617_0.png
15 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/048_029,1.0,0.9999132,0.6850306,123_0.png
16 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/036_035,1.0,0.99972194,0.9999409,249_0.png
17 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/023_923,1.0,0.94617695,0.3934481,220_0.png
18 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/005_010,1.0,0.99545604,0.99993265,144_0.png
19 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/001_870,1.0,0.98963827,0.47472116,220_0.png
20 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/024_073,1.0,0.5883694,0.9998685,222_0.png
21 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/027_009,1.0,0.9187239,0.99945575,232_0.png
22 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/040_997,1.0,0.9851559,0.9999372,220_0.png
23 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/051_332,1.0,0.8753263,0.99990845,582_0.png
24 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/097_033,1.0,0.88826525,0.9998578,104_0.png
25 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/019_018,1.0,0.9931197,0.99993813,667_0.png
26 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/034_590,1.0,0.98747915,0.9999471,81_0.png
27 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/096_101,1.0,0.9680503,0.99990034,104_0.png
28 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/075_977,1.0,0.88203156,0.9998697,81_0.png
29 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/026_012,1.0,0.8261243,0.9093747,59_0.png
30 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/044_945,1.0,0.9931838,0.9999374,296_0.png
31 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/049_946,1.0,0.55387527,0.98697335,1_0.png
32 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/062_066,1.0,0.99565583,0.9999076,351_0.png
33 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/087_081,1.0,0.9161461,0.99990606,104_0.png
34 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/042_084,1.0,0.99993736,0.99995375,296_0.png
35 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/094_111,1.0,0.9360259,0.5672142,407_0.png
36 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/067_025,1.0,0.9999359,0.9999542,276_0.png
37 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/055_147,1.0,0.9997498,0.9999459,222_0.png
38 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/068_028,1.0,0.9992533,0.9999275,59_0.png
39 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/064_991,1.0,0.9482915,0.999853,226_0.png
40 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/085_124,1.0,0.9703084,0.99990535,216_0.png
41 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/095_053,1.0,0.98529273,0.99989116,276_0.png
42 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/080_061,1.0,0.87726384,0.9990879,220_0.png
43 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/006_002,1.0,0.99958354,0.9999386,59_0.png
44 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/025_067,1.0,0.9837745,0.9999057,276_0.png
45 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/039_058,1.0,0.9215536,0.9997764,144_0.png
46 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/011_805,1.0,0.9459359,0.9870833,232_0.png
47 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/029_048,1.0,0.9998991,0.99994385,1_0.png
48 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/058_039,1.0,0.89839476,0.9998288,667_0.png
49 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/071_054,1.0,0.9962616,0.9127761,216_0.png
50 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/072_037,1.0,0.9023534,0.9997471,59_0.png
51 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/009_027,1.0,0.96107304,0.9999554,1_0.png
52 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/003_000,1.0,0.7056494,0.99612635,222_0.png
53 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/054_071,1.0,0.99971354,0.9999459,211_0.png
54 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/063_041,1.0,0.6419253,0.9987355,917_0.png
55 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/073_024,1.0,0.99977744,0.9999355,182_0.png
56 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/092_098,1.0,0.99245006,0.9999232,216_0.png
57 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/091_116,1.0,0.92728156,0.6415829,81_0.png
58 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/078_955,1.0,0.9993238,0.9999378,220_0.png
59 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/045_889,1.0,0.9952111,0.9999206,123_0.png
60 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/053_095,1.0,0.80479634,0.99968755,383_0.png
61 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/043_110,1.0,0.9917335,0.9999232,202_0.png
62 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/010_005,1.0,0.9367861,0.39045307,220_0.png
63 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/047_862,1.0,0.99988645,0.9999498,251_0.png
64 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/052_108,1.0,0.8768326,0.9996793,276_0.png
65 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/057_070,1.0,0.9682829,0.99995244,296_0.png
66 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/038_125,1.0,0.6014309,0.996554,110_0.png
67 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/013_883,1.0,0.8451169,0.9994696,202_0.png
68 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/022_489,1.0,0.8607715,0.9999198,251_0.png
69 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/093_121,1.0,0.9999345,0.9999515,226_0.png
70 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/004_982,1.0,0.9343319,0.99990535,104_0.png
71 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/084_042,1.0,0.9983177,0.9999404,351_0.png
72 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/069_961,1.0,0.8870767,0.99994516,276_0.png
73 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/083_213,1.0,0.8486042,0.60319984,81_0.png
74 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/014_790,1.0,0.95015955,0.70860565,582_0.png
75 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/002_006,1.0,0.99865973,0.9999237,81_0.png
76 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/031_163,1.0,0.8245697,0.045778356,220_0.png
77 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/035_036,1.0,0.81820977,0.98567075,226_0.png
78 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/032_944,1.0,0.999314,0.9999305,305_0.png
79 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/077_100,1.0,0.91432554,0.9998728,211_0.png
80 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/089_065,1.0,0.9997155,0.9999175,232_0.png
81 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/028_068,1.0,0.9446937,0.99993837,1_0.png
82 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/015_919,1.0,0.89538765,0.99974793,202_0.png
83 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/007_132,1.0,0.42592221,0.019549169,305_0.png
84 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/008_990,1.0,0.99990827,0.99995244,296_0.png
85 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/033_097,1.0,0.09880324,0.008263043,669_2.png
86 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/016_209,1.0,0.608873,0.98683584,407_0.png
87 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/059_050,1.0,0.99989605,0.9999523,688_0.png
88 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/021_312,1.0,0.84651035,0.9997774,216_0.png
89 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/088_060,1.0,0.9946872,0.9999337,182_0.png
90 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/018_019,1.0,0.91319466,0.99987245,144_0.png
91 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/066_062,1.0,0.7623327,0.9996455,220_0.png
92 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/060_088,1.0,0.79154295,0.87852883,744_0.png
93 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/086_090,1.0,0.9903777,0.9999417,182_0.png
94 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/046_904,1.0,0.9788088,0.99992204,211_0.png
95 | ../deep_fakes_explain/dataset/validation_set/Original/007,0.0,0.019099837,0.0007881882,144_0.png
96 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/030_193,1.0,0.901402,0.99987483,81_0.png
97 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/017_803,1.0,0.9993772,0.9999366,144_0.png
98 | ../deep_fakes_explain/dataset/validation_set/Original/040,0.0,0.14705108,0.0012253801,211_0.png
99 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/076_079,1.0,0.99989367,0.99994826,59_0.png
100 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/074_825,1.0,0.9965281,0.13659942,220_0.png
101 | ../deep_fakes_explain/dataset/validation_set/Original/028,0.0,0.185574,0.0010462767,123_0.png
102 | ../deep_fakes_explain/dataset/validation_set/Original/001,0.0,0.022960376,0.00025944033,351_0.png
103 | ../deep_fakes_explain/dataset/validation_set/Original/054,0.0,0.007896773,0.00034712433,296_0.png
104 | ../deep_fakes_explain/dataset/validation_set/Original/049,0.0,0.00958485,0.0003448864,251_0.png
105 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/020_344,1.0,0.9999258,0.776114,226_0.png
106 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/079_076,1.0,0.9998539,0.9999441,226_0.png
107 | ../deep_fakes_explain/dataset/validation_set/Original/024,0.0,0.075362645,0.00060971914,211_0.png
108 | ../deep_fakes_explain/dataset/validation_set/Original/058,0.0,0.08808818,0.0021179626,351_0.png
109 | ../deep_fakes_explain/dataset/validation_set/Original/059,0.0,0.045393225,0.00040419164,232_0.png
110 | ../deep_fakes_explain/dataset/validation_set/Original/064,0.0,0.7133512,0.9995623,232_0.png
111 | ../deep_fakes_explain/dataset/validation_set/Original/051,0.0,0.00055479753,0.00029249367,296_0.png
112 | ../deep_fakes_explain/dataset/validation_set/Original/032,0.0,0.3419103,0.0076987715,222_0.png
113 | ../deep_fakes_explain/dataset/validation_set/Original/065,0.0,0.14039497,0.0008060557,123_0.png
114 | ../deep_fakes_explain/dataset/validation_set/Original/017,0.0,0.17780475,0.0007817579,351_0.png
115 | ../deep_fakes_explain/dataset/validation_set/Original/083,0.0,0.73791677,0.9039909,426_1.png
116 | ../deep_fakes_explain/dataset/validation_set/Original/078,0.0,0.048326626,0.0010874697,216_0.png
117 | ../deep_fakes_explain/dataset/validation_set/Original/097,0.0,0.27873847,0.009365472,232_0.png
118 | ../deep_fakes_explain/dataset/validation_set/Original/055,0.0,0.4262916,0.0048973216,211_0.png
119 | ../deep_fakes_explain/dataset/validation_set/Original/004,0.0,0.20794038,0.0009891909,182_0.png
120 | ../deep_fakes_explain/dataset/validation_set/Original/067,0.0,0.08112078,0.001752552,417_0.png
121 | ../deep_fakes_explain/dataset/validation_set/Original/012,0.0,0.13121295,0.000598288,276_0.png
122 | ../deep_fakes_explain/dataset/validation_set/FaceShifter/099_295,1.0,0.97704333,0.9999143,296_0.png
123 | ../deep_fakes_explain/dataset/validation_set/Original/096,0.0,0.026038436,0.0003648388,249_0.png
124 | ../deep_fakes_explain/dataset/validation_set/Original/084,0.0,0.13298994,0.0010836126,81_0.png
125 | ../deep_fakes_explain/dataset/validation_set/Original/009,0.0,0.718277,0.9998516,1_0.png
126 | ../deep_fakes_explain/dataset/validation_set/Original/092,0.0,0.0626341,0.00041590809,202_0.png
127 | ../deep_fakes_explain/dataset/validation_set/Original/091,0.0,0.16075934,0.030915344,144_0.png
128 | ../deep_fakes_explain/dataset/validation_set/Original/046,0.0,0.038286112,0.0005906091,220_0.png
129 | ../deep_fakes_explain/dataset/validation_set/Original/061,0.0,0.0658887,0.00070842874,232_0.png
130 | ../deep_fakes_explain/dataset/validation_set/Original/087,0.0,0.07644729,0.00046623856,232_0.png
131 | ../deep_fakes_explain/dataset/validation_set/Original/057,0.0,0.007726637,0.00031345533,123_0.png
132 | ../deep_fakes_explain/dataset/validation_set/Original/003,0.0,0.0064405273,0.0004076487,110_0.png
133 | ../deep_fakes_explain/dataset/validation_set/Original/029,0.0,0.26350927,0.012760827,144_0.png
134 | ../deep_fakes_explain/dataset/validation_set/Original/002,0.0,0.0066675185,0.00038684814,603_0.png
135 | ../deep_fakes_explain/dataset/validation_set/Original/034,0.0,0.0520717,0.00039743716,383_0.png
136 | ../deep_fakes_explain/dataset/validation_set/Original/021,0.0,0.30078745,0.011267794,305_0.png
137 | ../deep_fakes_explain/dataset/validation_set/Original/038,0.0,0.05089863,0.0014802099,222_0.png
138 | ../deep_fakes_explain/dataset/validation_set/Original/015,0.0,0.08380942,0.004673872,296_0.png
139 | ../deep_fakes_explain/dataset/validation_set/Original/081,0.0,0.04286192,0.00051653024,232_0.png
140 | ../deep_fakes_explain/dataset/validation_set/Original/047,0.0,0.47321573,0.015360911,144_0.png
141 | ../deep_fakes_explain/dataset/validation_set/Original/086,0.0,0.36419308,0.041170016,667_0.png
142 | ../deep_fakes_explain/dataset/validation_set/Original/025,0.0,0.5110241,0.0058552963,81_0.png
143 | ../deep_fakes_explain/dataset/validation_set/Original/085,0.0,0.015574703,0.00019339214,211_0.png
144 | ../deep_fakes_explain/dataset/validation_set/Original/008,0.0,0.012466999,0.00033319776,182_0.png
145 | ../deep_fakes_explain/dataset/validation_set/Original/073,0.0,0.14701243,0.00028651382,603_0.png
146 | ../deep_fakes_explain/dataset/validation_set/Original/022,0.0,0.30402783,0.005144331,249_0.png
147 | ../deep_fakes_explain/dataset/validation_set/Original/014,0.0,0.23352577,0.21293849,620_0.png
148 | ../deep_fakes_explain/dataset/validation_set/Original/005,0.0,0.032066744,0.0016819175,305_1.png
149 | ../deep_fakes_explain/dataset/validation_set/Original/068,0.0,0.27261528,0.0026739545,123_0.png
150 | ../deep_fakes_explain/dataset/validation_set/Original/053,0.0,0.081394464,0.00030341017,211_0.png
151 | ../deep_fakes_explain/dataset/validation_set/Original/094,0.0,0.41570425,0.10956628,162_1.png
152 | ../deep_fakes_explain/dataset/validation_set/Original/013,0.0,0.011166614,0.0005450935,123_0.png
153 | ../deep_fakes_explain/dataset/validation_set/Original/018,0.0,0.2568315,0.0008767645,202_0.png
154 | ../deep_fakes_explain/dataset/validation_set/Original/050,0.0,0.3901606,0.0632311,211_0.png
155 | ../deep_fakes_explain/dataset/validation_set/Original/027,0.0,0.66694665,0.66694665,143_1.png
156 | ../deep_fakes_explain/dataset/validation_set/Original/060,0.0,0.23908357,0.03749671,1_0.png
157 | ../deep_fakes_explain/dataset/validation_set/Original/066,0.0,0.032823216,0.0015409103,305_0.png
158 | ../deep_fakes_explain/dataset/validation_set/Original/072,0.0,0.31113556,0.010056663,251_0.png
159 | ../deep_fakes_explain/dataset/validation_set/Original/088,0.0,0.025390005,0.0005013456,351_0.png
160 | ../deep_fakes_explain/dataset/validation_set/Original/062,0.0,0.17610374,0.000440764,305_0.png
161 | ../deep_fakes_explain/dataset/validation_set/Original/019,0.0,0.42165923,0.034127377,220_0.png
162 | ../deep_fakes_explain/dataset/validation_set/Original/041,0.0,0.074100666,0.0005968545,110_0.png
163 | ../deep_fakes_explain/dataset/validation_set/Original/076,0.0,0.15731053,0.000777189,81_0.png
164 | ../deep_fakes_explain/dataset/validation_set/Original/098,0.0,0.024264248,0.0010942505,216_0.png
165 | ../deep_fakes_explain/dataset/validation_set/Original/063,0.0,0.16128919,0.0008019851,276_0.png
166 | ../deep_fakes_explain/dataset/validation_set/Original/037,0.0,0.026885135,0.00034899914,182_0.png
167 | ../deep_fakes_explain/dataset/validation_set/Original/023,0.0,0.20741005,0.31245244,220_0.png
168 | ../deep_fakes_explain/dataset/validation_set/Original/035,0.0,0.28481936,0.0022529801,182_0.png
169 | ../deep_fakes_explain/dataset/validation_set/Original/033,0.0,0.03947296,0.012914691,603_0.png
170 | ../deep_fakes_explain/dataset/validation_set/Original/075,0.0,0.0037737724,0.00022003727,276_0.png
171 | ../deep_fakes_explain/dataset/validation_set/Original/082,0.0,0.0011867476,0.00029767226,251_0.png
172 | ../deep_fakes_explain/dataset/validation_set/Original/016,0.0,0.11497501,0.0067070387,658_1.png
173 | ../deep_fakes_explain/dataset/validation_set/Original/036,0.0,0.33824733,0.018248927,249_0.png
174 | ../deep_fakes_explain/dataset/validation_set/Original/077,0.0,0.40542197,0.0038475338,202_0.png
175 | ../deep_fakes_explain/dataset/validation_set/Original/042,0.0,0.018161891,0.00016859183,104_0.png
176 | ../deep_fakes_explain/dataset/validation_set/Original/079,0.0,0.3315651,0.003137986,305_0.png
177 | ../deep_fakes_explain/dataset/validation_set/Original/080,0.0,0.32498786,0.0070975744,351_0.png
178 | ../deep_fakes_explain/dataset/validation_set/Original/011,0.0,0.9034193,0.99649733,447_1.png
179 | ../deep_fakes_explain/dataset/validation_set/Original/056,0.0,0.48727208,0.0068734577,296_0.png
180 | ../deep_fakes_explain/dataset/validation_set/Original/089,0.0,0.4474884,0.0074283937,296_0.png
181 | ../deep_fakes_explain/dataset/validation_set/Original/048,0.0,0.082124576,0.0020456014,104_0.png
182 | ../deep_fakes_explain/dataset/validation_set/Original/010,0.0,0.2605654,0.0004641971,417_0.png
183 | ../deep_fakes_explain/dataset/validation_set/Original/095,0.0,0.37147158,0.31249788,681_1.png
184 | ../deep_fakes_explain/dataset/validation_set/Original/026,0.0,0.27066436,0.097405404,123_0.png
185 | ../deep_fakes_explain/dataset/validation_set/Original/006,0.0,0.09833457,0.0009872802,110_0.png
186 | ../deep_fakes_explain/dataset/validation_set/Original/000,0.0,0.022572933,0.0005651052,351_0.png
187 | ../deep_fakes_explain/dataset/validation_set/Original/074,0.0,0.110990286,0.0074868826,216_0.png
188 | ../deep_fakes_explain/dataset/validation_set/Original/044,0.0,0.23673336,0.0047577643,216_0.png
189 | ../deep_fakes_explain/dataset/validation_set/Original/052,0.0,0.0016064163,0.0002879839,220_0.png
190 | ../deep_fakes_explain/dataset/validation_set/Original/093,0.0,0.20884037,0.0005126162,59_0.png
191 | ../deep_fakes_explain/dataset/validation_set/Original/039,0.0,0.22989762,0.00060701167,351_0.png
192 | ../deep_fakes_explain/dataset/validation_set/Original/070,0.0,0.15479012,0.06447731,220_0.png
193 | ../deep_fakes_explain/dataset/validation_set/Original/090,0.0,0.023865988,0.00050994504,232_0.png
194 | ../deep_fakes_explain/dataset/validation_set/Original/071,0.0,0.3493236,0.08828545,104_0.png
195 | ../deep_fakes_explain/dataset/validation_set/Original/031,0.0,0.13467361,0.06395149,53_1.png
196 | ../deep_fakes_explain/dataset/validation_set/Original/020,0.0,0.21335758,0.113004334,351_0.png
197 | ../deep_fakes_explain/dataset/validation_set/Original/043,0.0,0.7589952,0.9970618,211_0.png
198 | ../deep_fakes_explain/dataset/validation_set/Original/099,0.0,0.23495859,0.45832977,220_0.png
199 | ../deep_fakes_explain/dataset/validation_set/Original/045,0.0,0.12082268,0.0041052303,59_0.png
200 | ../deep_fakes_explain/dataset/validation_set/Original/069,0.0,0.16267096,0.0014619352,744_0.png
201 | ../deep_fakes_explain/dataset/validation_set/Original/030,0.0,0.07430573,0.0006902066,1_0.png
202 |
--------------------------------------------------------------------------------
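Each samples_list_*.csv above shares one schema: video_name (the cropped-face folder), label (0.0 = Original, 1.0 = manipulated), video_prob (the aggregated per-video probability), high_low_prob (judging by the column name, the most extreme single-frame probability), and example_file_name (a representative frame). A minimal sketch of re-scoring such a file offline, assuming a 0.5 decision threshold and that it is run from model_test_train/:

import pandas as pd
from sklearn.metrics import accuracy_score, f1_score

# Any samples_list_*_efficientnetB0_checkpoint89_All.csv follows the same columns.
df = pd.read_csv("results/tests/samples_list_FaceShifter_efficientnetB0_checkpoint89_All.csv")

labels = df["label"].astype(int)              # 0 = Original, 1 = manipulated
preds = (df["video_prob"] > 0.5).astype(int)  # assumed 0.5 decision threshold

print("videos:", len(df))
print("video-level accuracy:", accuracy_score(labels, preds))
print("video-level F1:", f1_score(labels, preds))
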
/model_test_train/results/tests/samples_list_FaceSwap_efficientnetB0_checkpoint89_All.csv:
--------------------------------------------------------------------------------
1 | video_name,label,video_prob,high_low_prob,example_file_name
2 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/082_103,1.0,0.9418835,0.99993503,104_0.png
3 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/266_252,1.0,0.9406115,0.9999515,144_0.png
4 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/041_063,1.0,0.982821,0.9999609,1_0.png
5 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/000_003,1.0,0.9998625,0.99995685,220_0.png
6 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/037_072,1.0,0.9912635,0.9999285,296_0.png
7 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/081_087,1.0,0.999924,0.9999609,59_0.png
8 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/050_059,1.0,0.99995595,0.9999647,276_0.png
9 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/012_026,1.0,0.9998897,0.99995625,202_0.png
10 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/098_092,1.0,0.99904484,0.9999583,276_0.png
11 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/090_086,1.0,0.93464357,0.9999306,1_0.png
12 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/070_057,1.0,0.99993753,0.5407529,81_0.png
13 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/048_029,1.0,0.74930084,0.85438687,211_0.png
14 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/056_996,1.0,0.9999363,0.99996126,202_0.png
15 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/061_080,1.0,0.9231125,0.9999094,276_0.png
16 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/036_035,1.0,0.8520589,0.99994993,249_0.png
17 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/023_923,1.0,0.9999566,0.99996305,220_0.png
18 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/065_089,1.0,0.9967473,0.9999099,407_0.png
19 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/001_870,1.0,0.99994344,0.9999676,276_0.png
20 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/005_010,1.0,0.99994105,0.00095430063,220_0.png
21 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/040_997,1.0,0.9999434,0.99996006,123_0.png
22 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/027_009,1.0,0.65796787,0.9998301,123_0.png
23 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/024_073,1.0,0.95591587,0.99994123,202_0.png
24 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/051_332,1.0,0.96873325,0.9999565,603_0.png
25 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/097_033,1.0,0.99841434,0.99995685,417_0.png
26 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/096_101,1.0,0.9999208,0.99996066,305_0.png
27 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/019_018,1.0,0.9996466,0.99994326,123_0.png
28 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/034_590,1.0,0.9569311,0.9999478,182_0.png
29 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/044_945,1.0,0.99968636,0.9999368,81_0.png
30 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/049_946,1.0,0.9984186,0.9999442,1_0.png
31 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/075_977,1.0,0.99890226,0.9999331,144_0.png
32 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/062_066,1.0,0.9998503,0.9999602,202_0.png
33 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/087_081,1.0,0.9999178,0.9999614,202_0.png
34 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/026_012,1.0,0.97489804,0.86869174,104_0.png
35 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/094_111,1.0,0.9674694,0.69114316,81_0.png
36 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/067_025,1.0,0.9976903,0.9999231,417_0.png
37 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/042_084,1.0,0.99932146,0.99995995,276_0.png
38 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/055_147,1.0,0.99995834,0.99996877,226_0.png
39 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/011_805,1.0,0.9999626,0.97715896,211_0.png
40 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/085_124,1.0,0.9509066,0.9999517,296_0.png
41 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/064_991,1.0,0.9509431,0.9999418,123_0.png
42 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/080_061,1.0,0.99994296,0.99996305,251_0.png
43 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/006_002,1.0,0.9808426,0.9999342,232_0.png
44 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/095_053,1.0,0.999957,0.9999664,211_0.png
45 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/068_028,1.0,0.99970573,0.99996114,276_0.png
46 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/039_058,1.0,0.99993604,0.9999585,81_0.png
47 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/029_048,1.0,0.99970645,0.99995375,104_0.png
48 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/009_027,1.0,0.95157367,0.9999503,182_0.png
49 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/058_039,1.0,0.99986553,0.9999629,305_0.png
50 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/072_037,1.0,0.9912861,0.99995446,110_0.png
51 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/003_000,1.0,0.9993774,0.99995553,144_0.png
52 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/071_054,1.0,0.999947,0.8599656,296_0.png
53 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/091_116,1.0,0.94328326,0.44965494,296_0.png
54 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/063_041,1.0,0.9996832,0.99995863,1_0.png
55 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/073_024,1.0,0.44854924,0.0067922315,276_0.png
56 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/092_098,1.0,0.9995142,0.9999558,249_0.png
57 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/054_071,1.0,0.91258186,0.9999138,59_0.png
58 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/078_955,1.0,0.9758827,0.9999417,407_0.png
59 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/043_110,1.0,0.9989359,0.9999496,202_0.png
60 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/053_095,1.0,0.96805805,0.9998516,407_0.png
61 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/045_889,1.0,0.96801805,0.99986994,110_0.png
62 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/047_862,1.0,0.99978906,0.9999703,123_0.png
63 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/010_005,1.0,0.94998753,0.999949,313_1.png
64 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/038_125,1.0,0.98373073,0.9999466,104_0.png
65 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/057_070,1.0,0.9998399,0.9999628,226_0.png
66 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/084_042,1.0,0.9938577,0.9999343,226_0.png
67 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/013_883,1.0,0.9999389,0.9999596,123_0.png
68 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/004_982,1.0,0.999787,0.9999523,123_0.png
69 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/052_108,1.0,0.77603644,0.99989104,182_0.png
70 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/022_489,1.0,0.99923927,0.9999598,110_0.png
71 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/083_213,1.0,0.6689138,0.8569324,370_1.png
72 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/069_961,1.0,0.9479929,0.99994886,305_0.png
73 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/093_121,1.0,0.99989647,0.9999454,276_0.png
74 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/031_163,1.0,0.9931989,0.024410425,220_0.png
75 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/028_068,1.0,0.93529004,0.99995327,220_0.png
76 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/002_006,1.0,0.7706998,0.9997603,202_0.png
77 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/014_790,1.0,0.99616414,0.7824512,582_0.png
78 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/032_944,1.0,0.96768695,0.99994123,251_0.png
79 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/035_036,1.0,0.9101693,0.99993503,81_0.png
80 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/033_097,1.0,0.6247998,0.120685615,232_0.png
81 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/077_100,1.0,0.9998288,0.999943,123_0.png
82 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/089_065,1.0,0.9624959,0.9999428,182_0.png
83 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/007_132,1.0,0.8536186,0.999918,202_0.png
84 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/015_919,1.0,0.98942024,0.999956,232_0.png
85 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/008_990,1.0,0.9887442,0.9999535,226_0.png
86 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/016_209,1.0,0.5521463,0.9997633,202_0.png
87 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/088_060,1.0,0.9999428,0.99995947,633_0.png
88 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/018_019,1.0,0.99313813,0.9999583,276_0.png
89 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/059_050,1.0,0.999949,0.99996316,226_0.png
90 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/021_312,1.0,0.99985135,0.99996185,351_0.png
91 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/086_090,1.0,0.99995375,0.9999634,276_0.png
92 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/030_193,1.0,0.8051947,0.9999162,226_0.png
93 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/060_088,1.0,0.9997079,0.76683587,216_0.png
94 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/046_904,1.0,0.98961717,0.99991703,182_0.png
95 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/066_062,1.0,0.99956846,0.6163779,220_0.png
96 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/017_803,1.0,0.9998361,0.99995315,104_0.png
97 | ../deep_fakes_explain/dataset/validation_set/Original/007,0.0,0.019099837,0.0007881882,144_0.png
98 | ../deep_fakes_explain/dataset/validation_set/Original/028,0.0,0.185574,0.0010462767,123_0.png
99 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/076_079,1.0,0.9695094,0.9999472,110_0.png
100 | ../deep_fakes_explain/dataset/validation_set/Original/040,0.0,0.14705108,0.0012253801,211_0.png
101 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/074_825,1.0,0.65885776,0.11788329,220_0.png
102 | ../deep_fakes_explain/dataset/validation_set/Original/054,0.0,0.007896773,0.00034712433,296_0.png
103 | ../deep_fakes_explain/dataset/validation_set/Original/001,0.0,0.022960376,0.00025944033,351_0.png
104 | ../deep_fakes_explain/dataset/validation_set/Original/049,0.0,0.00958485,0.0003448864,251_0.png
105 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/020_344,1.0,0.90189,0.5985923,1_0.png
106 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/079_076,1.0,0.9773766,0.99994564,104_0.png
107 | ../deep_fakes_explain/dataset/validation_set/Original/058,0.0,0.08808818,0.0021179626,351_0.png
108 | ../deep_fakes_explain/dataset/validation_set/Original/024,0.0,0.075362645,0.00060971914,211_0.png
109 | ../deep_fakes_explain/dataset/validation_set/Original/064,0.0,0.7133512,0.9995623,232_0.png
110 | ../deep_fakes_explain/dataset/validation_set/Original/051,0.0,0.00055479753,0.00029249367,296_0.png
111 | ../deep_fakes_explain/dataset/validation_set/FaceSwap/099_295,1.0,0.9324791,0.9999316,211_0.png
112 | ../deep_fakes_explain/dataset/validation_set/Original/032,0.0,0.3419103,0.0076987715,222_0.png
113 | ../deep_fakes_explain/dataset/validation_set/Original/065,0.0,0.14039497,0.0008060557,123_0.png
114 | ../deep_fakes_explain/dataset/validation_set/Original/059,0.0,0.045393225,0.00040419164,232_0.png
115 | ../deep_fakes_explain/dataset/validation_set/Original/017,0.0,0.17780475,0.0007817579,351_0.png
116 | ../deep_fakes_explain/dataset/validation_set/Original/097,0.0,0.27873847,0.009365472,232_0.png
117 | ../deep_fakes_explain/dataset/validation_set/Original/055,0.0,0.4262916,0.0048973216,211_0.png
118 | ../deep_fakes_explain/dataset/validation_set/Original/004,0.0,0.20794038,0.0009891909,182_0.png
119 | ../deep_fakes_explain/dataset/validation_set/Original/078,0.0,0.048326626,0.0010874697,216_0.png
120 | ../deep_fakes_explain/dataset/validation_set/Original/083,0.0,0.73791677,0.9039909,426_1.png
121 | ../deep_fakes_explain/dataset/validation_set/Original/012,0.0,0.13121295,0.000598288,276_0.png
122 | ../deep_fakes_explain/dataset/validation_set/Original/067,0.0,0.08112078,0.001752552,417_0.png
123 | ../deep_fakes_explain/dataset/validation_set/Original/009,0.0,0.718277,0.9998516,1_0.png
124 | ../deep_fakes_explain/dataset/validation_set/Original/096,0.0,0.026038436,0.0003648388,249_0.png
125 | ../deep_fakes_explain/dataset/validation_set/Original/092,0.0,0.0626341,0.00041590809,202_0.png
126 | ../deep_fakes_explain/dataset/validation_set/Original/084,0.0,0.13298994,0.0010836126,81_0.png
127 | ../deep_fakes_explain/dataset/validation_set/Original/061,0.0,0.0658887,0.00070842874,232_0.png
128 | ../deep_fakes_explain/dataset/validation_set/Original/029,0.0,0.26350927,0.012760827,144_0.png
129 | ../deep_fakes_explain/dataset/validation_set/Original/046,0.0,0.038286112,0.0005906091,220_0.png
130 | ../deep_fakes_explain/dataset/validation_set/Original/087,0.0,0.07644729,0.00046623856,232_0.png
131 | ../deep_fakes_explain/dataset/validation_set/Original/002,0.0,0.0066675185,0.00038684814,603_0.png
132 | ../deep_fakes_explain/dataset/validation_set/Original/057,0.0,0.007726637,0.00031345533,123_0.png
133 | ../deep_fakes_explain/dataset/validation_set/Original/034,0.0,0.0520717,0.00039743716,383_0.png
134 | ../deep_fakes_explain/dataset/validation_set/Original/091,0.0,0.16075934,0.030915344,144_0.png
135 | ../deep_fakes_explain/dataset/validation_set/Original/003,0.0,0.0064405273,0.0004076487,110_0.png
136 | ../deep_fakes_explain/dataset/validation_set/Original/086,0.0,0.36419308,0.041170016,667_0.png
137 | ../deep_fakes_explain/dataset/validation_set/Original/073,0.0,0.14701243,0.00028651382,603_0.png
138 | ../deep_fakes_explain/dataset/validation_set/Original/015,0.0,0.08380942,0.004673872,296_0.png
139 | ../deep_fakes_explain/dataset/validation_set/Original/021,0.0,0.30078745,0.011267794,305_0.png
140 | ../deep_fakes_explain/dataset/validation_set/Original/047,0.0,0.47321573,0.015360911,144_0.png
141 | ../deep_fakes_explain/dataset/validation_set/Original/038,0.0,0.05089863,0.0014802099,222_0.png
142 | ../deep_fakes_explain/dataset/validation_set/Original/094,0.0,0.41570425,0.10956628,162_1.png
143 | ../deep_fakes_explain/dataset/validation_set/Original/081,0.0,0.04286192,0.00051653024,232_0.png
144 | ../deep_fakes_explain/dataset/validation_set/Original/085,0.0,0.015574703,0.00019339214,211_0.png
145 | ../deep_fakes_explain/dataset/validation_set/Original/025,0.0,0.5110241,0.0058552963,81_0.png
146 | ../deep_fakes_explain/dataset/validation_set/Original/008,0.0,0.012466999,0.00033319776,182_0.png
147 | ../deep_fakes_explain/dataset/validation_set/Original/022,0.0,0.30402783,0.005144331,249_0.png
148 | ../deep_fakes_explain/dataset/validation_set/Original/068,0.0,0.27261528,0.0026739545,123_0.png
149 | ../deep_fakes_explain/dataset/validation_set/Original/005,0.0,0.032066744,0.0016819175,305_1.png
150 | ../deep_fakes_explain/dataset/validation_set/Original/014,0.0,0.23352577,0.21293849,620_0.png
151 | ../deep_fakes_explain/dataset/validation_set/Original/053,0.0,0.081394464,0.00030341017,211_0.png
152 | ../deep_fakes_explain/dataset/validation_set/Original/066,0.0,0.032823216,0.0015409103,305_0.png
153 | ../deep_fakes_explain/dataset/validation_set/Original/027,0.0,0.66694665,0.66694665,143_1.png
154 | ../deep_fakes_explain/dataset/validation_set/Original/018,0.0,0.2568315,0.0008767645,202_0.png
155 | ../deep_fakes_explain/dataset/validation_set/Original/088,0.0,0.025390005,0.0005013456,351_0.png
156 | ../deep_fakes_explain/dataset/validation_set/Original/072,0.0,0.31113556,0.010056663,251_0.png
157 | ../deep_fakes_explain/dataset/validation_set/Original/013,0.0,0.011166614,0.0005450935,123_0.png
158 | ../deep_fakes_explain/dataset/validation_set/Original/076,0.0,0.15731053,0.000777189,81_0.png
159 | ../deep_fakes_explain/dataset/validation_set/Original/050,0.0,0.3901606,0.0632311,211_0.png
160 | ../deep_fakes_explain/dataset/validation_set/Original/060,0.0,0.23908357,0.03749671,1_0.png
161 | ../deep_fakes_explain/dataset/validation_set/Original/062,0.0,0.17610374,0.000440764,305_0.png
162 | ../deep_fakes_explain/dataset/validation_set/Original/019,0.0,0.42165923,0.034127377,220_0.png
163 | ../deep_fakes_explain/dataset/validation_set/Original/098,0.0,0.024264248,0.0010942505,216_0.png
164 | ../deep_fakes_explain/dataset/validation_set/Original/041,0.0,0.074100666,0.0005968545,110_0.png
165 | ../deep_fakes_explain/dataset/validation_set/Original/075,0.0,0.0037737724,0.00022003727,276_0.png
166 | ../deep_fakes_explain/dataset/validation_set/Original/037,0.0,0.026885135,0.00034899914,182_0.png
167 | ../deep_fakes_explain/dataset/validation_set/Original/023,0.0,0.20741005,0.31245244,220_0.png
168 | ../deep_fakes_explain/dataset/validation_set/Original/035,0.0,0.28481936,0.0022529801,182_0.png
169 | ../deep_fakes_explain/dataset/validation_set/Original/063,0.0,0.16128919,0.0008019851,276_0.png
170 | ../deep_fakes_explain/dataset/validation_set/Original/077,0.0,0.40542197,0.0038475338,202_0.png
171 | ../deep_fakes_explain/dataset/validation_set/Original/033,0.0,0.03947296,0.012914691,603_0.png
172 | ../deep_fakes_explain/dataset/validation_set/Original/016,0.0,0.11497501,0.0067070387,658_1.png
173 | ../deep_fakes_explain/dataset/validation_set/Original/082,0.0,0.0011867476,0.00029767226,251_0.png
174 | ../deep_fakes_explain/dataset/validation_set/Original/079,0.0,0.3315651,0.003137986,305_0.png
175 | ../deep_fakes_explain/dataset/validation_set/Original/036,0.0,0.33824733,0.018248927,249_0.png
176 | ../deep_fakes_explain/dataset/validation_set/Original/011,0.0,0.9034193,0.99649733,447_1.png
177 | ../deep_fakes_explain/dataset/validation_set/Original/042,0.0,0.018161891,0.00016859183,104_0.png
178 | ../deep_fakes_explain/dataset/validation_set/Original/056,0.0,0.48727208,0.0068734577,296_0.png
179 | ../deep_fakes_explain/dataset/validation_set/Original/089,0.0,0.4474884,0.0074283937,296_0.png
180 | ../deep_fakes_explain/dataset/validation_set/Original/080,0.0,0.32498786,0.0070975744,351_0.png
181 | ../deep_fakes_explain/dataset/validation_set/Original/048,0.0,0.082124576,0.0020456014,104_0.png
182 | ../deep_fakes_explain/dataset/validation_set/Original/010,0.0,0.2605654,0.0004641971,417_0.png
183 | ../deep_fakes_explain/dataset/validation_set/Original/095,0.0,0.37147158,0.31249788,681_1.png
184 | ../deep_fakes_explain/dataset/validation_set/Original/000,0.0,0.022572933,0.0005651052,351_0.png
185 | ../deep_fakes_explain/dataset/validation_set/Original/074,0.0,0.110990286,0.0074868826,216_0.png
186 | ../deep_fakes_explain/dataset/validation_set/Original/006,0.0,0.09833457,0.0009872802,110_0.png
187 | ../deep_fakes_explain/dataset/validation_set/Original/026,0.0,0.27066436,0.097405404,123_0.png
188 | ../deep_fakes_explain/dataset/validation_set/Original/093,0.0,0.20884037,0.0005126162,59_0.png
189 | ../deep_fakes_explain/dataset/validation_set/Original/039,0.0,0.22989762,0.00060701167,351_0.png
190 | ../deep_fakes_explain/dataset/validation_set/Original/031,0.0,0.13467361,0.06395149,53_1.png
191 | ../deep_fakes_explain/dataset/validation_set/Original/044,0.0,0.23673336,0.0047577643,216_0.png
192 | ../deep_fakes_explain/dataset/validation_set/Original/090,0.0,0.023865988,0.00050994504,232_0.png
193 | ../deep_fakes_explain/dataset/validation_set/Original/070,0.0,0.15479012,0.06447731,220_0.png
194 | ../deep_fakes_explain/dataset/validation_set/Original/071,0.0,0.3493236,0.08828545,104_0.png
195 | ../deep_fakes_explain/dataset/validation_set/Original/052,0.0,0.0016064163,0.0002879839,220_0.png
196 | ../deep_fakes_explain/dataset/validation_set/Original/020,0.0,0.21335758,0.113004334,351_0.png
197 | ../deep_fakes_explain/dataset/validation_set/Original/043,0.0,0.7589952,0.9970618,211_0.png
198 | ../deep_fakes_explain/dataset/validation_set/Original/045,0.0,0.12082268,0.0041052303,59_0.png
199 | ../deep_fakes_explain/dataset/validation_set/Original/069,0.0,0.16267096,0.0014619352,744_0.png
200 | ../deep_fakes_explain/dataset/validation_set/Original/099,0.0,0.23495859,0.45832977,220_0.png
201 | ../deep_fakes_explain/dataset/validation_set/Original/030,0.0,0.07430573,0.0006902066,1_0.png
202 |
--------------------------------------------------------------------------------
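These lists also make it easy to shortlist videos for visual inspection with explain_model.py; a sketch that prints the videos misclassified at the assumed 0.5 threshold together with their example frames (assuming example_file_name is a crop stored inside the video_name directory):

import os
import pandas as pd

df = pd.read_csv("results/tests/samples_list_FaceSwap_efficientnetB0_checkpoint89_All.csv")

# A video counts as misclassified when the thresholded probability disagrees with its label.
pred = (df["video_prob"] > 0.5).astype(float)
wrong = df[pred != df["label"]]

for _, row in wrong.iterrows():
    frame = os.path.join(row["video_name"], row["example_file_name"])  # assumed layout
    print(f"{row['video_name']}  label={row['label']}  video_prob={row['video_prob']:.3f}  frame={frame}")
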
/model_test_train/results/tests/samples_list_NeuralTextures_efficientnetB0_checkpoint89_All.csv:
--------------------------------------------------------------------------------
1 | video_name,label,video_prob,high_low_prob,example_file_name
2 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/037_072,1.0,0.60442233,0.9936893,220_0.png
3 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/041_063,1.0,0.85006666,0.9974849,202_0.png
4 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/050_059,1.0,0.98002785,0.99926573,249_0.png
5 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/081_087,1.0,0.99177533,0.9996444,104_0.png
6 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/082_103,1.0,0.45074797,0.015513426,104_0.png
7 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/000_003,1.0,0.6059777,0.9949115,110_0.png
8 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/012_026,1.0,0.9071121,0.9997749,220_0.png
9 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/924_917,1.0,0.8981539,0.9998572,276_0.png
10 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/098_092,1.0,0.78591543,0.99734503,59_0.png
11 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/048_029,1.0,0.65234953,0.6174884,220_0.png
12 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/070_057,1.0,0.83386165,0.9475243,216_0.png
13 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/090_086,1.0,0.9725111,0.9997973,620_0.png
14 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/056_996,1.0,0.99747247,0.9999341,226_0.png
15 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/061_080,1.0,0.8496957,0.99953425,211_0.png
16 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/036_035,1.0,0.94959354,0.9998122,110_0.png
17 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/065_089,1.0,0.92773753,0.9994405,226_0.png
18 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/023_923,1.0,0.8989978,0.9992563,144_0.png
19 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/005_010,1.0,0.9046158,0.0012587593,276_0.png
20 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/001_870,1.0,0.9253704,0.7446583,220_0.png
21 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/024_073,1.0,0.87502414,0.99962187,202_0.png
22 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/027_009,1.0,0.7497221,0.37248436,220_0.png
23 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/097_033,1.0,0.9252073,0.99967146,351_0.png
24 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/040_997,1.0,0.9535204,0.999783,216_0.png
25 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/051_332,1.0,0.6724286,0.997601,641_0.png
26 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/019_018,1.0,0.8264885,0.99965835,383_0.png
27 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/096_101,1.0,0.8800958,0.99948335,1_0.png
28 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/049_946,1.0,0.75544846,0.9996636,182_0.png
29 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/075_977,1.0,0.62691355,0.9883909,182_0.png
30 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/062_066,1.0,0.8552499,0.99983835,276_0.png
31 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/026_012,1.0,0.8326475,0.9464841,305_0.png
32 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/034_590,1.0,0.53433913,0.033848032,202_0.png
33 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/044_945,1.0,0.99745315,0.99994636,81_0.png
34 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/094_111,1.0,0.999783,0.66379625,1_0.png
35 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/087_081,1.0,0.7116348,0.99954176,1_0.png
36 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/067_025,1.0,0.88996124,0.99971336,296_0.png
37 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/055_147,1.0,0.95721847,0.99988866,202_0.png
38 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/085_124,1.0,0.41441393,0.0028793023,104_0.png
39 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/042_084,1.0,0.7242906,0.9884774,202_0.png
40 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/064_991,1.0,0.9992992,0.9999274,305_0.png
41 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/080_061,1.0,0.97397166,0.9996761,226_0.png
42 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/095_053,1.0,0.8801074,0.99985075,81_0.png
43 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/006_002,1.0,0.9551801,0.9998976,305_0.png
44 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/025_067,1.0,0.6202902,0.99700433,305_0.png
45 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/068_028,1.0,0.9394596,0.99991536,1_0.png
46 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/011_805,1.0,0.96699685,0.9817187,1_0.png
47 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/039_058,1.0,0.8754568,0.9997929,1_0.png
48 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/029_048,1.0,0.9937739,0.9999131,1_0.png
49 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/058_039,1.0,0.37760386,0.003372013,582_0.png
50 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/009_027,1.0,0.9990956,0.99993217,1_0.png
51 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/072_037,1.0,0.99887925,0.34558335,220_0.png
52 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/003_000,1.0,0.39800477,0.030361136,144_0.png
53 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/091_116,1.0,0.98830384,0.43754813,249_0.png
54 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/071_054,1.0,0.49171036,0.07032829,202_0.png
55 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/078_955,1.0,0.5832577,0.99062634,305_0.png
56 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/054_071,1.0,0.5125748,0.0023227383,232_0.png
57 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/063_041,1.0,0.53155464,0.00507657,202_0.png
58 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/073_024,1.0,0.63538873,0.99422073,276_0.png
59 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/043_110,1.0,0.9423084,0.9998919,211_0.png
60 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/092_098,1.0,0.93832684,0.9997793,104_0.png
61 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/053_095,1.0,0.74683666,0.99824905,216_0.png
62 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/045_889,1.0,0.9965792,0.99982435,110_0.png
63 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/010_005,1.0,0.8963112,0.9335056,313_1.png
64 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/047_862,1.0,0.7190617,0.99985135,1_0.png
65 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/057_070,1.0,0.9360386,0.999814,305_0.png
66 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/038_125,1.0,0.56697285,0.9956168,59_0.png
67 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/013_883,1.0,0.33959338,0.010134903,123_0.png
68 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/083_213,1.0,0.686614,0.9719553,5_1.png
69 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/084_042,1.0,0.8192197,0.9997235,104_0.png
70 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/031_163,1.0,0.8136558,0.030874804,220_0.png
71 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/069_961,1.0,0.9301962,0.99981624,305_0.png
72 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/004_982,1.0,0.42337582,0.003941756,123_0.png
73 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/022_489,1.0,0.9029474,0.7207353,220_0.png
74 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/052_108,1.0,0.55473924,0.95759696,251_0.png
75 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/093_121,1.0,0.99423754,0.9999225,144_0.png
76 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/035_036,1.0,0.8738608,0.9994105,251_0.png
77 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/002_006,1.0,0.673452,0.99620456,232_0.png
78 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/028_068,1.0,0.9990412,0.9999337,202_0.png
79 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/089_065,1.0,0.9998369,0.99993896,296_0.png
80 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/014_790,1.0,0.44762963,0.16291843,14_1.png
81 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/032_944,1.0,0.99959356,0.9999461,276_0.png
82 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/077_100,1.0,0.5585101,0.948804,351_0.png
83 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/033_097,1.0,0.077023014,0.0036224858,5_1.png
84 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/015_919,1.0,0.9378214,0.99985445,144_0.png
85 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/007_132,1.0,0.3122831,0.0034599921,144_0.png
86 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/008_990,1.0,0.2238543,0.006130754,123_0.png
87 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/016_209,1.0,0.38075912,0.0022641313,168_1.png
88 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/088_060,1.0,0.7367114,0.9983986,296_0.png
89 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/059_050,1.0,0.7435667,0.99164915,226_0.png
90 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/021_312,1.0,0.36622915,0.0058720373,220_0.png
91 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/018_019,1.0,0.93359756,0.999764,81_0.png
92 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/030_193,1.0,0.78258926,0.9998535,1_0.png
93 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/066_062,1.0,0.7645535,0.31788418,220_0.png
94 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/060_088,1.0,0.41620803,0.0716463,612_2.png
95 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/086_090,1.0,0.9654808,0.99887055,351_0.png
96 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/046_904,1.0,0.97402614,0.99982625,351_0.png
97 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/017_803,1.0,0.98199177,0.9999448,226_0.png
98 | ../deep_fakes_explain/dataset/validation_set/Original/007,0.0,0.019099837,0.0007881882,144_0.png
99 | ../deep_fakes_explain/dataset/validation_set/Original/028,0.0,0.185574,0.0010462767,123_0.png
100 | ../deep_fakes_explain/dataset/validation_set/Original/040,0.0,0.14705108,0.0012253801,211_0.png
101 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/076_079,1.0,0.99774545,0.9999343,305_0.png
102 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/074_825,1.0,0.9996678,0.99991655,110_0.png
103 | ../deep_fakes_explain/dataset/validation_set/Original/001,0.0,0.022960376,0.00025944033,351_0.png
104 | ../deep_fakes_explain/dataset/validation_set/Original/054,0.0,0.007896773,0.00034712433,296_0.png
105 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/020_344,1.0,0.97991,0.7033265,59_0.png
106 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/079_076,1.0,0.99543583,0.9999167,251_0.png
107 | ../deep_fakes_explain/dataset/validation_set/Original/049,0.0,0.00958485,0.0003448864,251_0.png
108 | ../deep_fakes_explain/dataset/validation_set/Original/024,0.0,0.075362645,0.00060971914,211_0.png
109 | ../deep_fakes_explain/dataset/validation_set/Original/058,0.0,0.08808818,0.0021179626,351_0.png
110 | ../deep_fakes_explain/dataset/validation_set/Original/064,0.0,0.7133512,0.9995623,232_0.png
111 | ../deep_fakes_explain/dataset/validation_set/Original/032,0.0,0.3419103,0.0076987715,222_0.png
112 | ../deep_fakes_explain/dataset/validation_set/Original/051,0.0,0.00055479753,0.00029249367,296_0.png
113 | ../deep_fakes_explain/dataset/validation_set/Original/059,0.0,0.045393225,0.00040419164,232_0.png
114 | ../deep_fakes_explain/dataset/validation_set/Original/065,0.0,0.14039497,0.0008060557,123_0.png
115 | ../deep_fakes_explain/dataset/validation_set/Original/017,0.0,0.17780475,0.0007817579,351_0.png
116 | ../deep_fakes_explain/dataset/validation_set/Original/097,0.0,0.27873847,0.009365472,232_0.png
117 | ../deep_fakes_explain/dataset/validation_set/Original/055,0.0,0.4262916,0.0048973216,211_0.png
118 | ../deep_fakes_explain/dataset/validation_set/Original/078,0.0,0.048326626,0.0010874697,216_0.png
119 | ../deep_fakes_explain/dataset/validation_set/Original/004,0.0,0.20794038,0.0009891909,182_0.png
120 | ../deep_fakes_explain/dataset/validation_set/Original/083,0.0,0.73791677,0.9039909,426_1.png
121 | ../deep_fakes_explain/dataset/validation_set/NeuralTextures/099_295,1.0,0.8667477,0.87208027,153_1.png
122 | ../deep_fakes_explain/dataset/validation_set/Original/012,0.0,0.13121295,0.000598288,276_0.png
123 | ../deep_fakes_explain/dataset/validation_set/Original/067,0.0,0.08112078,0.001752552,417_0.png
124 | ../deep_fakes_explain/dataset/validation_set/Original/009,0.0,0.718277,0.9998516,1_0.png
125 | ../deep_fakes_explain/dataset/validation_set/Original/084,0.0,0.13298994,0.0010836126,81_0.png
126 | ../deep_fakes_explain/dataset/validation_set/Original/096,0.0,0.026038436,0.0003648388,249_0.png
127 | ../deep_fakes_explain/dataset/validation_set/Original/091,0.0,0.16075934,0.030915344,144_0.png
128 | ../deep_fakes_explain/dataset/validation_set/Original/061,0.0,0.0658887,0.00070842874,232_0.png
129 | ../deep_fakes_explain/dataset/validation_set/Original/092,0.0,0.0626341,0.00041590809,202_0.png
130 | ../deep_fakes_explain/dataset/validation_set/Original/057,0.0,0.007726637,0.00031345533,123_0.png
131 | ../deep_fakes_explain/dataset/validation_set/Original/087,0.0,0.07644729,0.00046623856,232_0.png
132 | ../deep_fakes_explain/dataset/validation_set/Original/034,0.0,0.0520717,0.00039743716,383_0.png
133 | ../deep_fakes_explain/dataset/validation_set/Original/029,0.0,0.26350927,0.012760827,144_0.png
134 | ../deep_fakes_explain/dataset/validation_set/Original/003,0.0,0.0064405273,0.0004076487,110_0.png
135 | ../deep_fakes_explain/dataset/validation_set/Original/046,0.0,0.038286112,0.0005906091,220_0.png
136 | ../deep_fakes_explain/dataset/validation_set/Original/002,0.0,0.0066675185,0.00038684814,603_0.png
137 | ../deep_fakes_explain/dataset/validation_set/Original/086,0.0,0.36419308,0.041170016,667_0.png
138 | ../deep_fakes_explain/dataset/validation_set/Original/038,0.0,0.05089863,0.0014802099,222_0.png
139 | ../deep_fakes_explain/dataset/validation_set/Original/021,0.0,0.30078745,0.011267794,305_0.png
140 | ../deep_fakes_explain/dataset/validation_set/Original/073,0.0,0.14701243,0.00028651382,603_0.png
141 | ../deep_fakes_explain/dataset/validation_set/Original/015,0.0,0.08380942,0.004673872,296_0.png
142 | ../deep_fakes_explain/dataset/validation_set/Original/047,0.0,0.47321573,0.015360911,144_0.png
143 | ../deep_fakes_explain/dataset/validation_set/Original/081,0.0,0.04286192,0.00051653024,232_0.png
144 | ../deep_fakes_explain/dataset/validation_set/Original/085,0.0,0.015574703,0.00019339214,211_0.png
145 | ../deep_fakes_explain/dataset/validation_set/Original/094,0.0,0.41570425,0.10956628,162_1.png
146 | ../deep_fakes_explain/dataset/validation_set/Original/008,0.0,0.012466999,0.00033319776,182_0.png
147 | ../deep_fakes_explain/dataset/validation_set/Original/022,0.0,0.30402783,0.005144331,249_0.png
148 | ../deep_fakes_explain/dataset/validation_set/Original/025,0.0,0.5110241,0.0058552963,81_0.png
149 | ../deep_fakes_explain/dataset/validation_set/Original/005,0.0,0.032066744,0.0016819175,305_1.png
150 | ../deep_fakes_explain/dataset/validation_set/Original/014,0.0,0.23352577,0.21293849,620_0.png
151 | ../deep_fakes_explain/dataset/validation_set/Original/068,0.0,0.27261528,0.0026739545,123_0.png
152 | ../deep_fakes_explain/dataset/validation_set/Original/053,0.0,0.081394464,0.00030341017,211_0.png
153 | ../deep_fakes_explain/dataset/validation_set/Original/018,0.0,0.2568315,0.0008767645,202_0.png
154 | ../deep_fakes_explain/dataset/validation_set/Original/066,0.0,0.032823216,0.0015409103,305_0.png
155 | ../deep_fakes_explain/dataset/validation_set/Original/027,0.0,0.66694665,0.66694665,143_1.png
156 | ../deep_fakes_explain/dataset/validation_set/Original/060,0.0,0.23908357,0.03749671,1_0.png
157 | ../deep_fakes_explain/dataset/validation_set/Original/013,0.0,0.011166614,0.0005450935,123_0.png
158 | ../deep_fakes_explain/dataset/validation_set/Original/050,0.0,0.3901606,0.0632311,211_0.png
159 | ../deep_fakes_explain/dataset/validation_set/Original/072,0.0,0.31113556,0.010056663,251_0.png
160 | ../deep_fakes_explain/dataset/validation_set/Original/088,0.0,0.025390005,0.0005013456,351_0.png
161 | ../deep_fakes_explain/dataset/validation_set/Original/062,0.0,0.17610374,0.000440764,305_0.png
162 | ../deep_fakes_explain/dataset/validation_set/Original/019,0.0,0.42165923,0.034127377,220_0.png
163 | ../deep_fakes_explain/dataset/validation_set/Original/076,0.0,0.15731053,0.000777189,81_0.png
164 | ../deep_fakes_explain/dataset/validation_set/Original/098,0.0,0.024264248,0.0010942505,216_0.png
165 | ../deep_fakes_explain/dataset/validation_set/Original/041,0.0,0.074100666,0.0005968545,110_0.png
166 | ../deep_fakes_explain/dataset/validation_set/Original/023,0.0,0.20741005,0.31245244,220_0.png
167 | ../deep_fakes_explain/dataset/validation_set/Original/063,0.0,0.16128919,0.0008019851,276_0.png
168 | ../deep_fakes_explain/dataset/validation_set/Original/037,0.0,0.026885135,0.00034899914,182_0.png
169 | ../deep_fakes_explain/dataset/validation_set/Original/035,0.0,0.28481936,0.0022529801,182_0.png
170 | ../deep_fakes_explain/dataset/validation_set/Original/075,0.0,0.0037737724,0.00022003727,276_0.png
171 | ../deep_fakes_explain/dataset/validation_set/Original/033,0.0,0.03947296,0.012914691,603_0.png
172 | ../deep_fakes_explain/dataset/validation_set/Original/077,0.0,0.40542197,0.0038475338,202_0.png
173 | ../deep_fakes_explain/dataset/validation_set/Original/016,0.0,0.11497501,0.0067070387,658_1.png
174 | ../deep_fakes_explain/dataset/validation_set/Original/082,0.0,0.0011867476,0.00029767226,251_0.png
175 | ../deep_fakes_explain/dataset/validation_set/Original/036,0.0,0.33824733,0.018248927,249_0.png
176 | ../deep_fakes_explain/dataset/validation_set/Original/042,0.0,0.018161891,0.00016859183,104_0.png
177 | ../deep_fakes_explain/dataset/validation_set/Original/011,0.0,0.9034193,0.99649733,447_1.png
178 | ../deep_fakes_explain/dataset/validation_set/Original/079,0.0,0.3315651,0.003137986,305_0.png
179 | ../deep_fakes_explain/dataset/validation_set/Original/056,0.0,0.48727208,0.0068734577,296_0.png
180 | ../deep_fakes_explain/dataset/validation_set/Original/080,0.0,0.32498786,0.0070975744,351_0.png
181 | ../deep_fakes_explain/dataset/validation_set/Original/089,0.0,0.4474884,0.0074283937,296_0.png
182 | ../deep_fakes_explain/dataset/validation_set/Original/048,0.0,0.082124576,0.0020456014,104_0.png
183 | ../deep_fakes_explain/dataset/validation_set/Original/010,0.0,0.2605654,0.0004641971,417_0.png
184 | ../deep_fakes_explain/dataset/validation_set/Original/095,0.0,0.37147158,0.31249788,681_1.png
185 | ../deep_fakes_explain/dataset/validation_set/Original/000,0.0,0.022572933,0.0005651052,351_0.png
186 | ../deep_fakes_explain/dataset/validation_set/Original/026,0.0,0.27066436,0.097405404,123_0.png
187 | ../deep_fakes_explain/dataset/validation_set/Original/006,0.0,0.09833457,0.0009872802,110_0.png
188 | ../deep_fakes_explain/dataset/validation_set/Original/074,0.0,0.110990286,0.0074868826,216_0.png
189 | ../deep_fakes_explain/dataset/validation_set/Original/093,0.0,0.20884037,0.0005126162,59_0.png
190 | ../deep_fakes_explain/dataset/validation_set/Original/039,0.0,0.22989762,0.00060701167,351_0.png
191 | ../deep_fakes_explain/dataset/validation_set/Original/070,0.0,0.15479012,0.06447731,220_0.png
192 | ../deep_fakes_explain/dataset/validation_set/Original/044,0.0,0.23673336,0.0047577643,216_0.png
193 | ../deep_fakes_explain/dataset/validation_set/Original/031,0.0,0.13467361,0.06395149,53_1.png
194 | ../deep_fakes_explain/dataset/validation_set/Original/090,0.0,0.023865988,0.00050994504,232_0.png
195 | ../deep_fakes_explain/dataset/validation_set/Original/071,0.0,0.3493236,0.08828545,104_0.png
196 | ../deep_fakes_explain/dataset/validation_set/Original/020,0.0,0.21335758,0.113004334,351_0.png
197 | ../deep_fakes_explain/dataset/validation_set/Original/052,0.0,0.0016064163,0.0002879839,220_0.png
198 | ../deep_fakes_explain/dataset/validation_set/Original/043,0.0,0.7589952,0.9970618,211_0.png
199 | ../deep_fakes_explain/dataset/validation_set/Original/069,0.0,0.16267096,0.0014619352,744_0.png
200 | ../deep_fakes_explain/dataset/validation_set/Original/045,0.0,0.12082268,0.0041052303,59_0.png
201 | ../deep_fakes_explain/dataset/validation_set/Original/099,0.0,0.23495859,0.45832977,220_0.png
202 | ../deep_fakes_explain/dataset/validation_set/Original/030,0.0,0.07430573,0.0006902066,1_0.png
203 |
--------------------------------------------------------------------------------
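The per-method lists in results/tests can also be summarised in one pass to compare manipulation types; a sketch using the same assumed 0.5 threshold (the resulting numbers need not match the frame-level accuracies encoded in the figure filenames exactly):

import glob
import pandas as pd

rows = []
for path in sorted(glob.glob("results/tests/samples_list_*_efficientnetB0_checkpoint89_All.csv")):
    df = pd.read_csv(path)
    method = path.split("samples_list_")[1].split("_efficientnetB0")[0]
    acc = ((df["video_prob"] > 0.5).astype(float) == df["label"]).mean()
    rows.append((method, len(df), acc))

for method, n_videos, acc in rows:
    print(f"{method:<16} videos={n_videos:<4d} video-level accuracy={acc:.4f}")
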
/model_test_train/test_model.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 | from sklearn import metrics
3 | from sklearn.metrics import auc
4 | from sklearn.metrics import accuracy_score
5 | import os
6 | from shutil import copyfile
7 | import cv2
8 | import numpy as np
9 | import torch
10 |
11 | from sklearn.metrics import f1_score
12 | from albumentations import Compose, RandomBrightnessContrast, \
13 | HorizontalFlip, FancyPCA, HueSaturationValue, OneOf, ToGray, \
14 | ShiftScaleRotate, ImageCompression, PadIfNeeded, GaussNoise, GaussianBlur, Rotate
15 |
16 | from transforms.albu import IsotropicResize
17 |
18 | from utils import get_method, check_correct, resize, shuffle_dataset, get_n_params
19 | import torch.nn as nn
20 | from functools import partial
21 | from evit_model10 import EfficientViT
22 | from multiprocessing.pool import Pool
23 | from progress.bar import Bar
24 | import pandas as pd
25 | from tqdm import tqdm
26 | from multiprocessing import Manager
27 | from utils import custom_round, custom_video_round
28 | import yaml
29 | import argparse
30 | import csv
31 |
32 |
33 | RESULTS_DIR = "results"
34 | BASE_DIR = "../deep_fakes_explain"
35 | DATA_DIR = os.path.join(BASE_DIR, "dataset")
36 | TEST_DIR = os.path.join(DATA_DIR, "validation_set")
37 | OUTPUT_DIR = os.path.join(RESULTS_DIR, "tests")
38 |
39 | TEST_LABELS_PATH = os.path.join(BASE_DIR, "dataset/dfdc_test_labels.csv")
40 |
41 |
42 |
43 | if not os.path.exists(RESULTS_DIR):
44 | os.makedirs(RESULTS_DIR)
45 |
46 | if not os.path.exists(OUTPUT_DIR):
47 | os.makedirs(OUTPUT_DIR)
48 |
49 | def create_base_transform(size):
50 | return Compose([
51 | IsotropicResize(max_side=size, interpolation_down=cv2.INTER_AREA, interpolation_up=cv2.INTER_CUBIC),
52 | PadIfNeeded(min_height=size, min_width=size, border_mode=cv2.BORDER_CONSTANT),
53 | ])
54 |
55 | def save_roc_curves(correct_labels, preds, model_name, accuracy, loss, f1):
56 | plt.figure(1)
57 | plt.plot([0, 1], [0, 1], 'k--')
58 |
59 | fpr, tpr, th = metrics.roc_curve(correct_labels, preds)
60 |
61 | model_auc = auc(fpr, tpr)
62 |
63 |
64 | plt.plot(fpr, tpr, label="Model_"+ model_name + ' (area = {:.3f})'.format(model_auc))
65 |
66 | plt.xlabel('False positive rate')
67 | plt.ylabel('True positive rate')
68 | plt.title('ROC curve')
69 | plt.legend(loc='best')
70 | plt.savefig(os.path.join(OUTPUT_DIR, model_name + "_" + opt.dataset + "_acc" + str(accuracy*100) + "_loss"+str(loss)+"_f1"+str(f1)+".jpg"))
71 | plt.clf()
72 |
73 | def read_frames(video_path, videos):
74 |
75 | # Get the video label based on dataset selected
76 | method = get_method(video_path, DATA_DIR)
77 | if "Original" in video_path:
78 | label = 0.
79 | elif method == "DFDC":
80 | test_df = pd.DataFrame(pd.read_csv(TEST_LABELS_PATH))
81 | video_folder_name = os.path.basename(video_path)
82 | video_key = video_folder_name + ".mp4"
83 | label = test_df.loc[test_df['filename'] == video_key]['label'].values[0]
84 | else:
85 | label = 1.
86 |
87 | selected_frames =[]
88 |
89 | # Calculate the interval to extract the frames
90 | frames_number = len(os.listdir(video_path))
91 | frames_interval = int(frames_number / opt.frames_per_video)
92 | frames_paths = os.listdir(video_path)
93 | frames_paths_dict = {}
94 |
95 | # Group the faces with the same index, reducing the probability of skipping some faces in the same video
96 | for path in frames_paths:
97 | for i in range(0,3):
98 | if "_" + str(i) in path:
99 | if i not in frames_paths_dict.keys():
100 | frames_paths_dict[i] = [path]
101 | else:
102 | frames_paths_dict[i].append(path)
103 |
104 | # Select only the frames at a certain interval
105 | if frames_interval > 0:
106 | for key in frames_paths_dict.keys():
107 | if len(frames_paths_dict[key]) > frames_interval:
108 | frames_paths_dict[key] = frames_paths_dict[key][::frames_interval]
109 |
110 | frames_paths_dict[key] = frames_paths_dict[key][:opt.frames_per_video]
111 |
112 | for key in frames_paths_dict: #FIXME: to facilitate sampling frames for explanation
113 | selected_frames.append(frames_paths_dict[key])
114 |
115 |
116 | # Select N images from the collected frames
117 | video = {}
118 | for key in frames_paths_dict.keys():
119 | for index, frame_image in enumerate(frames_paths_dict[key]):
120 | transform = create_base_transform(config['model']['image-size'])
121 | image = transform(image=cv2.imread(os.path.join(video_path, frame_image)))['image']
122 | if len(image) > 0:
123 | if key in video:
124 | video[key].append(image)
125 | else:
126 | video[key] = [image]
127 | videos.append((video, label, video_path,selected_frames))
128 |
129 |
130 |
131 |
132 |
133 | # Main body
134 | if __name__ == "__main__":
135 |
136 | parser = argparse.ArgumentParser()
137 |
138 | parser.add_argument('--workers', default=16, type=int,
139 | help='Number of data loader workers.')
140 | parser.add_argument('--model_path', default='', type=str, metavar='PATH',
141 | help='Path to model checkpoint (default: none).')
142 | parser.add_argument('--dataset', type=str, default='All',
143 | help="Which dataset to use (Deepfakes|Face2Face|FaceShifter|FaceSwap|NeuralTexture|All)")
144 | parser.add_argument('--max_videos', type=int, default=-1,
145 | help="Maximum number of videos to use for training (default: all).")
146 | parser.add_argument('--config', type=str,
147 | help="Which configuration to use. See into 'config' folder.")
148 | parser.add_argument('--efficient_net', type=int, default=0,
149 | help="Which EfficientNet version to use (0 or 7, default: 0)")
150 | parser.add_argument('--frames_per_video', type=int, default=20,
151 | help="How many equidistant frames for each video (default: 20)")
152 | parser.add_argument('--batch_size', type=int, default=12,
153 | help="Batch size (default: 12)")
154 |
155 | opt = parser.parse_args()
156 | print(opt)
157 |
158 | with open(opt.config, 'r') as ymlfile:
159 | config = yaml.safe_load(ymlfile)
160 |
161 | if opt.efficient_net == 0:
162 | channels = 1280
163 | else:
164 | channels = 2560
165 |
166 | if os.path.exists(opt.model_path):
167 | model = EfficientViT(config=config, channels=channels, selected_efficient_net = opt.efficient_net)
168 | model.load_state_dict(torch.load(opt.model_path))
169 | parallel_net = nn.DataParallel(model, device_ids=[0,1]) #FIXME: parallel net added to support training and testing on multiple GPUs
170 | parallel_net = parallel_net.to(0)
171 | parallel_net.eval()
172 | # model.eval()
173 | # model = model.cuda()
174 | else:
175 | print("No model found.")
176 | exit()
177 |
178 | model_name = os.path.basename(opt.model_path)
179 |
180 | if not os.path.exists(OUTPUT_DIR):
181 | os.makedirs(OUTPUT_DIR)
182 |
183 |
184 | preds = []
185 | mgr = Manager()
186 | paths = [] # a list of paths to videos
187 | videos = mgr.list()
188 |
189 |
190 | if opt.dataset == 'All':
191 | folders = ["Original", "Face2Face", "FaceShifter", "FaceSwap", "NeuralTextures", "Deepfakes"]
192 |
193 | else:
194 | folders = [opt.dataset, "Original"]
195 |
196 | for folder in folders:
197 | method_folder = os.path.join(TEST_DIR, folder)
198 | for index, video_folder in enumerate(os.listdir(method_folder)):
199 | paths.append(os.path.join(method_folder, video_folder)) # populate the list of paths to videos
200 |
201 | with Pool(processes=opt.workers) as p:
202 | with tqdm(total=len(paths)) as pbar:
203 | for v in p.imap_unordered(partial(read_frames, videos=videos),paths):
204 | pbar.update()
205 |
206 | video_names = np.asarray([row[2] for row in videos]) # full path to videos
207 | correct_test_labels = np.asarray([row[1] for row in videos])
208 | selected_frames = np.asarray([row[3] for row in videos]) # FIXME: added to facilitate the image sampling for explanation
209 | videos = np.asarray([row[0] for row in videos]) # dictionary of images per video
210 |
211 | preds = []
212 |
213 |
214 | print ('video_names', video_names)
215 |
216 | bar = Bar('Predicting \n', max=len(videos))
217 |
218 | f = open(opt.dataset + "_" + model_name + "_labels.txt", "w+")
219 |
220 | # f = open(os.path.dirname(OUTPUT_DIR)+ opt.dataset + "_" + model_name + "_labels.txt", "w+")
221 |
222 | max_min_probs = []
223 | max_min_images = []
224 | for index, video in enumerate(videos):
225 | video_faces_preds = []
226 | video_name = video_names[index]
227 | f.write(video_name)
228 | for key in video:
229 | faces_preds = []
230 | video_faces = video[key]
231 | for i in range(0, len(video_faces), opt.batch_size):
232 | faces = video_faces[i:i+opt.batch_size]
233 | faces = torch.tensor(np.asarray(faces))
234 | if faces.shape[0] == 0:
235 | continue
236 | faces = np.transpose(faces, (0, 3, 1, 2))
237 |
238 | faces = faces.float().to(0)
239 | pred = parallel_net(faces)
240 |
241 | scaled_pred = [] #FIXME: to collect the prediction score for each frame and use it to sample the highest/lowest-probability frame
242 | for idx, p in enumerate(pred):
243 | scaled_pred.append(torch.sigmoid(p))
244 | faces_preds.extend(scaled_pred)
245 |
246 | current_faces_pred = sum(faces_preds)/len(faces_preds)
247 | face_pred = current_faces_pred.cpu().detach().numpy()[0]
248 | f.write(" " + str(face_pred))
249 | video_faces_preds.append(face_pred)
250 |
251 | # FIXME: the following code samples face images for visualization - one image for each video.
252 | # If the video is classified as fake, sample the frame with the highest prediction score;
253 | # otherwise, if it is classified as real, sample the frame with the lowest prediction score.
254 |
255 | if max(video_faces_preds) > 0.55:
256 | video_faces_max_min = max(faces_preds)
257 | key = video_faces_preds.index(max(video_faces_preds))
258 | else:
259 | video_faces_max_min = min(faces_preds)
260 | key = video_faces_preds.index(min(video_faces_preds))
261 |
262 | max_min_index = faces_preds.index(video_faces_max_min)
263 | video_faces_max_min = video_faces_max_min.detach().cpu()
264 | # key = video_faces_preds.index(max(video_faces_preds))
265 | max_min_image = selected_frames[index][key][max_min_index]
266 | max_min_probs.append(np.asarray(video_faces_max_min))
267 | max_min_images.append(max_min_image)
268 |
269 | bar.next()
270 | if len(video_faces_preds) > 1:
271 | video_pred = custom_video_round(video_faces_preds)
272 |
273 | else:
274 | video_pred = video_faces_preds[0]
275 |
276 |
277 | preds.append([video_pred])
278 |
279 |
280 | f.write(" --> " + str(video_pred) + "(CORRECT: " + str(correct_test_labels[index]) + ")" +"\n")
281 |
282 | f.close()
283 | dst_name = os.path.join(OUTPUT_DIR,f.name)
284 | copyfile(f.name, dst_name)
285 | bar.finish()
286 |
287 |
288 | # FIXME: the following code creates a csv file listing all sampled frames and their corresponding info,
289 | # used to run the explanation process on these samples
290 | header = ['video_name', 'label', 'video_prob', 'high_low_prob', 'example_file_name']
291 | summary = np.asarray(video_names.reshape(len(video_names),-1))
292 | summary = np.append(summary,correct_test_labels.reshape(len(correct_test_labels),-1), axis=1)
293 | summary = np.append(summary,preds, axis=1)
294 | summary = np.append(summary, np.asarray(max_min_probs), axis=1)
295 | summary = np.append(summary,np.asarray(max_min_images).reshape(len(max_min_images),-1), axis=1)
296 |
297 |
298 | with open('samples_list'+ '_' + opt.dataset + '_' + model_name +'.csv', 'w', encoding='UTF8', newline='') as out:
299 | writer = csv.writer(out)
300 | writer.writerow(header) # write the header
301 | writer.writerows(summary) # write multiple rows
302 |
303 | dst_csv = os.path.join(OUTPUT_DIR, out.name)
304 | copyfile(out.name, dst_csv)
305 |
306 |
307 | loss_fn = torch.nn.BCEWithLogitsLoss()
308 | tensor_labels = torch.tensor([[float(label)] for label in correct_test_labels])
309 | tensor_preds = torch.tensor(preds)
310 |
311 |
312 | loss = loss_fn(tensor_preds, tensor_labels).numpy()
313 |
314 | accuracy = accuracy_score(custom_round(np.asarray(preds)), correct_test_labels)
315 |
316 | f1 = f1_score(correct_test_labels, custom_round(np.asarray(preds)))
317 | print(model_name, "Test Accuracy:", accuracy, "Loss:", loss, "F1", f1)
318 | save_roc_curves(correct_test_labels, preds, model_name, accuracy, loss, f1)
319 |
320 |
--------------------------------------------------------------------------------
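Note (illustrative, not part of the repository): test_model.py above averages the per-frame sigmoid scores of each face track, turns them into a video-level score with custom_video_round (0.55 threshold), and keeps one frame per video for the explanation step (highest score if the video is called fake, lowest if real). A minimal self-contained sketch of that decision rule, with hypothetical names:

from statistics import mean

FAKE_THRESHOLD = 0.55  # same threshold as custom_round / custom_video_round

def video_decision(track_scores):
    """track_scores: one list of sigmoid scores in [0, 1] per face track."""
    track_means = [mean(scores) for scores in track_scores]
    if len(track_means) > 1:
        # custom_video_round: first track mean above the threshold, else the overall mean
        video_score = next((m for m in track_means if m > FAKE_THRESHOLD), mean(track_means))
    else:
        video_score = track_means[0]
    label = 1 if video_score > FAKE_THRESHOLD else 0
    # frame kept for the explanation step: most confident frame in the decisive direction
    flat = [s for scores in track_scores for s in scores]
    explain_score = max(flat) if label == 1 else min(flat)
    return label, video_score, explain_score

print(video_decision([[0.2, 0.3], [0.75, 1.0]]))  # -> (1, 0.875, 1.0)
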
/model_test_train/train_model.py:
--------------------------------------------------------------------------------
1 | from efficientnet_pytorch import EfficientNet
2 | import torch
3 | from torch.utils.data import DataLoader, TensorDataset, Dataset
4 | from einops import rearrange, repeat
5 | from torch import nn, einsum
6 | import torch.nn as nn
7 | import torch.nn.functional as F
8 | from random import random, randint, choice
9 | from vit_pytorch import ViT
10 | import numpy as np
11 | from torch.optim import lr_scheduler
12 | import os
13 | import json
14 | from os import cpu_count
15 | from multiprocessing.pool import Pool
16 | from functools import partial
17 | from multiprocessing import Manager
18 | from progress.bar import ChargingBar
19 | # from efficient_vit_data_parallel7 import parallel_net
20 | from evit_model10 import EfficientViT
21 | import uuid
22 | from torch.utils.data import DataLoader, TensorDataset, Dataset
23 | from sklearn.metrics import accuracy_score
24 | import cv2
25 | from transforms.albu import IsotropicResize
26 | import glob
27 | import pandas as pd
28 | from tqdm import tqdm
29 | from utils import get_method, check_correct, resize, shuffle_dataset, get_n_params
30 | from sklearn.utils.class_weight import compute_class_weight
31 | from torch.optim.lr_scheduler import LambdaLR
32 | import collections
33 | from deepfakes_dataset import DeepFakesDataset
34 | import math
35 | import yaml
36 | import argparse
37 |
38 |
39 | BASE_DIR = '../deep_fakes_explain/'
40 | DATA_DIR = os.path.join(BASE_DIR, "dataset")
41 | TRAINING_DIR = os.path.join(DATA_DIR, "training_set")
42 | VALIDATION_DIR = os.path.join(DATA_DIR, "validation_set")
43 | TEST_DIR = os.path.join(DATA_DIR, "test_set")
44 | MODELS_PATH = os.path.join(BASE_DIR, "models")
45 | METADATA_PATH = os.path.join(BASE_DIR, "data/metadata") # Folder containing all training metadata for DFDC dataset
46 | VALIDATION_LABELS_PATH = os.path.join(DATA_DIR, "dfdc_val_labels.csv")
47 |
48 |
49 |
50 |
51 | def read_frames(video_path, train_dataset, validation_dataset):
52 | # Get the video label based on dataset selected
53 | method = get_method(video_path, DATA_DIR)
54 | if TRAINING_DIR in video_path:
55 | if "Original" in video_path:
56 | label = 0.
57 | elif "DFDC" in video_path:
58 | for json_path in glob.glob(os.path.join(METADATA_PATH, "*.json")):
59 | with open(json_path, "r") as f:
60 | metadata = json.load(f)
61 | video_folder_name = os.path.basename(video_path)
62 | video_key = video_folder_name + ".mp4"
63 | if video_key in metadata.keys():
64 | item = metadata[video_key]
65 | label = item.get("label", None)
66 | if label == "FAKE":
67 | label = 1.
68 | else:
69 | label = 0.
70 | break
71 | else:
72 | label = None
73 | else:
74 | label = 1.
75 | if label == None:
76 | print("NOT FOUND", video_path)
77 | else:
78 | if "Original" in video_path:
79 | label = 0.
80 | elif "DFDC" in video_path:
81 | val_df = pd.DataFrame(pd.read_csv(VALIDATION_LABELS_PATH))
82 | video_folder_name = os.path.basename(video_path)
83 | video_key = video_folder_name + ".mp4"
84 | label = val_df.loc[val_df['filename'] == video_key]['label'].values[0]
85 | else:
86 | label = 1.
87 |
88 | # Calculate the interval to extract the frames
89 | frames_number = len(os.listdir(video_path))
90 | if label == 0:
91 | min_video_frames = max(int(config['training']['frames-per-video'] * config['training']['rebalancing_real']),
92 | 1) # Compensate for class imbalance
93 | else:
94 | min_video_frames = max(int(config['training']['frames-per-video'] * config['training']['rebalancing_fake']), 1)
95 |
96 | if VALIDATION_DIR in video_path:
97 | min_video_frames = int(max(min_video_frames / 8, 2))
98 | frames_interval = int(frames_number / min_video_frames)
99 | frames_paths = os.listdir(video_path)
100 | frames_paths_dict = {}
101 |
102 | # Group the faces with the same index, reducing the probability of skipping some faces in the same video
103 | for path in frames_paths:
104 | for i in range(0, 1):
105 | if "_" + str(i) in path:
106 | if i not in frames_paths_dict.keys():
107 | frames_paths_dict[i] = [path]
108 | else:
109 | frames_paths_dict[i].append(path)
110 |
111 | # Select only the frames at a certain interval
112 | if frames_interval > 0:
113 | for key in frames_paths_dict.keys():
114 | if len(frames_paths_dict[key]) > frames_interval:
115 | frames_paths_dict[key] = frames_paths_dict[key][::frames_interval]
116 |
117 | frames_paths_dict[key] = frames_paths_dict[key][:min_video_frames]
118 |
119 | # Select N frames from the collected ones
120 | for key in frames_paths_dict.keys():
121 | for index, frame_image in enumerate(frames_paths_dict[key]):
122 | # image = transform(np.asarray(cv2.imread(os.path.join(video_path, frame_image))))
123 | image = cv2.imread(os.path.join(video_path, frame_image))
124 | if image is not None:
125 | if TRAINING_DIR in video_path:
126 | train_dataset.append((image, label))
127 | else:
128 | validation_dataset.append((image, label))
129 |
130 |
131 | # Main body
132 | if __name__ == "__main__":
133 |
134 | parser = argparse.ArgumentParser()
135 | parser.add_argument('--num_epochs', default=100, type=int,
136 | help='Number of training epochs.')
137 | parser.add_argument('--workers', default=16, type=int,
138 | help='Number of data loader workers.')
139 | parser.add_argument('--resume', default='', type=str, metavar='PATH',
140 | help='Path to latest checkpoint (default: none).')
141 | parser.add_argument('--dataset', type=str, default='All',
142 | help="Which dataset to use (Deepfakes|Face2Face|FaceShifter|FaceSwap|NeuralTextures|All)")
143 | parser.add_argument('--max_videos', type=int, default=-1,
144 | help="Maximum number of videos to use for training (default: all).")
145 | parser.add_argument('--config', type=str,
146 | help="Which configuration to use. See into 'config' folder.")
147 | parser.add_argument('--efficient_net', type=int, default=0,
148 | help="Which EfficientNet version to use (0 or 7, default: 0)")
149 | parser.add_argument('--patience', type=int, default=5,
150 | help="How many epochs wait before stopping for validation loss not improving.")
151 |
152 | opt = parser.parse_args()
153 | print(opt)
154 |
155 | with open(opt.config, 'r') as ymlfile:
156 | config = yaml.safe_load(ymlfile)
157 |
158 | if opt.efficient_net == 0:
159 | channels = 1280
160 | else:
161 | channels = 2560
162 |
163 | model = EfficientViT(config=config, channels=channels, selected_efficient_net=opt.efficient_net)
164 | # model_parallel = EViT_parallel(net =model)
165 | parallel_net = nn.DataParallel(model, device_ids=[0, 1])
166 | parallel_net = parallel_net.to(0)
167 | parallel_net.train()
168 |
169 | optimizer = torch.optim.SGD(parallel_net.parameters(), lr=config['training']['lr'],
170 | weight_decay=config['training']['weight-decay'])
171 | scheduler = lr_scheduler.StepLR(optimizer, step_size=config['training']['step-size'],
172 | gamma=config['training']['gamma'])
173 | starting_epoch = 0
174 | if os.path.exists(opt.resume):
175 | model.load_state_dict(torch.load(opt.resume))
176 | starting_epoch = int(opt.resume.split("checkpoint")[1].split("_")[
177 | 0]) + 1 # The checkpoint file name is expected to contain "checkpoint<EPOCH>_", e.g. "efficientnetB0_checkpoint89_All"
178 | else:
179 | print("No checkpoint loaded.")
180 |
181 | print("Model Parameters:", get_n_params(model))
182 |
183 | # READ DATASET
184 | if opt.dataset != "All" and opt.dataset != "DFDC":
185 | folders = ["Original", opt.dataset]
186 | else:
187 | folders = ["Original", "Deepfakes", "Face2Face", "FaceShifter", "FaceSwap", "NeuralTextures"] # Fixme: erased DFDC
188 |
189 | sets = [TRAINING_DIR, VALIDATION_DIR]
190 |
191 | paths = []
192 | for dataset in sets:
193 | for folder in folders:
194 | subfolder = os.path.join(dataset, folder)
195 | for index, video_folder_name in enumerate(os.listdir(subfolder)):
196 | if index == opt.max_videos:
197 | break
198 |
199 | if os.path.isdir(os.path.join(subfolder, video_folder_name)):
200 | paths.append(os.path.join(subfolder, video_folder_name))
201 |
202 | mgr = Manager()
203 | train_dataset = mgr.list()
204 | validation_dataset = mgr.list()
205 |
206 | with Pool(processes=opt.workers) as p:
207 | with tqdm(total=len(paths)) as pbar:
208 | for v in p.imap_unordered(
209 | partial(read_frames, train_dataset=train_dataset, validation_dataset=validation_dataset), paths):
210 | pbar.update()
211 |
212 | train_samples = len(train_dataset)
213 | train_dataset = shuffle_dataset(train_dataset)
214 | validation_samples = len(validation_dataset)
215 | validation_dataset = shuffle_dataset(validation_dataset)
216 |
217 | # Print some useful statistics
218 | print("Train images:", len(train_dataset), "Validation images:", len(validation_dataset))
219 | print("__TRAINING STATS__")
220 | train_counters = collections.Counter(image[1] for image in train_dataset)
221 | print(train_counters)
222 |
223 | class_weights = train_counters[0] / train_counters[1]
224 | print("Weights", class_weights)
225 |
226 | print("__VALIDATION STATS__")
227 | val_counters = collections.Counter(image[1] for image in validation_dataset)
228 | print(val_counters)
229 | print("___________________")
230 |
231 | loss_fn = torch.nn.BCEWithLogitsLoss(pos_weight=torch.tensor([class_weights]))
232 |
233 | # Create the data loaders
234 | validation_labels = np.asarray([row[1] for row in validation_dataset])
235 | labels = np.asarray([row[1] for row in train_dataset])
236 |
237 | train_dataset = DeepFakesDataset(np.asarray([row[0] for row in train_dataset]), labels,
238 | config['model']['image-size'])
239 | dl = torch.utils.data.DataLoader(train_dataset, batch_size=config['training']['bs'], shuffle=True, sampler=None,
240 | batch_sampler=None, num_workers=opt.workers, collate_fn=None,
241 | pin_memory=False, drop_last=False, timeout=0,
242 | worker_init_fn=None, prefetch_factor=2,
243 | persistent_workers=False)
244 | del train_dataset
245 |
246 | validation_dataset = DeepFakesDataset(np.asarray([row[0] for row in validation_dataset]), validation_labels,
247 | config['model']['image-size'], mode='validation')
248 | val_dl = torch.utils.data.DataLoader(validation_dataset, batch_size=config['training']['bs'], shuffle=True,
249 | sampler=None,
250 | batch_sampler=None, num_workers=opt.workers, collate_fn=None,
251 | pin_memory=False, drop_last=False, timeout=0,
252 | worker_init_fn=None, prefetch_factor=2,
253 | persistent_workers=False)
254 | del validation_dataset
255 |
256 | # model = model.cuda()
257 | counter = 0
258 | not_improved_loss = 0
259 | previous_loss = math.inf
260 | for t in range(starting_epoch, opt.num_epochs + 1):
261 | if not_improved_loss == opt.patience:
262 | break
263 | counter = 0
264 |
265 | total_loss = 0
266 | total_val_loss = 0
267 |
268 | bar = ChargingBar('EPOCH #' + str(t), max=(len(dl) * config['training']['bs']) + len(val_dl))
269 | train_correct = 0
270 | positive = 0
271 | negative = 0
272 | for index, (images, labels) in enumerate(dl):
273 | images = np.transpose(images, (0, 3, 1, 2))
274 | labels = labels.unsqueeze(1)
275 | # images = images.cuda()
276 | images = images.to(0)
277 |
278 | y_pred = parallel_net(images)
279 | y_pred = y_pred.cpu()
280 |
281 | loss = loss_fn(y_pred, labels)
282 |
283 | corrects, positive_class, negative_class = check_correct(y_pred, labels)
284 | train_correct += corrects
285 | positive += positive_class
286 | negative += negative_class
287 | optimizer.zero_grad()
288 |
289 | # loss.backward()
290 | loss.mean().backward()
291 |
292 | optimizer.step()
293 | counter += 1
294 | total_loss += round(loss.item(), 2)
295 |
296 | if index % 1200 == 0: # Intermediate metrics print
297 | print("\nLoss:", total_loss / counter, "| Accuracy:",
298 | train_correct / (counter * config['training']['bs']), "| Train reals:", negative, "| Train fakes:",
299 | positive)
300 |
301 | for i in range(config['training']['bs']):
302 | bar.next()
303 |
304 | val_correct = 0
305 | val_positive = 0
306 | val_negative = 0
307 | val_counter = 0
308 | train_correct /= train_samples
309 | total_loss /= counter
310 | for index, (val_images, val_labels) in enumerate(val_dl):
311 | val_images = np.transpose(val_images, (0, 3, 1, 2))
312 |
313 | val_images = val_images.cuda()
314 | val_labels = val_labels.unsqueeze(1)
315 | val_pred = parallel_net(val_images)
316 | val_pred = val_pred.cpu()
317 | val_loss = loss_fn(val_pred, val_labels)
318 | total_val_loss += round(val_loss.item(), 2)
319 | corrects, positive_class, negative_class = check_correct(val_pred, val_labels)
320 | val_correct += corrects
321 | val_positive += positive_class
322 | val_counter += 1
323 | val_negative += negative_class
324 | bar.next()
325 |
326 | scheduler.step()
327 | bar.finish()
328 |
329 | total_val_loss /= val_counter
330 | val_correct /= validation_samples
331 | if previous_loss <= total_val_loss:
332 | print("Validation loss did not improved")
333 | not_improved_loss += 1
334 | else:
335 | not_improved_loss = 0
336 |
337 | previous_loss = total_val_loss
338 | print("#" + str(t) + "/" + str(opt.num_epochs) + " loss:" +
339 | str(total_loss) + " accuracy:" + str(train_correct) + " val_loss:" + str(
340 | total_val_loss) + " val_accuracy:" + str(val_correct) + " val_0s:" + str(val_negative) + "/" + str(
341 | np.count_nonzero(validation_labels == 0)) + " val_1s:" + str(val_positive) + "/" + str(
342 | np.count_nonzero(validation_labels == 1)))
343 |
344 | if not os.path.exists(MODELS_PATH):
345 | os.makedirs(MODELS_PATH)
346 | torch.save(model.state_dict(), os.path.join(MODELS_PATH,
347 | "efficientnetB" + str(opt.efficient_net) + "_checkpoint" + str(
348 | t) + "_" + opt.dataset))
349 |
--------------------------------------------------------------------------------
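Note (illustrative, not part of the repository): train_model.py above rebalances the roughly 5:1 fake/real video ratio (five manipulation methods per original) in two places: it samples fewer frames per fake video (rebalancing_fake < rebalancing_real in the config) and it weights the positive class in BCEWithLogitsLoss by the remaining real/fake image ratio. A rough sketch with made-up numbers:

import torch
import torch.nn as nn

frames_per_video = 30      # config['training']['frames-per-video'] (assumed value)
rebalancing_real = 1.0     # config['training']['rebalancing_real'] (assumed value)
rebalancing_fake = 0.25    # config['training']['rebalancing_fake'] (assumed value)

real_frames = max(int(frames_per_video * rebalancing_real), 1)  # 30 frames per original video
fake_frames = max(int(frames_per_video * rebalancing_fake), 1)  # 7 frames per manipulated video

# Residual imbalance is pushed into the loss via pos_weight = #real images / #fake images
n_real_images, n_fake_images = 12000, 18000  # hypothetical Counter() output
loss_fn = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([n_real_images / n_fake_images]))

logits = torch.tensor([[2.0], [-1.0]])
labels = torch.tensor([[1.0], [0.0]])
print(loss_fn(logits, labels))
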
/model_test_train/transforms/albu.py:
--------------------------------------------------------------------------------
1 | import random
2 |
3 | import cv2
4 | import numpy as np
5 | from albumentations import DualTransform, ImageOnlyTransform
6 | from albumentations.augmentations.functional import crop
7 |
8 |
9 | def isotropically_resize_image(img, size, interpolation_down=cv2.INTER_AREA, interpolation_up=cv2.INTER_CUBIC):
10 | h, w = img.shape[:2]
11 |
12 | if max(w, h) == size:
13 | return img
14 | if w > h:
15 | scale = size / w
16 | h = h * scale
17 | w = size
18 | else:
19 | scale = size / h
20 | w = w * scale
21 | h = size
22 | interpolation = interpolation_up if scale > 1 else interpolation_down
23 |
24 | img = img.astype('uint8')
25 | resized = cv2.resize(img, (int(w), int(h)), interpolation=interpolation)
26 | return resized
27 |
28 |
29 | class IsotropicResize(DualTransform):
30 | def __init__(self, max_side, interpolation_down=cv2.INTER_AREA, interpolation_up=cv2.INTER_CUBIC,
31 | always_apply=False, p=1):
32 | super(IsotropicResize, self).__init__(always_apply, p)
33 | self.max_side = max_side
34 | self.interpolation_down = interpolation_down
35 | self.interpolation_up = interpolation_up
36 |
37 | def apply(self, img, interpolation_down=cv2.INTER_AREA, interpolation_up=cv2.INTER_CUBIC, **params):
38 | return isotropically_resize_image(img, size=self.max_side, interpolation_down=interpolation_down,
39 | interpolation_up=interpolation_up)
40 |
41 | def apply_to_mask(self, img, **params):
42 | return self.apply(img, interpolation_down=cv2.INTER_NEAREST, interpolation_up=cv2.INTER_NEAREST, **params)
43 |
44 | def get_transform_init_args_names(self):
45 | return ("max_side", "interpolation_down", "interpolation_up")
46 |
47 |
48 | class Resize4xAndBack(ImageOnlyTransform):
49 | def __init__(self, always_apply=False, p=0.5):
50 | super(Resize4xAndBack, self).__init__(always_apply, p)
51 |
52 | def apply(self, img, **params):
53 | h, w = img.shape[:2]
54 | scale = random.choice([2, 4])
55 | img = cv2.resize(img, (w // scale, h // scale), interpolation=cv2.INTER_AREA)
56 | img = cv2.resize(img, (w, h),
57 | interpolation=random.choice([cv2.INTER_CUBIC, cv2.INTER_LINEAR, cv2.INTER_NEAREST]))
58 | return img
59 |
60 | class RandomSizedCropNonEmptyMaskIfExists(DualTransform):
61 |
62 | def __init__(self, min_max_height, w2h_ratio=[0.7, 1.3], always_apply=False, p=0.5):
63 | super(RandomSizedCropNonEmptyMaskIfExists, self).__init__(always_apply, p)
64 |
65 | self.min_max_height = min_max_height
66 | self.w2h_ratio = w2h_ratio
67 |
68 | def apply(self, img, x_min=0, x_max=0, y_min=0, y_max=0, **params):
69 | cropped = crop(img, x_min, y_min, x_max, y_max)
70 | return cropped
71 |
72 | @property
73 | def targets_as_params(self):
74 | return ["mask"]
75 |
76 | def get_params_dependent_on_targets(self, params):
77 | mask = params["mask"]
78 | mask_height, mask_width = mask.shape[:2]
79 | crop_height = int(mask_height * random.uniform(self.min_max_height[0], self.min_max_height[1]))
80 | w2h_ratio = random.uniform(*self.w2h_ratio)
81 | crop_width = min(int(crop_height * w2h_ratio), mask_width - 1)
82 | if mask.sum() == 0:
83 | x_min = random.randint(0, mask_width - crop_width + 1)
84 | y_min = random.randint(0, mask_height - crop_height + 1)
85 | else:
86 | mask = mask.sum(axis=-1) if mask.ndim == 3 else mask
87 | non_zero_yx = np.argwhere(mask)
88 | y, x = random.choice(non_zero_yx)
89 | x_min = x - random.randint(0, crop_width - 1)
90 | y_min = y - random.randint(0, crop_height - 1)
91 | x_min = np.clip(x_min, 0, mask_width - crop_width)
92 | y_min = np.clip(y_min, 0, mask_height - crop_height)
93 |
94 | x_max = x_min + crop_width
95 | y_max = y_min + crop_height
96 | y_max = min(mask_height, y_max)
97 | x_max = min(mask_width, x_max)
98 | return {"x_min": x_min, "x_max": x_max, "y_min": y_min, "y_max": y_max}
99 |
100 | def get_transform_init_args_names(self):
101 | return "min_max_height", "height", "width", "w2h_ratio"
--------------------------------------------------------------------------------
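Note (illustrative, not part of the repository): IsotropicResize rescales an image so that its longest side equals max_side, and the rest of the code always pairs it with PadIfNeeded to obtain a square input. A minimal usage sketch, assuming it is run from model_test_train/ and that face_crop.png is a local image of your own:

import cv2
from albumentations import Compose, PadIfNeeded
from transforms.albu import IsotropicResize

size = 224  # config['model']['image-size'] (assumed value)
transform = Compose([
    IsotropicResize(max_side=size, interpolation_down=cv2.INTER_AREA, interpolation_up=cv2.INTER_CUBIC),
    PadIfNeeded(min_height=size, min_width=size, border_mode=cv2.BORDER_CONSTANT),
])

image = cv2.imread("face_crop.png")    # hypothetical face crop produced by the preprocessing step
out = transform(image=image)["image"]  # longest side resized to 224, then padded to 224x224
print(out.shape)
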
/model_test_train/utils.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | from albumentations import Compose, PadIfNeeded
3 | from transforms.albu import IsotropicResize
4 | import numpy as np
5 | import os
6 | import cv2
7 | import torch
8 | from statistics import mean
9 |
10 | def transform_frame(image, image_size):
11 | transform_pipeline = Compose([
12 | IsotropicResize(max_side=image_size, interpolation_down=cv2.INTER_LINEAR, interpolation_up=cv2.INTER_LINEAR),
13 | PadIfNeeded(min_height=image_size, min_width=image_size, border_mode=cv2.BORDER_REPLICATE)
14 | ]
15 | )
16 | return transform_pipeline(image=image)['image']
17 |
18 |
19 | def resize(image, image_size):
20 | try:
21 | return cv2.resize(image, dsize=(image_size, image_size))
22 | except:
23 | return []
24 |
25 | def custom_round(values):
26 | result = []
27 | for value in values:
28 | if value > 0.55:
29 | result.append(1)
30 | else:
31 | result.append(0)
32 | return np.asarray(result)
33 |
34 | def custom_video_round(preds):
35 | for pred_value in preds:
36 | if pred_value > 0.55:
37 | return pred_value
38 | return mean(preds)
39 |
40 |
41 |
42 | def get_method(video, data_path):
43 | # methods = os.listdir(os.path.join(data_path, "manipulated_sequences"))
44 | # methods.extend(os.listdir(os.path.join(data_path, "original_sequences")))
45 | # methods.append("DFDC")
46 | # methods.append("Original")
47 | methods = os.listdir(os.path.join(data_path,"test_set"))
48 | selected_method = ""
49 | for method in methods:
50 | if method in video:
51 | selected_method = method
52 | break
53 | return selected_method
54 |
55 | def shuffle_dataset(dataset):
56 | import random
57 | random.seed(4)
58 | random.shuffle(dataset)
59 | return dataset
60 |
61 | def get_n_params(model):
62 | pp=0
63 | for p in list(model.parameters()):
64 | nn=1
65 | for s in list(p.size()):
66 | nn = nn*s
67 | pp += nn
68 | return pp
69 |
70 | def check_correct(preds, labels):
71 | preds = preds.cpu()
72 | labels = labels.cpu()
73 | preds = [np.asarray(torch.sigmoid(pred).detach().numpy()).round() for pred in preds]
74 |
75 | correct = 0
76 | positive_class = 0
77 | negative_class = 0
78 | for i in range(len(labels)):
79 | pred = int(preds[i])
80 | if labels[i] == pred:
81 | correct += 1
82 | if pred == 1:
83 | positive_class += 1
84 | else:
85 | negative_class += 1
86 | return correct, positive_class, negative_class
87 |
--------------------------------------------------------------------------------
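Note (illustrative, not part of the repository): utils.py uses two different rounding conventions: check_correct rounds sigmoid outputs at 0.5 while counting training/validation accuracy, whereas custom_round and custom_video_round apply the stricter 0.55 threshold at test time. A small sketch, assuming it is run from model_test_train/:

import numpy as np
import torch
from utils import custom_round, custom_video_round, check_correct

probs = np.array([0.52, 0.60, 0.10])
print(custom_round(probs))                      # [0 1 0]  -> only scores above 0.55 count as fake
print(custom_video_round([0.52, 0.60, 0.10]))   # 0.6      -> first per-track score above 0.55

logits = torch.tensor([[0.08], [0.4], [-2.2]])  # sigmoid ~ 0.52, 0.60, 0.10
labels = torch.tensor([[0.0], [1.0], [0.0]])
print(check_correct(logits, labels))            # (2, 2, 1): 2 correct, 2 predicted fake, 1 predicted real

Note the 0.52 case: it is counted as fake by check_correct (0.5 threshold) but as real by custom_round (0.55 threshold).
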
/preprocessing/detect_faces.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import json
3 | import os
4 | import numpy as np
5 | from typing import Type
6 |
7 | from torch.utils.data.dataloader import DataLoader
8 | from tqdm import tqdm
9 |
10 | import face_detector
11 | from face_detector import VideoDataset
12 | from face_detector import VideoFaceDetector
13 | from utils import get_video_paths, get_method
14 | import argparse
15 |
16 |
17 | def process_videos(videos, detector_cls: Type[VideoFaceDetector], selected_dataset, opt):
18 | detector = face_detector.__dict__[detector_cls](device="cuda:0")
19 |
20 | dataset = VideoDataset(videos)
21 |
22 | loader = DataLoader(dataset, shuffle=False, num_workers=40, batch_size=1, collate_fn=lambda x: x)
23 | missed_videos = []
24 | for item in tqdm(loader):
25 | result = {}
26 | video, indices, frames = item[0]
27 | if selected_dataset == 1:
28 | method = get_method(video, opt.data_path)
29 | out_dir = os.path.join(opt.data_path, "boxes", method)
30 | else:
31 | out_dir = os.path.join(opt.data_path, "boxes")
32 |
33 | id = os.path.splitext(os.path.basename(video))[0]
34 |
35 | if os.path.exists(out_dir) and "{}.json".format(id) in os.listdir(out_dir):
36 | continue
37 | batches = [frames[i:i + detector._batch_size] for i in range(0, len(frames), detector._batch_size)]
38 |
39 | for j, frames in enumerate(batches):
40 | result.update({int(j * detector._batch_size) + i : b for i, b in zip(indices, detector._detect_faces(frames))})
41 |
42 |
43 | os.makedirs(out_dir, exist_ok=True)
44 | print(len(result))
45 | if len(result) > 0:
46 | with open(os.path.join(out_dir, "{}.json".format(id)), "w") as f:
47 | json.dump(result, f)
48 | else:
49 | missed_videos.append(id)
50 |
51 | if len(missed_videos) > 0:
52 | print("The detector did not find faces inside the following videos:")
53 | print(missed_videos)
54 | print("We suggest re-running the code with a lower detector threshold.")
55 |
56 |
57 | def main():
58 | parser = argparse.ArgumentParser()
59 | parser.add_argument('--dataset', default="DFDC", type=str,
60 | help='Dataset (DFDC / FACEFORENSICS)')
61 | parser.add_argument('--data_path', default='', type=str,
62 | help='Videos directory')
63 | parser.add_argument("--detector-type", help="type of the detector", default="FacenetDetector",
64 | choices=["FacenetDetector"])
65 |
66 | opt = parser.parse_args()
67 | print(opt)
68 |
69 |
70 | if opt.dataset.upper() == "DFDC":
71 | dataset = 0
72 | else:
73 | dataset = 1
74 |
75 | videos_paths = []
76 | if dataset == 1:
77 | videos_paths = get_video_paths(opt.data_path, dataset)
78 | else:
79 | os.makedirs(os.path.join(opt.data_path, "boxes"), exist_ok=True)
80 | already_extracted = os.listdir(os.path.join(opt.data_path, "boxes"))
81 | for folder in os.listdir(opt.data_path):
82 | if "boxes" not in folder and "zip" not in folder:
83 | if os.path.isdir(os.path.join(opt.data_path, folder)): # For training and test set
84 | for video_name in os.listdir(os.path.join(opt.data_path, folder)):
85 | if video_name.split(".")[0] + ".json" in already_extracted:
86 | continue
87 | videos_paths.append(os.path.join(opt.data_path, folder, video_name))
88 | else: # For validation set
89 | videos_paths.append(os.path.join(opt.data_path, folder))
90 |
91 |
92 | process_videos(videos_paths, opt.detector_type, dataset, opt)
93 |
94 |
95 | if __name__ == "__main__":
96 | main()
97 |
--------------------------------------------------------------------------------
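Note (illustrative, not part of the repository): detect_faces.py writes one JSON file per video into a "boxes" folder, mapping frame indices to lists of [x1, y1, x2, y2] boxes (or null when no face is found). Because VideoDataset in face_detector.py halves each frame before detection, the stored coordinates are at half resolution, which is why extract_crops.py multiplies them by 2. A small inspection sketch with a hypothetical file name:

import json

with open("boxes/000.json") as f:  # hypothetical output of detect_faces.py
    boxes = json.load(f)

for frame_idx, frame_boxes in list(boxes.items())[:3]:
    if frame_boxes is None:
        print(frame_idx, "no face detected")
    else:
        full_res = [[2 * coord for coord in box] for box in frame_boxes]
        print(frame_idx, full_res)
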
/preprocessing/extract_crops.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import json
3 | import os
4 | from os import cpu_count
5 | from pathlib import Path
6 |
7 | os.environ["MKL_NUM_THREADS"] = "1"
8 | os.environ["NUMEXPR_NUM_THREADS"] = "1"
9 | os.environ["OMP_NUM_THREADS"] = "1"
10 | from functools import partial
11 | from glob import glob
12 | from multiprocessing.pool import Pool
13 |
14 | import cv2
15 |
16 | cv2.ocl.setUseOpenCL(False)
17 | cv2.setNumThreads(0)
18 | from tqdm import tqdm
19 |
20 | from utils import get_video_paths, get_method, get_method_from_name
21 |
22 | def extract_video(video, root_dir, dataset):
23 | try:
24 | if dataset == 0:
25 | bboxes_path = os.path.join(opt.data_path, "boxes", os.path.splitext(os.path.basename(video))[0] + ".json")
26 | else:
27 | bboxes_path = os.path.join(opt.data_path, "boxes", get_method_from_name(video), os.path.splitext(os.path.basename(video))[0] + ".json")
28 |
29 | if not os.path.exists(bboxes_path) or not os.path.exists(video):
30 | return
31 | with open(bboxes_path, "r") as bbox_f:
32 | bboxes_dict = json.load(bbox_f)
33 |
34 | capture = cv2.VideoCapture(video)
35 | frames_num = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
36 | counter = 0
37 | for i in range(frames_num):
38 | capture.grab()
39 | #if i % 2 != 0:
40 | # continue
41 | success, frame = capture.retrieve()
42 | if not success or str(i) not in bboxes_dict:
43 | continue
44 | id = os.path.splitext(os.path.basename(video))[0]
45 | crops = []
46 | bboxes = bboxes_dict[str(i)]
47 | if bboxes is None:
48 | continue
49 | else:
50 | counter += 1
51 | for bbox in bboxes:
52 | xmin, ymin, xmax, ymax = [int(b * 2) for b in bbox]
53 | w = xmax - xmin
54 | h = ymax - ymin
55 | p_h = 0
56 | p_w = 0
57 |
58 | #p_h = h // 3
59 | #p_w = w // 3
60 |
61 | #p_h = h // 6
62 | #p_w = w // 6
63 |
64 | if h > w:
65 | p_w = int((h-w)/2)
66 | elif h < w:
67 | p_h = int((w-h)/2)
68 |
69 | crop = frame[max(ymin - p_h, 0):ymax + p_h, max(xmin - p_w, 0):xmax + p_w]
70 | h, w = crop.shape[:2]
71 | crops.append(crop)
72 |
73 |
74 |
75 | os.makedirs(os.path.join(opt.output_path, id), exist_ok=True)
76 | for j, crop in enumerate(crops):
77 | cv2.imwrite(os.path.join(opt.output_path, id, "{}_{}.png".format(i, j)), crop)
78 | if counter == 0:
79 | print(video, counter)
80 | except Exception as e:
81 | print("Error:", e)
82 |
83 |
84 |
85 | if __name__ == '__main__':
86 | parser = argparse.ArgumentParser()
87 | parser.add_argument('--dataset', default="DFDC", type=str,
88 | help='Dataset (DFDC / FACEFORENSICS)')
89 | parser.add_argument('--data_path', default='', type=str,
90 | help='Videos directory')
91 | parser.add_argument('--output_path', default='', type=str,
92 | help='Output directory')
93 |
94 | opt = parser.parse_args()
95 | print(opt)
96 |
97 |
98 | if opt.dataset.upper() == "DFDC":
99 | dataset = 0
100 | else:
101 | dataset = 1
102 |
103 |
104 | os.makedirs(opt.output_path, exist_ok=True)
105 | #excluded_videos = os.listdir(os.path.join(opt.output_dir)) # Useful to avoid to extract from already extracted videos
106 | excluded_videos = os.listdir(opt.output_path)
107 | if dataset == 0:
108 | paths = get_video_paths(opt.data_path, dataset, excluded_videos)
109 | #paths.extend(get_video_paths(opt.data_path, dataset, excluded_videos))
110 | else:
111 | paths = get_video_paths(os.path.join(opt.data_path, "manipulated_sequences"), dataset)
112 | paths.extend(get_video_paths(os.path.join(opt.data_path, "original_sequences"), dataset))
113 |
114 | with Pool(processes=cpu_count()-2) as p:
115 | with tqdm(total=len(paths)) as pbar:
116 | for v in p.imap_unordered(partial(extract_video, root_dir=opt.data_path, dataset=dataset), paths):
117 | pbar.update()
118 |
--------------------------------------------------------------------------------
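Note (illustrative, not part of the repository): before saving each crop, extract_crops.py pads the shorter side of the detected box so the face crop is roughly square (it is isotropically resized later anyway). A stand-alone sketch of that padding, with names of my own:

def square_pad(xmin, ymin, xmax, ymax):
    w, h = xmax - xmin, ymax - ymin
    p_w = (h - w) // 2 if h > w else 0  # widen narrow boxes
    p_h = (w - h) // 2 if w > h else 0  # heighten flat boxes
    return max(xmin - p_w, 0), max(ymin - p_h, 0), xmax + p_w, ymax + p_h

print(square_pad(100, 50, 160, 170))  # (70, 50, 190, 170): a 60x120 box padded to 120x120
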
/preprocessing/face_detector.py:
--------------------------------------------------------------------------------
1 | import os
2 | os.environ["MKL_NUM_THREADS"] = "1"
3 | os.environ["NUMEXPR_NUM_THREADS"] = "1"
4 | os.environ["OMP_NUM_THREADS"] = "1"
5 |
6 | from abc import ABC, abstractmethod
7 | from collections import OrderedDict
8 | from typing import List
9 |
10 | import cv2
11 | cv2.ocl.setUseOpenCL(False)
12 | cv2.setNumThreads(0)
13 |
14 | from PIL import Image
15 | from facenet_pytorch.models.mtcnn import MTCNN
16 | from torch.utils.data import Dataset
17 |
18 |
19 | class VideoFaceDetector(ABC):
20 |
21 | def __init__(self, **kwargs) -> None:
22 | super().__init__()
23 |
24 | @property
25 | @abstractmethod
26 | def _batch_size(self) -> int:
27 | pass
28 |
29 | @abstractmethod
30 | def _detect_faces(self, frames) -> List:
31 | pass
32 |
33 |
34 | class FacenetDetector(VideoFaceDetector):
35 |
36 | def __init__(self, device="cuda:0") -> None:
37 | super().__init__()
38 | self.detector = MTCNN(margin=0, thresholds=[0.85, 0.95, 0.95], device=device)
39 |
40 | def _detect_faces(self, frames) -> List:
41 | batch_boxes, *_ = self.detector.detect(frames, landmarks=False)
42 | return [b.tolist() if b is not None else None for b in batch_boxes]
43 |
44 | @property
45 | def _batch_size(self):
46 | return 32
47 |
48 |
49 | class VideoDataset(Dataset):
50 |
51 | def __init__(self, videos) -> None:
52 | super().__init__()
53 | self.videos = videos
54 |
55 | def __getitem__(self, index: int):
56 | video = self.videos[index]
57 | capture = cv2.VideoCapture(video)
58 | frames_num = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
59 |
60 | frames = OrderedDict()
61 | for i in range(frames_num):
62 | capture.grab()
63 | success, frame = capture.retrieve()
64 | if not success:
65 | continue
66 | frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
67 | frame = Image.fromarray(frame)
68 | frame = frame.resize(size=[s // 2 for s in frame.size])
69 | frames[i] = frame
70 | return video, list(frames.keys()), list(frames.values())
71 |
72 | def __len__(self) -> int:
73 | return len(self.videos)
74 |
--------------------------------------------------------------------------------
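Note (illustrative, not part of the repository): FacenetDetector wraps facenet_pytorch's MTCNN with fairly strict thresholds and a batch size of 32. A minimal sketch of driving it directly, assuming a CUDA device and two equally sized frames of your own:

from PIL import Image
from face_detector import FacenetDetector

detector = FacenetDetector(device="cuda:0")
frames = [Image.open("frame_0.png"), Image.open("frame_1.png")]  # hypothetical, same-sized frames
boxes = detector._detect_faces(frames)  # one entry per frame: list of [x1, y1, x2, y2] boxes, or None
print(boxes)
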
/preprocessing/utils.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | from glob import glob
4 | from pathlib import Path
5 | import cv2
6 | def get_video_paths(data_path, dataset, excluded_videos=[]):
7 | videos_folders = os.listdir(data_path)
8 | videos_paths = []
9 | for folder in videos_folders:
10 | if folder == "boxes" or "set" in folder or folder == "splits" or folder == "actors" or folder == "crops" or folder == "DeepFakeDetection" or "zip" in folder:
11 | continue
12 |
13 |
14 | folder_path = os.path.join(data_path, folder)
15 |
16 | if dataset == 1:
17 | folder_path = os.path.join(folder_path, "c23", "videos")
18 |
19 |
20 | if not os.path.isdir(folder_path):
21 | return [os.path.join(data_path, video_name) for video_name in videos_folders]
22 |
23 | for index, video in enumerate(os.listdir(folder_path)):
24 | if "metadata" in video or video.split(".")[0] in excluded_videos:
25 | continue
26 | videos_paths.append(os.path.join(folder_path, video))
27 | return videos_paths
28 |
29 | def resize(image, image_size):
30 | try:
31 | return cv2.resize(image, dsize=(image_size, image_size))
32 | except:
33 | return []
34 |
35 | def get_original_video_paths(root_dir, basename=False):
36 | originals = set()
37 | originals_v = set()
38 | for json_path in glob(os.path.join(root_dir, "*/metadata.json")):
39 | dir = Path(json_path).parent
40 | with open(json_path, "r") as f:
41 | metadata = json.load(f)
42 | for k, v in metadata.items():
43 | original = v.get("original", None)
44 | if v["label"] == "REAL":
45 | original = k
46 | originals_v.add(original)
47 | originals.add(os.path.join(dir, original))
48 | originals = list(originals)
49 | originals_v = list(originals_v)
50 |
51 | return originals_v if basename else originals
52 |
53 |
54 |
55 | def get_method_from_name(video):
56 | methods = ["youtube", "Deepfakes", "Face2Face", "FaceShifter", "FaceSwap", "NeuralTextures"]
57 | for method in methods:
58 | if method in video:
59 | return method
60 |
61 | def get_method(video, data_path):
62 | methods = os.listdir(os.path.join(data_path, "manipulated_sequences"))
63 | methods.extend(os.listdir(os.path.join(data_path, "original_sequences")))
64 | methods.append("DFDC")
65 | methods.append("Original")
66 | selected_method = ""
67 | for method in methods:
68 | if method in video:
69 | selected_method = method
70 | break
71 | return selected_method
72 |
73 | def get_original_with_fakes(root_dir):
74 | pairs = []
75 | for json_path in glob(os.path.join(root_dir, "*/metadata.json")):
76 | with open(json_path, "r") as f:
77 | metadata = json.load(f)
78 | for k, v in metadata.items():
79 | original = v.get("original", None)
80 | if v["label"] == "FAKE":
81 | pairs.append((original[:-4], k[:-4] ))
82 |
83 | return pairs
84 |
85 |
86 | def get_originals_and_fakes(root_dir):
87 | originals = []
88 | fakes = []
89 | for json_path in glob(os.path.join(root_dir, "*/metadata.json")):
90 | with open(json_path, "r") as f:
91 | metadata = json.load(f)
92 | for k, v in metadata.items():
93 | if v["label"] == "FAKE":
94 | fakes.append(k[:-4])
95 | else:
96 | originals.append(k[:-4])
97 |
98 | return originals, fakes
99 |
100 |
--------------------------------------------------------------------------------
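Note (illustrative, not part of the repository): the DFDC helpers above expect one metadata.json per part folder, mapping video file names to a label and, for fakes, the source video. A made-up fragment reproduced end to end, assuming it is run from preprocessing/:

import json, os, tempfile
from utils import get_original_with_fakes, get_originals_and_fakes

root = tempfile.mkdtemp()
part = os.path.join(root, "dfdc_train_part_0")  # hypothetical part folder name
os.makedirs(part)
metadata = {
    "abcdef.mp4": {"label": "FAKE", "original": "uvwxyz.mp4", "split": "train"},
    "uvwxyz.mp4": {"label": "REAL", "split": "train"},
}
with open(os.path.join(part, "metadata.json"), "w") as f:
    json.dump(metadata, f)

print(get_original_with_fakes(root))    # [('uvwxyz', 'abcdef')]
print(get_originals_and_fakes(root))    # (['uvwxyz'], ['abcdef'])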