├── .gitignore
├── .vscode
│   ├── launch.json
│   └── settings.json
├── LICENCE
├── README.md
├── __init__.py
├── ablation
│   ├── Fig5_ablate_individual_template.py
│   ├── Fig8_ablate_iidnoise_video_architecture.py
│   ├── Fig8_ablate_motionalignednoise_video_architecture.py
│   └── __init__.py
├── action_eval.py
├── action_train.py
├── afd
│   ├── RAFT
│   │   ├── LICENSE
│   │   ├── RAFT.png
│   │   ├── README.md
│   │   ├── __init__.py
│   │   ├── alt_cuda_corr
│   │   │   ├── correlation.cpp
│   │   │   ├── correlation_kernel.cu
│   │   │   └── setup.py
│   │   ├── chairs_split.txt
│   │   ├── core
│   │   │   ├── __init__.py
│   │   │   ├── corr.py
│   │   │   ├── datasets.py
│   │   │   ├── extractor.py
│   │   │   ├── raft.py
│   │   │   ├── update.py
│   │   │   └── utils
│   │   │       ├── __init__.py
│   │   │       ├── augmentor.py
│   │   │       ├── flow_viz.py
│   │   │       ├── frame_utils.py
│   │   │       └── utils.py
│   │   ├── demo.py
│   │   ├── download_models.sh
│   │   ├── evaluate.py
│   │   ├── models
│   │   │   ├── raft-chairs.pth
│   │   │   ├── raft-kitti.pth
│   │   │   ├── raft-sintel.pth
│   │   │   ├── raft-small.pth
│   │   │   └── raft-things.pth
│   │   ├── train.py
│   │   ├── train_mixed.sh
│   │   └── train_standard.sh
│   ├── SliceViewer.py
│   ├── afd_single_video.py
│   ├── flowiz.py
│   ├── generate_ipn.py
│   ├── generate_kth.py
│   ├── generate_sbu.py
│   ├── save_helper.py
│   └── util.py
├── batch_eval.py
├── batch_train.py
├── config.py
├── create_table_action.py
├── create_table_privacy.py
├── data
│   └── splits
│       ├── ipn
│       │   ├── Annot_List.txt
│       │   ├── Annot_testList.txt
│       │   ├── Annot_trainList.txt
│       │   ├── Video_TestList.txt
│       │   ├── Video_TrainList.txt
│       │   ├── classIdx.txt
│       │   ├── metadata.xlsx
│       │   ├── metadata_test.csv
│       │   └── metadata_train.csv
│       ├── kth
│       │   ├── create_metadata.py
│       │   ├── download_kth.sh
│       │   ├── extract_kthframes.sh
│       │   ├── metadata.csv
│       │   └── sequences.csv
│       └── sbu
│           ├── test.txt
│           └── train.txt
├── dataset
│   ├── __init__.py
│   ├── db_factory.py
│   ├── db_stats.py
│   ├── ipn.py
│   ├── kth.py
│   └── sbu.py
├── environment.yaml
├── matcher
│   ├── .gitignore
│   ├── LICENSE
│   ├── __init__.py
│   ├── compute_similarities_from_descriptor.py
│   ├── extractor.py
│   ├── helpers.py
│   ├── input
│   │   ├── ipn1.jpg
│   │   ├── ipn2.jpg
│   │   └── testvideo
│   │       ├── 4CM11_7_R_#34_000100.jpg
│   │       ├── 4CM11_7_R_#34_000101.jpg
│   │       ├── 4CM11_7_R_#34_000102.jpg
│   │       ├── 4CM11_7_R_#34_000103.jpg
│   │       ├── 4CM11_7_R_#34_000104.jpg
│   │       ├── 4CM11_7_R_#34_000105.jpg
│   │       ├── 4CM11_7_R_#34_000106.jpg
│   │       ├── 4CM11_7_R_#34_000107.jpg
│   │       ├── 4CM11_7_R_#34_000108.jpg
│   │       ├── 4CM11_7_R_#34_000109.jpg
│   │       ├── 4CM11_7_R_#34_000110.jpg
│   │       ├── 4CM11_7_R_#34_000111.jpg
│   │       ├── 4CM11_7_R_#34_000112.jpg
│   │       ├── 4CM11_7_R_#34_000113.jpg
│   │       ├── 4CM11_7_R_#34_000114.jpg
│   │       ├── 4CM11_7_R_#34_000115.jpg
│   │       ├── 4CM11_7_R_#34_000116.jpg
│   │       ├── 4CM11_7_R_#34_000117.jpg
│   │       ├── 4CM11_7_R_#34_000118.jpg
│   │       ├── 4CM11_7_R_#34_000119.jpg
│   │       ├── 4CM11_7_R_#34_000120.jpg
│   │       ├── 4CM11_7_R_#34_000121.jpg
│   │       ├── 4CM11_7_R_#34_000122.jpg
│   │       ├── 4CM11_7_R_#34_000123.jpg
│   │       ├── 4CM11_7_R_#34_000124.jpg
│   │       ├── 4CM11_7_R_#34_000125.jpg
│   │       ├── 4CM11_7_R_#34_000126.jpg
│   │       ├── 4CM11_7_R_#34_000127.jpg
│   │       ├── 4CM11_7_R_#34_000128.jpg
│   │       ├── 4CM11_7_R_#34_000129.jpg
│   │       ├── 4CM11_7_R_#34_000130.jpg
│   │       ├── 4CM11_7_R_#34_000131.jpg
│   │       ├── 4CM11_7_R_#34_000132.jpg
│   │       ├── 4CM11_7_R_#34_000133.jpg
│   │       ├── 4CM11_7_R_#34_000134.jpg
│   │       ├── 4CM11_7_R_#34_000135.jpg
│   │       ├── 4CM11_7_R_#34_000136.jpg
│   │       ├── 4CM11_7_R_#34_000137.jpg
│   │       ├── 4CM11_7_R_#34_000138.jpg
│   │       ├── 4CM11_7_R_#34_000139.jpg
│   │       ├── 4CM11_7_R_#34_000140.jpg
│   │       ├── 4CM11_7_R_#34_000141.jpg
│   │       ├── 4CM11_7_R_#34_000142.jpg
│   │       ├── 4CM11_7_R_#34_000143.jpg
│   │       ├── 4CM11_7_R_#34_000144.jpg
│   │       ├── 4CM11_7_R_#34_000145.jpg
│   │       ├── 4CM11_7_R_#34_000146.jpg
│   │       ├── 4CM11_7_R_#34_000147.jpg
│   │       ├── 4CM11_7_R_#34_000148.jpg
│   │       ├── 4CM11_7_R_#34_000149.jpg
│   │       ├── 4CM11_7_R_#34_000150.jpg
│   │       ├── 4CM11_7_R_#34_000151.jpg
│   │       ├── 4CM11_7_R_#34_000152.jpg
│   │       ├── 4CM11_7_R_#34_000153.jpg
│   │       ├── 4CM11_7_R_#34_000154.jpg
│   │       ├── 4CM11_7_R_#34_000155.jpg
│   │       ├── 4CM11_7_R_#34_000156.jpg
│   │       ├── 4CM11_7_R_#34_000157.jpg
│   │       ├── 4CM11_7_R_#34_000158.jpg
│   │       ├── 4CM11_7_R_#34_000159.jpg
│   │       ├── 4CM11_7_R_#34_000160.jpg
│   │       ├── 4CM11_7_R_#34_000161.jpg
│   │       ├── 4CM11_7_R_#34_000162.jpg
│   │       ├── 4CM11_7_R_#34_000163.jpg
│   │       ├── 4CM11_7_R_#34_000164.jpg
│   │       ├── 4CM11_7_R_#34_000165.jpg
│   │       ├── 4CM11_7_R_#34_000166.jpg
│   │       ├── 4CM11_7_R_#34_000167.jpg
│   │       ├── 4CM11_7_R_#34_000168.jpg
│   │       ├── 4CM11_7_R_#34_000169.jpg
│   │       ├── 4CM11_7_R_#34_000170.jpg
│   │       ├── 4CM11_7_R_#34_000171.jpg
│   │       ├── 4CM11_7_R_#34_000172.jpg
│   │       ├── 4CM11_7_R_#34_000173.jpg
│   │       ├── 4CM11_7_R_#34_000174.jpg
│   │       ├── 4CM11_7_R_#34_000175.jpg
│   │       ├── 4CM11_7_R_#34_000176.jpg
│   │       ├── 4CM11_7_R_#34_000177.jpg
│   │       ├── 4CM11_7_R_#34_000178.jpg
│   │       ├── 4CM11_7_R_#34_000179.jpg
│   │       ├── 4CM11_7_R_#34_000180.jpg
│   │       ├── 4CM11_7_R_#34_000181.jpg
│   │       ├── 4CM11_7_R_#34_000182.jpg
│   │       ├── 4CM11_7_R_#34_000183.jpg
│   │       ├── 4CM11_7_R_#34_000184.jpg
│   │       ├── 4CM11_7_R_#34_000185.jpg
│   │       ├── 4CM11_7_R_#34_000186.jpg
│   │       ├── 4CM11_7_R_#34_000187.jpg
│   │       ├── 4CM11_7_R_#34_000188.jpg
│   │       ├── 4CM11_7_R_#34_000189.jpg
│   │       ├── 4CM11_7_R_#34_000190.jpg
│   │       ├── 4CM11_7_R_#34_000191.jpg
│   │       ├── 4CM11_7_R_#34_000192.jpg
│   │       ├── 4CM11_7_R_#34_000193.jpg
│   │       ├── 4CM11_7_R_#34_000194.jpg
│   │       ├── 4CM11_7_R_#34_000195.jpg
│   │       ├── 4CM11_7_R_#34_000196.jpg
│   │       ├── 4CM11_7_R_#34_000197.jpg
│   │       ├── 4CM11_7_R_#34_000198.jpg
│   │       ├── 4CM11_7_R_#34_000199.jpg
│   │       └── 4CM11_7_R_#34_000200.jpg
│   ├── run_batch.py
│   ├── save_descriptor.py
│   └── similarity_from_template_for_dataset.py
├── models
│   ├── __init__.py
│   └── valid_models.py
├── output
│   └── descriptors
│       ├── arm.pt
│       ├── cheek.pt
│       ├── eyes.pt
│       ├── forehead.pt
│       ├── hair.pt
│       ├── hand.pt
│       ├── leg.pt
│       ├── lips.pt
│       ├── mouth.pt
│       └── torso.pt
├── privacy_eval.py
├── privacy_train.py
├── readme_assets
│   ├── ablation.png
│   ├── baselines_qualitative.png
│   ├── hair_tiled.png
│   ├── iid.png
│   ├── lambda.png
│   ├── method.png
│   ├── orig_tiled.png
│   ├── saliency.png
│   ├── table.png
│   ├── teaser.png
│   └── vscodetargets.png
├── simulation
│   ├── __init__.py
│   ├── helpers.py
│   └── simulation.py
├── transforms
│   ├── __init__.py
│   └── transform.py
├── utils
│   ├── AverageMeter.py
│   ├── ConfusionMatrix.py
│   ├── Trainer.py
│   ├── VideoTensorViewer.py
│   ├── __init__.py
│   ├── info_print.py
│   ├── metrics.py
│   └── model_utils.py
├── visualize_qualitative
│   ├── __init__.py
│   ├── baselines_qualitative_vis.py
│   └── similarity_from_tempate_vis.py
├── visualize_quantitative
│   ├── Fig7_pgf_plots_average.py
│   ├── Fig7_pgf_plots_individual.py
│   └── load_and_plot_confusionmatrix.py
└── yolo
    ├── create_masks_ipn_or_kth.py
    ├── create_masks_sbu.py
    ├── yolov8n-seg.pt
    └── yolov8n.pt
/.gitignore:
--------------------------------------------------------------------------------
1 | **__pycache__**
2 | /data/*
3 | *.DS_Store
4 | /runs/
5 | !/data/splits/
--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "python.analysis.extraPaths": [
3 | "./utils"
4 | ],
5 | "[python]": {
6 | "editor.defaultFormatter": "ms-python.black-formatter"
7 | },
8 | "python.formatting.provider": "none"
9 | }
--------------------------------------------------------------------------------
/LICENCE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 Filip Ilic
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/__init__.py
--------------------------------------------------------------------------------
/ablation/Fig5_ablate_individual_template.py:
--------------------------------------------------------------------------------
1 | import os
2 | from models.valid_models import action_models
3 | from models.valid_models import privacy_models
4 | from tqdm import tqdm
5 |
6 | from itertools import chain, combinations
7 |
8 |
9 | """
10 | How does the masking of different attributes contribute to the action / privacy performance?
11 | """
12 |
13 | """ Part of Fig.5 Caption:
14 | Obfuscation with a Single Attribute and the Impact on Performance.
15 | Attribute importance is dataset dependent. For example,
16 | notice how the ’Hand’ template contributes to a large decrease in
17 | action recognition performance on IPN, as the action is determined solely
18 | by the hand, whereas on SBU it does not.
19 | """
20 |
21 |
22 | def powerset(iterable):
23 | "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
24 | s = list(iterable) # allows duplicate elements
25 | return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))
26 |
27 |
28 | attributes = [
29 | "arm",
30 | "cheek",
31 | "eyes",
32 | "forehead",
33 | "hair",
34 | "hand",
35 | "lips",
36 | "torso",
37 | "leg",
38 | ]
39 |
40 | return_codes = []
41 | for i, combo in tqdm(
42 | enumerate(powerset(attributes), 1), total=len(list(powerset(attributes)))
43 | ):
44 | if len(combo) == 0: # skip empty set
45 | continue
46 |
47 | # skip sets larger than 1
48 | if len(combo) > 1:
49 | continue
50 |
51 | att_args = " ".join([c for c in combo])
52 |
53 | for k in privacy_models.keys():
54 | run_str = f"CUDA_VISIBLE_DEVICES=0 python privacy_eval.py --architecture {k} --datasetname kth -pretrained --batch_size 32 --num_workers 16 -selectively_mask --obfuscate {att_args} -privacy"
55 | return_codes.append((os.system(run_str), run_str))
56 |
57 | for k in action_models.keys():
58 | run_str = f"CUDA_VISIBLE_DEVICES=0 python action_eval.py --architecture {k} --datasetname kth -pretrained -train_backbone --batch_size 2 --num_workers 16 -selectively_mask --obfuscate {att_args}"
59 |
60 | return_codes.append((os.system(run_str), run_str))
61 |
62 |
63 | # print return codes tuple nicely formatted
64 | for code, command in return_codes:
65 | if code != 0:
66 | error_string = "[ FAIL ]"
67 | else:
68 | error_string = "[ PASS ]"
69 | print(f"{error_string} --- {command}")
70 |
--------------------------------------------------------------------------------
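For reference, each loop iteration above shells out one evaluation command per model. With `combo = ('hand',)`, the privacy call expands to the following (the architecture name is a placeholder for a key of `privacy_models`):

```Shell
CUDA_VISIBLE_DEVICES=0 python privacy_eval.py --architecture <arch> --datasetname kth -pretrained --batch_size 32 --num_workers 16 -selectively_mask --obfuscate hand -privacy
```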
/ablation/Fig8_ablate_iidnoise_video_architecture.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import torch
3 | from os.path import join
4 |
5 | from torch import nn
6 | from torch.utils.data import DataLoader
7 |
8 | from torch.utils.tensorboard import SummaryWriter
9 |
10 | from config import cfg, parser, build_cfg
11 | from dataset.db_factory import DBfactory
12 | from utils.Trainer import Trainer, Trainer_E2SX3D
13 | from simulation.simulation import Simulation
14 | from utils.info_print import print_data_augmentation_transform, print_learnable_params
15 | from utils.model_utils import load_weights
16 | from transforms.transform import valid_models
17 |
18 | from utils.model_utils import (
19 | build_model,
20 | build_model_name,
21 | load_weights,
22 | set_seed,
23 | )
24 |
25 | parser.add_argument("--architecture", type=str)
26 | cfg = build_cfg()
27 |
28 |
29 | # Setting reproducibility
30 |
31 |
32 | def main():
33 | set_seed()
34 | torch.backends.cudnn.benchmark = True
35 | datasetname = cfg["datasetname"]
36 | batch_size = cfg["batch_size"]
37 | num_workers = 16
38 |
39 | test_dataset = DBfactory(datasetname, set_split="test", config=cfg)
40 |
41 | # ----------------- Setup Model & Load weights if supplied -----------------
42 | architecture = cfg["architecture"]
43 |
44 | if architecture not in valid_models.keys():
45 | raise ValueError("This model is not defined in the valid_model dictionary.")
46 |
47 | model = build_model(
48 | architecture,
49 | cfg["pretrained"],
50 | test_dataset.num_classes,
51 | cfg["train_backbone"],
52 | )
53 |
54 | model.name = build_model_name(cfg)
55 | model.configuration = cfg
56 | load_weights(model, None, cfg["weights_path"])
57 | # exit()
58 | criterion = nn.CrossEntropyLoss().cuda()
59 | model = model.cuda()
60 |
61 | sim_name = f"ablate/{cfg['datasetname']}/{model.name}"
62 | with Simulation(sim_name=sim_name, output_root="runs") as sim:
63 | cfg["executed"] = f'python {" ".join(sys.argv)}'
64 | print(f'Running: {cfg["executed"]}\n\n\n')
65 | print_learnable_params(model)
66 | print_data_augmentation_transform(test_dataset.transform)
67 | print(f"Begin training: {model.name}")
68 |
69 | writer = SummaryWriter(join(sim.outdir, "tensorboard"))
70 | if "e2s_x3d" in cfg["architecture"]:
71 | trainer = Trainer_E2SX3D(sim)
72 | else:
73 | trainer = Trainer(sim)
74 |
75 | # -------------- MAIN TRAINING LOOP ----------------------
76 | for noise_level in torch.arange(0, 110, 10):
77 | cfg["noise_level"] = noise_level / 100.0
78 | # Re-init the dataset every time with the correct noise level
79 | test_dataset = DBfactory(datasetname, set_split="test", config=cfg)
80 |
81 | test_dataloader = DataLoader(
82 | test_dataset,
83 | batch_size,
84 | num_workers=num_workers,
85 | shuffle=True,
86 | drop_last=False,
87 | )
88 |
89 | trainer.do(
90 | "ablate",
91 | model,
92 | test_dataloader,
93 | noise_level,
94 | criterion,
95 | None,
96 | writer,
97 | log_video=True,
98 | )
99 |
100 |         writer.close()
101 |
102 |
103 | if __name__ == "__main__":
104 | main()
105 |
--------------------------------------------------------------------------------
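A note on the sweep above: `torch.arange(0, 110, 10)` yields 0, 10, ..., 100, so `cfg["noise_level"]` steps through corruption fractions 0.0 to 1.0 in increments of 0.1. A minimal check:

```python
import torch

# Noise levels evaluated by the i.i.d. ablation above.
levels = [float(n) / 100.0 for n in torch.arange(0, 110, 10)]
print(levels)  # [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
```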
/ablation/Fig8_ablate_motionalignednoise_video_architecture.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import torch
3 | from os.path import join
4 |
5 | from torch import nn
6 | from torch.utils.data import DataLoader
7 |
8 | from torch.utils.tensorboard import SummaryWriter
9 |
10 | from config import cfg, parser, build_cfg
11 | from dataset.db_factory import DBfactory
12 | from utils.Trainer import Trainer, Trainer_E2SX3D
13 | from simulation.simulation import Simulation
14 | from utils.info_print import print_data_augmentation_transform, print_learnable_params
15 | from utils.model_utils import load_weights
16 | from transforms.transform import valid_models
17 | from utils.model_utils import (
18 | build_model,
19 | build_model_name,
20 | load_weights,
21 | set_seed,
22 | )
23 |
24 | # from train_video_architecture import build_model, build_model_name
25 |
26 | parser.add_argument("--architecture", type=str)
27 | parser.add_argument("--use_motion_aligned", default="FULL", type=str)
28 |
29 | cfg = build_cfg()
30 |
31 |
32 | # Setting reproducibility
33 |
34 |
35 | def main():
36 | set_seed()
37 | torch.backends.cudnn.benchmark = True
38 | datasetname = cfg["datasetname"]
39 | batch_size = cfg["batch_size"]
40 | num_workers = 16
41 |
42 | test_dataset = DBfactory(datasetname, set_split="test", config=cfg)
43 |
44 | # ----------------- Setup Model & Load weights if supplied -----------------
45 | architecture = cfg["architecture"]
46 |
47 | if architecture not in valid_models.keys():
48 | raise ValueError("This model is not defined in the valid_model dictionary.")
49 |
50 | model = build_model(
51 | architecture,
52 | cfg["pretrained"],
53 | test_dataset.num_classes,
54 | cfg["train_backbone"],
55 | )
56 |
57 | model.name = build_model_name(cfg)
58 | model.configuration = cfg
59 | load_weights(model, None, cfg["weights_path"])
60 |
61 | criterion = nn.CrossEntropyLoss().cuda()
62 | model = model.cuda()
63 |
64 | sim_name = f"ablate_afdnoise/{cfg['datasetname']}/{model.name}"
65 | with Simulation(sim_name=sim_name, output_root="runs") as sim:
66 | cfg["executed"] = f'python {" ".join(sys.argv)}'
67 | print(f'Running: {cfg["executed"]}\n\n\n')
68 | print_learnable_params(model)
69 | print_data_augmentation_transform(test_dataset.transform)
70 | print(f"Begin training: {model.name}")
71 |
72 | writer = SummaryWriter(join(sim.outdir, "tensorboard"))
73 | if "e2s_x3d" in cfg["architecture"]:
74 | trainer = Trainer_E2SX3D(sim)
75 | else:
76 | trainer = Trainer(sim)
77 |
78 | # -------------- MAIN TRAINING LOOP ----------------------
79 | for noise_level in torch.arange(0, 110, 10):
80 | cfg["afd_combine_level"] = noise_level / 100.0
81 | # if "e2s_x3d" in cfg["architecture"] and noise_level < 50:
82 | # cfg["afd_combine_level"] = torch.sigmoid(noise_level) / 100.0
83 | print(f'{cfg["afd_combine_level"]:0.2f}')
84 |
85 | # Re-init the dataset every time with the correct noise level
86 | test_dataset = DBfactory(datasetname, set_split="test", config=cfg)
87 |
88 | test_dataloader = DataLoader(
89 | test_dataset,
90 | batch_size,
91 | num_workers=num_workers,
92 | shuffle=True,
93 | drop_last=False,
94 | )
95 |
96 | trainer.do(
97 | "ablate_afdnoise",
98 | model,
99 | test_dataloader,
100 | noise_level,
101 | criterion,
102 | None,
103 | writer,
104 | log_video=True,
105 | )
106 |
107 |         writer.close()
108 |
109 |
110 | if __name__ == "__main__":
111 | main()
112 |
--------------------------------------------------------------------------------
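This script is driven from the command line like its i.i.d. sibling; a plausible invocation looks like the following sketch (only `--architecture` and `--use_motion_aligned` are defined in this file; the remaining flags come from `config.py`, which is not shown here, so treat them as assumptions):

```Shell
CUDA_VISIBLE_DEVICES=0 python ablation/Fig8_ablate_motionalignednoise_video_architecture.py --architecture <arch> --datasetname kth --batch_size 32 --use_motion_aligned FULL
```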
/ablation/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/ablation/__init__.py
--------------------------------------------------------------------------------
/action_eval.py:
--------------------------------------------------------------------------------
1 | from os.path import join
2 |
3 | import torch
4 | from torch import nn
5 | from torch.utils.data import DataLoader
6 | from torch.utils.tensorboard import SummaryWriter
7 |
8 | from config import build_cfg, cfg, parser
9 | from dataset.db_factory import DBfactory
10 | from models.valid_models import valid_models
11 | from simulation.simulation import Simulation
12 | from utils.model_utils import (
13 | build_info_name,
14 | build_model,
15 | build_model_name,
16 | load_weights,
17 | set_seed,
18 | )
19 | from utils.Trainer import Trainer, Trainer_E2SX3D
20 |
21 | parser.add_argument("--architecture", type=str)
22 | cfg = build_cfg()
23 |
24 |
25 | # Setting reproducibility
26 |
27 |
28 | def main():
29 | set_seed()
30 | torch.backends.cudnn.benchmark = True
31 | datasetname = cfg["datasetname"]
32 | batch_size = cfg["batch_size"]
33 | num_workers = cfg["num_workers"]
34 |
35 | test_dataset = DBfactory(datasetname, set_split="test", config=cfg)
36 | test_dataloader = DataLoader(
37 | test_dataset,
38 | batch_size,
39 | num_workers=num_workers,
40 | shuffle=True,
41 | drop_last=False,
42 | )
43 | # ----------------- Setup Model & Load weights if supplied -----------------
44 | architecture = cfg["architecture"]
45 |
46 | if architecture not in valid_models.keys():
47 | raise ValueError("This model is not defined in the valid_model dictionary.")
48 |
49 | model = build_model(
50 | architecture,
51 | cfg["pretrained"],
52 | test_dataset.num_classes,
53 | cfg["train_backbone"],
54 | )
55 |
56 | model.name = build_model_name(cfg)
57 | model.configuration = cfg
58 |
59 | print(
60 | "MAKE SURE YOU RUN create_table_action.py FIRST SO THAT ALL THE BEST.PT FILES ARE CREATED"
61 | )
62 | cfg["weights_path"] = f"runs/action/{datasetname}/{model.name}/best.pt"
63 |
64 | load_weights(model, None, cfg["weights_path"])
65 |
66 | criterion = nn.CrossEntropyLoss().cuda()
67 | model = model.cuda()
68 |
69 | added_info = build_info_name(cfg)
70 |
71 | # sim_name = (
72 | # f"action_eval/{datasetname}/{cfg['datasetname']}{added_info}/{model.name}"
73 | # )
74 | sim_name = f"action_eval_attributes/{datasetname}/{cfg['datasetname']}{added_info}/{model.name}"
75 |
76 | with Simulation(sim_name=sim_name, output_root="runs") as sim:
77 | writer = SummaryWriter(join(sim.outdir, "tensorboard"))
78 | if "e2s_x3d" in cfg["architecture"]:
79 | trainer = Trainer_E2SX3D(sim)
80 | else:
81 | trainer = Trainer(sim)
82 |
83 | trainer.do(
84 | "eval",
85 | model,
86 | test_dataloader,
87 | 0,
88 | criterion,
89 | None,
90 | writer,
91 | log_video=True,
92 | )
93 |
94 |         writer.close()
95 |
96 |
97 | if __name__ == "__main__":
98 | main()
99 | print("\n\n [ DONE ]")
100 |
--------------------------------------------------------------------------------
/action_train.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from os.path import join
3 |
4 | import torch
5 | from torch import nn, optim
6 | from torch.utils.data import DataLoader
7 | from torch.utils.tensorboard import SummaryWriter
8 |
9 | from config import build_cfg, cfg, parser
10 | from dataset.db_factory import DBfactory
11 | from models.valid_models import valid_models
12 | from simulation.simulation import Simulation
13 | from utils.info_print import print_data_augmentation_transform, print_learnable_params
14 | from utils.model_utils import build_model, build_model_name, set_seed
15 | from utils.Trainer import Trainer, Trainer_E2SX3D
16 |
17 | parser.add_argument("--architecture", type=str)
18 | cfg = build_cfg()
19 |
20 | import matplotlib
21 |
22 | matplotlib.use("Agg")
23 | # Setting reproducibility
24 |
25 |
26 | def main():
27 | set_seed()
28 | torch.backends.cudnn.benchmark = True
29 | datasetname = cfg["datasetname"]
30 | batch_size = cfg["batch_size"]
31 | num_workers = cfg["num_workers"]
32 | accum_every = cfg["accumulate_grad_batches"]
33 | gpus = cfg["gpus"]
34 | multigpu = len(gpus) > 1
35 |
36 | train_dataset = DBfactory(datasetname, set_split="train", config=cfg)
37 | test_dataset = DBfactory(datasetname, set_split="test", config=cfg)
38 |
39 | train_dataloader = DataLoader(
40 | train_dataset,
41 | batch_size,
42 | num_workers=num_workers,
43 | shuffle=True,
44 | drop_last=True,
45 | )
46 | test_dataloader = DataLoader(
47 | test_dataset,
48 | batch_size,
49 | num_workers=num_workers,
50 | shuffle=True,
51 | drop_last=True,
52 | )
53 |
54 | # ----------------- Setup Model & Load weights if supplied -----------------
55 | architecture = cfg["architecture"]
56 |
57 | if architecture not in valid_models.keys():
58 | raise ValueError("This model is not defined in the valid_model dictionary.")
59 |
60 | model = build_model(
61 | architecture,
62 | cfg["pretrained"],
63 | train_dataset.num_classes,
64 | cfg["train_backbone"],
65 | )
66 |
67 | model.name = build_model_name(cfg)
68 | model.configuration = cfg
69 | optimizer = optim.Adam(model.parameters(), lr=cfg["lr"])
70 |
71 | if multigpu:
72 | model = nn.DataParallel(model, device_ids=gpus)
73 | model.name = model.module.name
74 |
75 | # load_weights(model, optimizer, cfg["weights_path"])
76 | criterion = nn.CrossEntropyLoss().cuda()
77 | model = model.cuda()
78 |
79 | sim_name = f"action/{cfg['datasetname']}/{model.name}"
80 | best_top1 = 0
81 |
82 | with Simulation(sim_name=sim_name, output_root="runs") as sim:
83 | print(f"-------------- CFG ------------------\n")
84 | for k, v in cfg.items():
85 | print(f"{k}: {v}")
86 | print(f"-------------------------------------\n")
87 |
88 | cfg["executed"] = f'python {" ".join(sys.argv)}'
89 | print(f'Running: {cfg["executed"]}\n\n\n')
90 | print_learnable_params(model, verbose=False)
91 | print_data_augmentation_transform(train_dataset.transform)
92 | print(f"Begin training: {model.name}")
93 |
94 | writer = SummaryWriter(join(sim.outdir, "tensorboard"))
95 |
96 | if "e2s_x3d" in cfg["architecture"]:
97 | trainer = Trainer_E2SX3D(sim)
98 | else:
99 | trainer = Trainer(sim)
100 |
101 | # -------------- MAIN TRAINING LOOP ----------------------
102 | for epoch in range(cfg["num_epochs"]):
103 | trainer.do(
104 | "train",
105 | model,
106 | train_dataloader,
107 | epoch,
108 | criterion,
109 | optimizer,
110 | writer,
111 | log_video=False,
112 | accumulate_grad_batches=accum_every,
113 | )
114 |
115 | if epoch % 5 == 0 or epoch == cfg["num_epochs"] - 1:
116 | curr_top1 = trainer.do(
117 | "test",
118 | model,
119 | test_dataloader,
120 | epoch,
121 | criterion,
122 | None,
123 | writer,
124 | log_video=False,
125 | )
126 | if curr_top1 > best_top1:
127 | best_top1 = curr_top1
128 |
129 | checkpoint = {
130 | "epoch": epoch,
131 | "state_dict": (
132 | model.module.state_dict()
133 | if multigpu
134 | else model.state_dict()
135 | ),
136 | "optimizer": optimizer.state_dict(),
137 | }
138 | sim.save_pytorch(checkpoint, epoch=epoch)
139 |
140 | print(f"\nRun {sim.outdir} finished\n")
141 |
142 |         writer.close()
143 |
144 |
145 | if __name__ == "__main__":
146 | main()
147 |
--------------------------------------------------------------------------------
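The checkpoints saved by `sim.save_pytorch` above are plain dicts with `epoch`, `state_dict`, and `optimizer` keys, so restoring one is short. A sketch, assuming `model` and `optimizer` are constructed exactly as in `main()` and the path points at an existing run:

```python
import torch

# Restore a checkpoint written by action_train.py (path is a placeholder).
ckpt = torch.load("runs/action/kth/<model_name>/best.pt", map_location="cpu")
model.load_state_dict(ckpt["state_dict"])      # saved without the DataParallel 'module.' prefix
optimizer.load_state_dict(ckpt["optimizer"])
start_epoch = ckpt["epoch"] + 1
```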
/afd/RAFT/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 3-Clause License
2 |
3 | Copyright (c) 2020, princeton-vl
4 | All rights reserved.
5 |
6 | Redistribution and use in source and binary forms, with or without
7 | modification, are permitted provided that the following conditions are met:
8 |
9 | * Redistributions of source code must retain the above copyright notice, this
10 | list of conditions and the following disclaimer.
11 |
12 | * Redistributions in binary form must reproduce the above copyright notice,
13 | this list of conditions and the following disclaimer in the documentation
14 | and/or other materials provided with the distribution.
15 |
16 | * Neither the name of the copyright holder nor the names of its
17 | contributors may be used to endorse or promote products derived from
18 | this software without specific prior written permission.
19 |
20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 |
--------------------------------------------------------------------------------
/afd/RAFT/RAFT.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/afd/RAFT/RAFT.png
--------------------------------------------------------------------------------
/afd/RAFT/README.md:
--------------------------------------------------------------------------------
1 | # RAFT
2 | This repository contains the source code for our paper:
3 |
4 | [RAFT: Recurrent All Pairs Field Transforms for Optical Flow](https://arxiv.org/pdf/2003.12039.pdf)
5 | ECCV 2020
6 | Zachary Teed and Jia Deng
7 |
8 |
9 |
10 | ## Requirements
11 | The code has been tested with PyTorch 1.6 and CUDA 10.1.
12 | ```Shell
13 | conda create --name raft
14 | conda activate raft
15 | conda install pytorch=1.6.0 torchvision=0.7.0 cudatoolkit=10.1 matplotlib tensorboard scipy opencv -c pytorch
16 | ```
17 |
18 | ## Demos
19 | Pretrained models can be downloaded by running
20 | ```Shell
21 | ./download_models.sh
22 | ```
23 | or downloaded from [google drive](https://drive.google.com/drive/folders/1sWDsfuZ3Up38EUQt7-JDTT1HcGHuJgvT?usp=sharing)
24 |
25 | You can demo a trained model on a sequence of frames
26 | ```Shell
27 | python demo.py --model=models/raft-things.pth --path=demo-frames
28 | ```
29 |
30 | ## Required Data
31 | To evaluate/train RAFT, you will need to download the required datasets.
32 | * [FlyingChairs](https://lmb.informatik.uni-freiburg.de/resources/datasets/FlyingChairs.en.html#flyingchairs)
33 | * [FlyingThings3D](https://lmb.informatik.uni-freiburg.de/resources/datasets/SceneFlowDatasets.en.html)
34 | * [Sintel](http://sintel.is.tue.mpg.de/)
35 | * [KITTI](http://www.cvlibs.net/datasets/kitti/eval_scene_flow.php?benchmark=flow)
36 | * [HD1K](http://hci-benchmark.iwr.uni-heidelberg.de/) (optional)
37 |
38 |
39 | By default `datasets.py` will search for the datasets in these locations. You can create symbolic links to wherever the datasets were downloaded in the `datasets` folder.
40 |
41 | ```Shell
42 | ├── datasets
43 | ├── Sintel
44 | ├── test
45 | ├── training
46 | ├── KITTI
47 | ├── testing
48 | ├── training
49 | ├── devkit
50 | ├── FlyingChairs_release
51 | ├── data
52 | ├── FlyingThings3D
53 | ├── frames_cleanpass
54 | ├── frames_finalpass
55 | ├── optical_flow
56 | ```
57 |
58 | ## Evaluation
59 | You can evaluate a trained model using `evaluate.py`
60 | ```Shell
61 | python evaluate.py --model=models/raft-things.pth --dataset=sintel --mixed_precision
62 | ```
63 |
64 | ## Training
65 | We used the following training schedule in our paper (2 GPUs). Training logs will be written to the `runs` directory, which can be visualized using tensorboard.
66 | ```Shell
67 | ./train_standard.sh
68 | ```
69 |
70 | If you have an RTX GPU, training can be accelerated using mixed precision. You can expect similar results in this setting (1 GPU).
71 | ```Shell
72 | ./train_mixed.sh
73 | ```
74 |
75 | ## (Optional) Efficient Implementation
76 | You can optionally use our alternate (efficient) implementation by compiling the provided CUDA extension
77 | ```Shell
78 | cd alt_cuda_corr && python setup.py install && cd ..
79 | ```
80 | and running `demo.py` and `evaluate.py` with the `--alternate_corr` flag. Note that this implementation is somewhat slower than all-pairs, but uses significantly less GPU memory during the forward pass.
81 |
--------------------------------------------------------------------------------
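Supplementing the README: a minimal programmatic sketch of running RAFT on a frame pair, mirroring what `demo.py` below does. The import paths are the ones used inside `afd/` (see `corr.py`); the `Namespace` fields are the ones `raft.py` reads, and the checkpoint path assumes `download_models.sh` has been run:

```python
import torch
from argparse import Namespace

from RAFT.core.raft import RAFT
from RAFT.core.utils.utils import InputPadder

args = Namespace(small=False, mixed_precision=False, alternate_corr=False)
model = torch.nn.DataParallel(RAFT(args))  # checkpoints store a 'module.' prefix
model.load_state_dict(torch.load("RAFT/models/raft-things.pth"))
model = model.module.cuda().eval()

# image1/image2: float tensors of shape [1, 3, H, W] with values in 0..255.
image1 = torch.rand(1, 3, 436, 1024, device="cuda") * 255
image2 = torch.rand(1, 3, 436, 1024, device="cuda") * 255

with torch.no_grad():
    padder = InputPadder(image1.shape)          # pad H and W to multiples of 8
    image1, image2 = padder.pad(image1, image2)
    flow_low, flow_up = model(image1, image2, iters=20, test_mode=True)

print(padder.unpad(flow_up).shape)              # torch.Size([1, 2, 436, 1024])
```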
/afd/RAFT/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/afd/RAFT/__init__.py
--------------------------------------------------------------------------------
/afd/RAFT/alt_cuda_corr/correlation.cpp:
--------------------------------------------------------------------------------
 1 | #include <torch/extension.h>
 2 | #include <vector>
3 |
4 | // CUDA forward declarations
 5 | std::vector<torch::Tensor> corr_cuda_forward(
6 | torch::Tensor fmap1,
7 | torch::Tensor fmap2,
8 | torch::Tensor coords,
9 | int radius);
10 |
11 | std::vector<torch::Tensor> corr_cuda_backward(
12 | torch::Tensor fmap1,
13 | torch::Tensor fmap2,
14 | torch::Tensor coords,
15 | torch::Tensor corr_grad,
16 | int radius);
17 |
18 | // C++ interface
19 | #define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor")
20 | #define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
21 | #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
22 |
23 | std::vector<torch::Tensor> corr_forward(
24 | torch::Tensor fmap1,
25 | torch::Tensor fmap2,
26 | torch::Tensor coords,
27 | int radius) {
28 | CHECK_INPUT(fmap1);
29 | CHECK_INPUT(fmap2);
30 | CHECK_INPUT(coords);
31 |
32 | return corr_cuda_forward(fmap1, fmap2, coords, radius);
33 | }
34 |
35 |
36 | std::vector<torch::Tensor> corr_backward(
37 | torch::Tensor fmap1,
38 | torch::Tensor fmap2,
39 | torch::Tensor coords,
40 | torch::Tensor corr_grad,
41 | int radius) {
42 | CHECK_INPUT(fmap1);
43 | CHECK_INPUT(fmap2);
44 | CHECK_INPUT(coords);
45 | CHECK_INPUT(corr_grad);
46 |
47 | return corr_cuda_backward(fmap1, fmap2, coords, corr_grad, radius);
48 | }
49 |
50 |
51 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
52 | m.def("forward", &corr_forward, "CORR forward");
53 | m.def("backward", &corr_backward, "CORR backward");
54 | }
--------------------------------------------------------------------------------
/afd/RAFT/alt_cuda_corr/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup
2 | from torch.utils.cpp_extension import BuildExtension, CUDAExtension
3 |
4 |
5 | setup(
6 | name='correlation',
7 | ext_modules=[
8 | CUDAExtension('alt_cuda_corr',
9 | sources=['correlation.cpp', 'correlation_kernel.cu'],
10 | extra_compile_args={'cxx': [], 'nvcc': ['-O3']}),
11 | ],
12 | cmdclass={
13 | 'build_ext': BuildExtension
14 | })
15 |
16 |
--------------------------------------------------------------------------------
/afd/RAFT/core/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/afd/RAFT/core/__init__.py
--------------------------------------------------------------------------------
/afd/RAFT/core/corr.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn.functional as F
3 | from RAFT.core.utils.utils import bilinear_sampler, coords_grid
4 | # from utils.utils import bilinear_sampler, coords_grid
5 |
 6 | try:
 7 |     import alt_cuda_corr
 8 | except ImportError:
 9 |     # alt_cuda_corr is not compiled; only needed by AlternateCorrBlock below
10 |     pass
11 |
12 |
13 | class CorrBlock:
14 | def __init__(self, fmap1, fmap2, num_levels=4, radius=4):
15 | self.num_levels = num_levels
16 | self.radius = radius
17 | self.corr_pyramid = []
18 |
19 | # all pairs correlation
20 | corr = CorrBlock.corr(fmap1, fmap2)
21 |
22 | batch, h1, w1, dim, h2, w2 = corr.shape
23 | corr = corr.reshape(batch*h1*w1, dim, h2, w2)
24 |
25 | self.corr_pyramid.append(corr)
26 | for i in range(self.num_levels-1):
27 | corr = F.avg_pool2d(corr, 2, stride=2)
28 | self.corr_pyramid.append(corr)
29 |
30 | def __call__(self, coords):
31 | r = self.radius
32 | coords = coords.permute(0, 2, 3, 1)
33 | batch, h1, w1, _ = coords.shape
34 |
35 | out_pyramid = []
36 | for i in range(self.num_levels):
37 | corr = self.corr_pyramid[i]
38 | dx = torch.linspace(-r, r, 2*r+1)
39 | dy = torch.linspace(-r, r, 2*r+1)
40 | delta = torch.stack(torch.meshgrid(dy, dx), axis=-1).to(coords.device)
41 |
42 | centroid_lvl = coords.reshape(batch*h1*w1, 1, 1, 2) / 2**i
43 | delta_lvl = delta.view(1, 2*r+1, 2*r+1, 2)
44 | coords_lvl = centroid_lvl + delta_lvl
45 |
46 | corr = bilinear_sampler(corr, coords_lvl)
47 | corr = corr.view(batch, h1, w1, -1)
48 | out_pyramid.append(corr)
49 |
50 | out = torch.cat(out_pyramid, dim=-1)
51 | return out.permute(0, 3, 1, 2).contiguous().float()
52 |
53 | @staticmethod
54 | def corr(fmap1, fmap2):
55 | batch, dim, ht, wd = fmap1.shape
56 | fmap1 = fmap1.view(batch, dim, ht*wd)
57 | fmap2 = fmap2.view(batch, dim, ht*wd)
58 |
59 | corr = torch.matmul(fmap1.transpose(1,2), fmap2)
60 | corr = corr.view(batch, ht, wd, 1, ht, wd)
61 | return corr / torch.sqrt(torch.tensor(dim).float())
62 |
63 |
64 | class AlternateCorrBlock:
65 | def __init__(self, fmap1, fmap2, num_levels=4, radius=4):
66 | self.num_levels = num_levels
67 | self.radius = radius
68 |
69 | self.pyramid = [(fmap1, fmap2)]
70 | for i in range(self.num_levels):
71 | fmap1 = F.avg_pool2d(fmap1, 2, stride=2)
72 | fmap2 = F.avg_pool2d(fmap2, 2, stride=2)
73 | self.pyramid.append((fmap1, fmap2))
74 |
75 | def __call__(self, coords):
76 | coords = coords.permute(0, 2, 3, 1)
77 | B, H, W, _ = coords.shape
78 | dim = self.pyramid[0][0].shape[1]
79 |
80 | corr_list = []
81 | for i in range(self.num_levels):
82 | r = self.radius
83 | fmap1_i = self.pyramid[0][0].permute(0, 2, 3, 1).contiguous()
84 | fmap2_i = self.pyramid[i][1].permute(0, 2, 3, 1).contiguous()
85 |
86 | coords_i = (coords / 2**i).reshape(B, 1, H, W, 2).contiguous()
87 | corr, = alt_cuda_corr.forward(fmap1_i, fmap2_i, coords_i, r)
88 | corr_list.append(corr.squeeze(1))
89 |
90 | corr = torch.stack(corr_list, dim=1)
91 | corr = corr.reshape(B, -1, H, W)
92 | return corr / torch.sqrt(torch.tensor(dim).float())
93 |
--------------------------------------------------------------------------------
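A shape sketch for `CorrBlock` above: the output channel count is `num_levels * (2*radius + 1)**2`, one correlation window per pyramid level.

```python
import torch
from RAFT.core.corr import CorrBlock

# Toy 1/8-resolution feature maps: [batch, dim, h1, w1].
fmap1 = torch.randn(1, 256, 46, 62)
fmap2 = torch.randn(1, 256, 46, 62)
corr_fn = CorrBlock(fmap1, fmap2, num_levels=4, radius=4)

coords = torch.zeros(1, 2, 46, 62)  # lookup centers in pixel coordinates
out = corr_fn(coords)
print(out.shape)  # torch.Size([1, 324, 46, 62]); 4 levels * 9*9 window = 324
```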
/afd/RAFT/core/raft.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 | import torch.nn as nn
4 | import torch.nn.functional as F
5 |
6 | from RAFT.core.update import BasicUpdateBlock, SmallUpdateBlock
7 | from RAFT.core.extractor import BasicEncoder, SmallEncoder
8 | from RAFT.core.corr import CorrBlock, AlternateCorrBlock
9 | from RAFT.core.utils.utils import bilinear_sampler, coords_grid, upflow8
10 |
11 | # from update import BasicUpdateBlock, SmallUpdateBlock
12 | # from extractor import BasicEncoder, SmallEncoder
13 | # from corr import CorrBlock, AlternateCorrBlock
14 | # from utils.utils import bilinear_sampler, coords_grid, upflow8
15 |
16 | try:
17 | autocast = torch.cuda.amp.autocast
18 | except:
19 | # dummy autocast for PyTorch < 1.6
20 | class autocast:
21 | def __init__(self, enabled):
22 | pass
23 | def __enter__(self):
24 | pass
25 | def __exit__(self, *args):
26 | pass
27 |
28 |
29 | class RAFT(nn.Module):
30 | def __init__(self, args):
31 | super(RAFT, self).__init__()
32 | self.args = args
33 |
34 | if args.small:
35 | self.hidden_dim = hdim = 96
36 | self.context_dim = cdim = 64
37 | args.corr_levels = 4
38 | args.corr_radius = 3
39 |
40 | else:
41 | self.hidden_dim = hdim = 128
42 | self.context_dim = cdim = 128
43 | args.corr_levels = 4
44 | args.corr_radius = 4
45 |
46 | if 'dropout' not in self.args:
47 | self.args.dropout = 0
48 |
49 | if 'alternate_corr' not in self.args:
50 | self.args.alternate_corr = False
51 |
52 | # feature network, context network, and update block
53 | if args.small:
54 | self.fnet = SmallEncoder(output_dim=128, norm_fn='instance', dropout=args.dropout)
55 | self.cnet = SmallEncoder(output_dim=hdim+cdim, norm_fn='none', dropout=args.dropout)
56 | self.update_block = SmallUpdateBlock(self.args, hidden_dim=hdim)
57 |
58 | else:
59 | self.fnet = BasicEncoder(output_dim=256, norm_fn='instance', dropout=args.dropout)
60 | self.cnet = BasicEncoder(output_dim=hdim+cdim, norm_fn='batch', dropout=args.dropout)
61 | self.update_block = BasicUpdateBlock(self.args, hidden_dim=hdim)
62 |
63 | def freeze_bn(self):
64 | for m in self.modules():
65 | if isinstance(m, nn.BatchNorm2d):
66 | m.eval()
67 |
68 | def initialize_flow(self, img):
69 | """ Flow is represented as difference between two coordinate grids flow = coords1 - coords0"""
70 | N, C, H, W = img.shape
71 | coords0 = coords_grid(N, H//8, W//8).to(img.device)
72 | coords1 = coords_grid(N, H//8, W//8).to(img.device)
73 |
74 | # optical flow computed as difference: flow = coords1 - coords0
75 | return coords0, coords1
76 |
77 | def upsample_flow(self, flow, mask):
78 | """ Upsample flow field [H/8, W/8, 2] -> [H, W, 2] using convex combination """
79 | N, _, H, W = flow.shape
80 | mask = mask.view(N, 1, 9, 8, 8, H, W)
81 | mask = torch.softmax(mask, dim=2)
82 |
83 | up_flow = F.unfold(8 * flow, [3,3], padding=1)
84 | up_flow = up_flow.view(N, 2, 9, 1, 1, H, W)
85 |
86 | up_flow = torch.sum(mask * up_flow, dim=2)
87 | up_flow = up_flow.permute(0, 1, 4, 2, 5, 3)
88 | return up_flow.reshape(N, 2, 8*H, 8*W)
89 |
90 |
91 | def forward(self, image1, image2, iters=12, flow_init=None, upsample=True, test_mode=False):
92 | """ Estimate optical flow between pair of frames """
93 |
94 | image1 = 2 * (image1 / 255.0) - 1.0
95 | image2 = 2 * (image2 / 255.0) - 1.0
96 |
97 | image1 = image1.contiguous()
98 | image2 = image2.contiguous()
99 |
100 | hdim = self.hidden_dim
101 | cdim = self.context_dim
102 |
103 | # run the feature network
104 | with autocast(enabled=self.args.mixed_precision):
105 | fmap1, fmap2 = self.fnet([image1, image2])
106 |
107 | fmap1 = fmap1.float()
108 | fmap2 = fmap2.float()
109 | if self.args.alternate_corr:
110 | corr_fn = AlternateCorrBlock(fmap1, fmap2, radius=self.args.corr_radius)
111 | else:
112 | corr_fn = CorrBlock(fmap1, fmap2, radius=self.args.corr_radius)
113 |
114 | # run the context network
115 | with autocast(enabled=self.args.mixed_precision):
116 | cnet = self.cnet(image1)
117 | net, inp = torch.split(cnet, [hdim, cdim], dim=1)
118 | net = torch.tanh(net)
119 | inp = torch.relu(inp)
120 |
121 | coords0, coords1 = self.initialize_flow(image1)
122 |
123 | if flow_init is not None:
124 | coords1 = coords1 + flow_init
125 |
126 | flow_predictions = []
127 | for itr in range(iters):
128 | coords1 = coords1.detach()
129 | corr = corr_fn(coords1) # index correlation volume
130 |
131 | flow = coords1 - coords0
132 | with autocast(enabled=self.args.mixed_precision):
133 | net, up_mask, delta_flow = self.update_block(net, inp, corr, flow)
134 |
135 | # F(t+1) = F(t) + \Delta(t)
136 | coords1 = coords1 + delta_flow
137 |
138 | # upsample predictions
139 | if up_mask is None:
140 | flow_up = upflow8(coords1 - coords0)
141 | else:
142 | flow_up = self.upsample_flow(coords1 - coords0, up_mask)
143 |
144 | flow_predictions.append(flow_up)
145 |
146 | if test_mode:
147 | return coords1 - coords0, flow_up
148 |
149 | return flow_predictions
150 |
--------------------------------------------------------------------------------
/afd/RAFT/core/update.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 |
5 |
6 | class FlowHead(nn.Module):
7 | def __init__(self, input_dim=128, hidden_dim=256):
8 | super(FlowHead, self).__init__()
9 | self.conv1 = nn.Conv2d(input_dim, hidden_dim, 3, padding=1)
10 | self.conv2 = nn.Conv2d(hidden_dim, 2, 3, padding=1)
11 | self.relu = nn.ReLU(inplace=True)
12 |
13 | def forward(self, x):
14 | return self.conv2(self.relu(self.conv1(x)))
15 |
16 | class ConvGRU(nn.Module):
17 | def __init__(self, hidden_dim=128, input_dim=192+128):
18 | super(ConvGRU, self).__init__()
19 | self.convz = nn.Conv2d(hidden_dim+input_dim, hidden_dim, 3, padding=1)
20 | self.convr = nn.Conv2d(hidden_dim+input_dim, hidden_dim, 3, padding=1)
21 | self.convq = nn.Conv2d(hidden_dim+input_dim, hidden_dim, 3, padding=1)
22 |
23 | def forward(self, h, x):
24 | hx = torch.cat([h, x], dim=1)
25 |
26 | z = torch.sigmoid(self.convz(hx))
27 | r = torch.sigmoid(self.convr(hx))
28 | q = torch.tanh(self.convq(torch.cat([r*h, x], dim=1)))
29 |
30 | h = (1-z) * h + z * q
31 | return h
32 |
33 | class SepConvGRU(nn.Module):
34 | def __init__(self, hidden_dim=128, input_dim=192+128):
35 | super(SepConvGRU, self).__init__()
36 | self.convz1 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (1,5), padding=(0,2))
37 | self.convr1 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (1,5), padding=(0,2))
38 | self.convq1 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (1,5), padding=(0,2))
39 |
40 | self.convz2 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (5,1), padding=(2,0))
41 | self.convr2 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (5,1), padding=(2,0))
42 | self.convq2 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (5,1), padding=(2,0))
43 |
44 |
45 | def forward(self, h, x):
46 | # horizontal
47 | hx = torch.cat([h, x], dim=1)
48 | z = torch.sigmoid(self.convz1(hx))
49 | r = torch.sigmoid(self.convr1(hx))
50 | q = torch.tanh(self.convq1(torch.cat([r*h, x], dim=1)))
51 | h = (1-z) * h + z * q
52 |
53 | # vertical
54 | hx = torch.cat([h, x], dim=1)
55 | z = torch.sigmoid(self.convz2(hx))
56 | r = torch.sigmoid(self.convr2(hx))
57 | q = torch.tanh(self.convq2(torch.cat([r*h, x], dim=1)))
58 | h = (1-z) * h + z * q
59 |
60 | return h
61 |
62 | class SmallMotionEncoder(nn.Module):
63 | def __init__(self, args):
64 | super(SmallMotionEncoder, self).__init__()
65 | cor_planes = args.corr_levels * (2*args.corr_radius + 1)**2
66 | self.convc1 = nn.Conv2d(cor_planes, 96, 1, padding=0)
67 | self.convf1 = nn.Conv2d(2, 64, 7, padding=3)
68 | self.convf2 = nn.Conv2d(64, 32, 3, padding=1)
69 | self.conv = nn.Conv2d(128, 80, 3, padding=1)
70 |
71 | def forward(self, flow, corr):
72 | cor = F.relu(self.convc1(corr))
73 | flo = F.relu(self.convf1(flow))
74 | flo = F.relu(self.convf2(flo))
75 | cor_flo = torch.cat([cor, flo], dim=1)
76 | out = F.relu(self.conv(cor_flo))
77 | return torch.cat([out, flow], dim=1)
78 |
79 | class BasicMotionEncoder(nn.Module):
80 | def __init__(self, args):
81 | super(BasicMotionEncoder, self).__init__()
82 | cor_planes = args.corr_levels * (2*args.corr_radius + 1)**2
83 | self.convc1 = nn.Conv2d(cor_planes, 256, 1, padding=0)
84 | self.convc2 = nn.Conv2d(256, 192, 3, padding=1)
85 | self.convf1 = nn.Conv2d(2, 128, 7, padding=3)
86 | self.convf2 = nn.Conv2d(128, 64, 3, padding=1)
87 | self.conv = nn.Conv2d(64+192, 128-2, 3, padding=1)
88 |
89 | def forward(self, flow, corr):
90 | cor = F.relu(self.convc1(corr))
91 | cor = F.relu(self.convc2(cor))
92 | flo = F.relu(self.convf1(flow))
93 | flo = F.relu(self.convf2(flo))
94 |
95 | cor_flo = torch.cat([cor, flo], dim=1)
96 | out = F.relu(self.conv(cor_flo))
97 | return torch.cat([out, flow], dim=1)
98 |
99 | class SmallUpdateBlock(nn.Module):
100 | def __init__(self, args, hidden_dim=96):
101 | super(SmallUpdateBlock, self).__init__()
102 | self.encoder = SmallMotionEncoder(args)
103 | self.gru = ConvGRU(hidden_dim=hidden_dim, input_dim=82+64)
104 | self.flow_head = FlowHead(hidden_dim, hidden_dim=128)
105 |
106 | def forward(self, net, inp, corr, flow):
107 | motion_features = self.encoder(flow, corr)
108 | inp = torch.cat([inp, motion_features], dim=1)
109 | net = self.gru(net, inp)
110 | delta_flow = self.flow_head(net)
111 |
112 | return net, None, delta_flow
113 |
114 | class BasicUpdateBlock(nn.Module):
115 | def __init__(self, args, hidden_dim=128, input_dim=128):
116 | super(BasicUpdateBlock, self).__init__()
117 | self.args = args
118 | self.encoder = BasicMotionEncoder(args)
119 | self.gru = SepConvGRU(hidden_dim=hidden_dim, input_dim=128+hidden_dim)
120 | self.flow_head = FlowHead(hidden_dim, hidden_dim=256)
121 |
122 | self.mask = nn.Sequential(
123 | nn.Conv2d(128, 256, 3, padding=1),
124 | nn.ReLU(inplace=True),
125 | nn.Conv2d(256, 64*9, 1, padding=0))
126 |
127 | def forward(self, net, inp, corr, flow, upsample=True):
128 | motion_features = self.encoder(flow, corr)
129 | inp = torch.cat([inp, motion_features], dim=1)
130 |
131 | net = self.gru(net, inp)
132 | delta_flow = self.flow_head(net)
133 |
134 |         # scale mask to balance gradients
135 | mask = .25 * self.mask(net)
136 | return net, mask, delta_flow
137 |
138 |
139 |
140 |
--------------------------------------------------------------------------------
/afd/RAFT/core/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/afd/RAFT/core/utils/__init__.py
--------------------------------------------------------------------------------
/afd/RAFT/core/utils/flow_viz.py:
--------------------------------------------------------------------------------
1 | # Flow visualization code used from https://github.com/tomrunia/OpticalFlow_Visualization
2 |
3 |
4 | # MIT License
5 | #
6 | # Copyright (c) 2018 Tom Runia
7 | #
8 | # Permission is hereby granted, free of charge, to any person obtaining a copy
9 | # of this software and associated documentation files (the "Software"), to deal
10 | # in the Software without restriction, including without limitation the rights
11 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 | # copies of the Software, and to permit persons to whom the Software is
13 | # furnished to do so, subject to conditions.
14 | #
15 | # Author: Tom Runia
16 | # Date Created: 2018-08-03
17 |
18 | import numpy as np
19 |
20 | def make_colorwheel():
21 | """
22 | Generates a color wheel for optical flow visualization as presented in:
23 | Baker et al. "A Database and Evaluation Methodology for Optical Flow" (ICCV, 2007)
24 | URL: http://vision.middlebury.edu/flow/flowEval-iccv07.pdf
25 |
26 | Code follows the original C++ source code of Daniel Scharstein.
27 |     Code follows the Matlab source code of Deqing Sun.
28 |
29 | Returns:
30 | np.ndarray: Color wheel
31 | """
32 |
33 | RY = 15
34 | YG = 6
35 | GC = 4
36 | CB = 11
37 | BM = 13
38 | MR = 6
39 |
40 | ncols = RY + YG + GC + CB + BM + MR
41 | colorwheel = np.zeros((ncols, 3))
42 | col = 0
43 |
44 | # RY
45 | colorwheel[0:RY, 0] = 255
46 | colorwheel[0:RY, 1] = np.floor(255*np.arange(0,RY)/RY)
47 | col = col+RY
48 | # YG
49 | colorwheel[col:col+YG, 0] = 255 - np.floor(255*np.arange(0,YG)/YG)
50 | colorwheel[col:col+YG, 1] = 255
51 | col = col+YG
52 | # GC
53 | colorwheel[col:col+GC, 1] = 255
54 | colorwheel[col:col+GC, 2] = np.floor(255*np.arange(0,GC)/GC)
55 | col = col+GC
56 | # CB
57 | colorwheel[col:col+CB, 1] = 255 - np.floor(255*np.arange(CB)/CB)
58 | colorwheel[col:col+CB, 2] = 255
59 | col = col+CB
60 | # BM
61 | colorwheel[col:col+BM, 2] = 255
62 | colorwheel[col:col+BM, 0] = np.floor(255*np.arange(0,BM)/BM)
63 | col = col+BM
64 | # MR
65 | colorwheel[col:col+MR, 2] = 255 - np.floor(255*np.arange(MR)/MR)
66 | colorwheel[col:col+MR, 0] = 255
67 | return colorwheel
68 |
69 |
70 | def flow_uv_to_colors(u, v, convert_to_bgr=False):
71 | """
72 | Applies the flow color wheel to (possibly clipped) flow components u and v.
73 |
74 | According to the C++ source code of Daniel Scharstein
75 | According to the Matlab source code of Deqing Sun
76 |
77 | Args:
78 | u (np.ndarray): Input horizontal flow of shape [H,W]
79 | v (np.ndarray): Input vertical flow of shape [H,W]
80 | convert_to_bgr (bool, optional): Convert output image to BGR. Defaults to False.
81 |
82 | Returns:
83 | np.ndarray: Flow visualization image of shape [H,W,3]
84 | """
85 | flow_image = np.zeros((u.shape[0], u.shape[1], 3), np.uint8)
86 | colorwheel = make_colorwheel() # shape [55x3]
87 | ncols = colorwheel.shape[0]
88 | rad = np.sqrt(np.square(u) + np.square(v))
89 | a = np.arctan2(-v, -u)/np.pi
90 | fk = (a+1) / 2*(ncols-1)
91 | k0 = np.floor(fk).astype(np.int32)
92 | k1 = k0 + 1
93 | k1[k1 == ncols] = 0
94 | f = fk - k0
95 | for i in range(colorwheel.shape[1]):
96 | tmp = colorwheel[:,i]
97 | col0 = tmp[k0] / 255.0
98 | col1 = tmp[k1] / 255.0
99 | col = (1-f)*col0 + f*col1
100 | idx = (rad <= 1)
101 | col[idx] = 1 - rad[idx] * (1-col[idx])
102 | col[~idx] = col[~idx] * 0.75 # out of range
103 | # Note the 2-i => BGR instead of RGB
104 | ch_idx = 2-i if convert_to_bgr else i
105 | flow_image[:,:,ch_idx] = np.floor(255 * col)
106 | return flow_image
107 |
108 |
109 | def flow_to_image(flow_uv, clip_flow=None, convert_to_bgr=False):
110 | """
111 | Expects a two dimensional flow image of shape.
112 |
113 | Args:
114 | flow_uv (np.ndarray): Flow UV image of shape [H,W,2]
115 | clip_flow (float, optional): Clip maximum of flow values. Defaults to None.
116 | convert_to_bgr (bool, optional): Convert output image to BGR. Defaults to False.
117 |
118 | Returns:
119 | np.ndarray: Flow visualization image of shape [H,W,3]
120 | """
121 | assert flow_uv.ndim == 3, 'input flow must have three dimensions'
122 | assert flow_uv.shape[2] == 2, 'input flow must have shape [H,W,2]'
123 | if clip_flow is not None:
124 | flow_uv = np.clip(flow_uv, 0, clip_flow)
125 | u = flow_uv[:,:,0]
126 | v = flow_uv[:,:,1]
127 | rad = np.sqrt(np.square(u) + np.square(v))
128 | rad_max = np.max(rad)
129 | epsilon = 1e-5
130 | u = u / (rad_max + epsilon)
131 | v = v / (rad_max + epsilon)
132 | return flow_uv_to_colors(u, v, convert_to_bgr)
--------------------------------------------------------------------------------
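A quick usage sketch for `flow_to_image` above: feed it an `[H, W, 2]` float array and it returns a `uint8` color coding of the flow.

```python
import numpy as np

# Colorize a synthetic radial flow field with the module above.
H, W = 64, 64
y, x = np.meshgrid(np.arange(H), np.arange(W), indexing="ij")
flow = np.stack([x - W / 2, y - H / 2], axis=-1).astype(np.float32)  # [H, W, 2]

rgb = flow_to_image(flow)
print(rgb.shape, rgb.dtype)  # (64, 64, 3) uint8
```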
/afd/RAFT/core/utils/frame_utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from PIL import Image
3 | from os.path import *
4 | import re
5 |
6 | import cv2
7 | cv2.setNumThreads(0)
8 | cv2.ocl.setUseOpenCL(False)
9 |
10 | TAG_CHAR = np.array([202021.25], np.float32)
11 |
12 | def readFlow(fn):
13 | """ Read .flo file in Middlebury format"""
14 | # Code adapted from:
15 | # http://stackoverflow.com/questions/28013200/reading-middlebury-flow-files-with-python-bytes-array-numpy
16 |
17 | # WARNING: this will work on little-endian architectures (eg Intel x86) only!
18 | # print 'fn = %s'%(fn)
19 | with open(fn, 'rb') as f:
20 | magic = np.fromfile(f, np.float32, count=1)
21 | if 202021.25 != magic:
22 | print('Magic number incorrect. Invalid .flo file')
23 | return None
24 | else:
25 | w = np.fromfile(f, np.int32, count=1)
26 | h = np.fromfile(f, np.int32, count=1)
27 | # print 'Reading %d x %d flo file\n' % (w, h)
28 | data = np.fromfile(f, np.float32, count=2*int(w)*int(h))
29 | # Reshape data into 3D array (columns, rows, bands)
30 | # The reshape here is for visualization, the original code is (w,h,2)
31 | return np.resize(data, (int(h), int(w), 2))
32 |
33 | def readPFM(file):
34 | file = open(file, 'rb')
35 |
36 | color = None
37 | width = None
38 | height = None
39 | scale = None
40 | endian = None
41 |
42 | header = file.readline().rstrip()
43 | if header == b'PF':
44 | color = True
45 | elif header == b'Pf':
46 | color = False
47 | else:
48 | raise Exception('Not a PFM file.')
49 |
50 | dim_match = re.match(rb'^(\d+)\s(\d+)\s$', file.readline())
51 | if dim_match:
52 | width, height = map(int, dim_match.groups())
53 | else:
54 | raise Exception('Malformed PFM header.')
55 |
56 | scale = float(file.readline().rstrip())
57 | if scale < 0: # little-endian
58 | endian = '<'
59 | scale = -scale
60 | else:
61 | endian = '>' # big-endian
62 |
63 | data = np.fromfile(file, endian + 'f')
64 | shape = (height, width, 3) if color else (height, width)
65 |
66 | data = np.reshape(data, shape)
67 | data = np.flipud(data)
68 | return data
69 |
70 | def writeFlow(filename,uv,v=None):
71 | """ Write optical flow to file.
72 |
73 | If v is None, uv is assumed to contain both u and v channels,
74 | stacked in depth.
75 | Original code by Deqing Sun, adapted from Daniel Scharstein.
76 | """
77 | nBands = 2
78 |
79 | if v is None:
80 | assert(uv.ndim == 3)
81 | assert(uv.shape[2] == 2)
82 | u = uv[:,:,0]
83 | v = uv[:,:,1]
84 | else:
85 | u = uv
86 |
87 | assert(u.shape == v.shape)
88 | height,width = u.shape
89 | f = open(filename,'wb')
90 | # write the header
91 | f.write(TAG_CHAR)
92 | np.array(width).astype(np.int32).tofile(f)
93 | np.array(height).astype(np.int32).tofile(f)
94 | # arrange into matrix form
95 | tmp = np.zeros((height, width*nBands))
96 | tmp[:,np.arange(width)*2] = u
97 | tmp[:,np.arange(width)*2 + 1] = v
98 | tmp.astype(np.float32).tofile(f)
99 | f.close()
100 |
101 |
102 | def readFlowKITTI(filename):
103 | flow = cv2.imread(filename, cv2.IMREAD_ANYDEPTH|cv2.IMREAD_COLOR)
104 | flow = flow[:,:,::-1].astype(np.float32)
105 | flow, valid = flow[:, :, :2], flow[:, :, 2]
106 | flow = (flow - 2**15) / 64.0
107 | return flow, valid
108 |
109 | def readDispKITTI(filename):
110 | disp = cv2.imread(filename, cv2.IMREAD_ANYDEPTH) / 256.0
111 | valid = disp > 0.0
112 | flow = np.stack([-disp, np.zeros_like(disp)], -1)
113 | return flow, valid
114 |
115 |
116 | def writeFlowKITTI(filename, uv):
117 | uv = 64.0 * uv + 2**15
118 | valid = np.ones([uv.shape[0], uv.shape[1], 1])
119 | uv = np.concatenate([uv, valid], axis=-1).astype(np.uint16)
120 | cv2.imwrite(filename, uv[..., ::-1])
121 |
122 |
123 | def read_gen(file_name, pil=False):
124 | ext = splitext(file_name)[-1]
125 | if ext == '.png' or ext == '.jpeg' or ext == '.ppm' or ext == '.jpg':
126 | return Image.open(file_name)
127 | elif ext == '.bin' or ext == '.raw':
128 | return np.load(file_name)
129 | elif ext == '.flo':
130 | return readFlow(file_name).astype(np.float32)
131 | elif ext == '.pfm':
132 | flow = readPFM(file_name).astype(np.float32)
133 | if len(flow.shape) == 2:
134 | return flow
135 | else:
136 | return flow[:, :, :-1]
137 | return []
--------------------------------------------------------------------------------
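The Middlebury `.flo` helpers above round-trip exactly, which makes a quick sanity check easy:

```python
import numpy as np

# writeFlow/readFlow round-trip (functions from frame_utils.py above).
uv = np.random.rand(32, 48, 2).astype(np.float32)
writeFlow("example.flo", uv)
restored = readFlow("example.flo")

assert restored.shape == (32, 48, 2)
assert np.allclose(uv, restored)
```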
/afd/RAFT/core/utils/utils.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn.functional as F
3 | import numpy as np
4 | from scipy import interpolate
5 |
6 |
7 | class InputPadder:
8 | """ Pads images such that dimensions are divisible by 8 """
9 | def __init__(self, dims, mode='sintel'):
10 | self.ht, self.wd = dims[-2:]
11 | pad_ht = (((self.ht // 8) + 1) * 8 - self.ht) % 8
12 | pad_wd = (((self.wd // 8) + 1) * 8 - self.wd) % 8
13 | if mode == 'sintel':
14 | self._pad = [pad_wd//2, pad_wd - pad_wd//2, pad_ht//2, pad_ht - pad_ht//2]
15 | else:
16 | self._pad = [pad_wd//2, pad_wd - pad_wd//2, 0, pad_ht]
17 |
18 | def pad(self, *inputs):
19 | return [F.pad(x, self._pad, mode='replicate') for x in inputs]
20 |
21 | def unpad(self,x):
22 | ht, wd = x.shape[-2:]
23 | c = [self._pad[2], ht-self._pad[3], self._pad[0], wd-self._pad[1]]
24 | return x[..., c[0]:c[1], c[2]:c[3]]
25 |
26 | def forward_interpolate(flow):
27 | flow = flow.detach().cpu().numpy()
28 | dx, dy = flow[0], flow[1]
29 |
30 | ht, wd = dx.shape
31 | x0, y0 = np.meshgrid(np.arange(wd), np.arange(ht))
32 |
33 | x1 = x0 + dx
34 | y1 = y0 + dy
35 |
36 | x1 = x1.reshape(-1)
37 | y1 = y1.reshape(-1)
38 | dx = dx.reshape(-1)
39 | dy = dy.reshape(-1)
40 |
41 | valid = (x1 > 0) & (x1 < wd) & (y1 > 0) & (y1 < ht)
42 | x1 = x1[valid]
43 | y1 = y1[valid]
44 | dx = dx[valid]
45 | dy = dy[valid]
46 |
47 | flow_x = interpolate.griddata(
48 | (x1, y1), dx, (x0, y0), method='nearest', fill_value=0)
49 |
50 | flow_y = interpolate.griddata(
51 | (x1, y1), dy, (x0, y0), method='nearest', fill_value=0)
52 |
53 | flow = np.stack([flow_x, flow_y], axis=0)
54 | return torch.from_numpy(flow).float()
55 |
56 |
57 | def bilinear_sampler(img, coords, mode='bilinear', mask=False):
58 | """ Wrapper for grid_sample, uses pixel coordinates """
59 | H, W = img.shape[-2:]
60 | xgrid, ygrid = coords.split([1,1], dim=-1)
61 | xgrid = 2*xgrid/(W-1) - 1
62 | ygrid = 2*ygrid/(H-1) - 1
63 |
64 | grid = torch.cat([xgrid, ygrid], dim=-1)
65 | img = F.grid_sample(img, grid, align_corners=True)
66 |
67 | if mask:
68 | mask = (xgrid > -1) & (ygrid > -1) & (xgrid < 1) & (ygrid < 1)
69 | return img, mask.float()
70 |
71 | return img
72 |
73 |
74 | def coords_grid(batch, ht, wd):
75 | coords = torch.meshgrid(torch.arange(ht), torch.arange(wd))
76 | coords = torch.stack(coords[::-1], dim=0).float()
77 | return coords[None].repeat(batch, 1, 1, 1)
78 |
79 |
80 | def upflow8(flow, mode='bilinear'):
81 | new_size = (8 * flow.shape[2], 8 * flow.shape[3])
82 | return 8 * F.interpolate(flow, size=new_size, mode=mode, align_corners=True)
83 |
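A minimal usage sketch for InputPadder with a made-up frame size; pad() returns one padded tensor per input, and unpad() crops back to the original resolution:

import torch

img = torch.randn(1, 3, 436, 1024)              # N, C, H, W; 436 is not divisible by 8
padder = InputPadder(img.shape)                 # default 'sintel' mode pads symmetrically
padded, = padder.pad(img)
assert padded.shape[-2] % 8 == 0 and padded.shape[-1] % 8 == 0
assert padder.unpad(padded).shape == img.shape  # crop restores the input size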
--------------------------------------------------------------------------------
/afd/RAFT/demo.py:
--------------------------------------------------------------------------------
1 | import sys
2 | sys.path.append('core')
3 |
4 | import argparse
5 | import os
6 | import cv2
7 | import glob
8 | import numpy as np
9 | import torch
10 | from PIL import Image
11 |
12 | from raft import RAFT
13 | from utils import flow_viz
14 | from utils.utils import InputPadder
15 |
16 | import matplotlib.pyplot as plt
17 |
18 |
19 | DEVICE = 'cuda'
20 |
21 | def load_image(imfile):
22 | img = np.array(Image.open(imfile)).astype(np.uint8)
23 | img = torch.from_numpy(img).permute(2, 0, 1).float()
24 | return img[None].to(DEVICE)
25 |
26 |
27 | def viz(img, flo):
28 | img = img[0].permute(1,2,0).cpu().numpy()
29 | flo = flo[0].permute(1,2,0).cpu().numpy()
30 |
31 | # map flow to rgb image
32 | flo = flow_viz.flow_to_image(flo)
33 | img_flo = np.concatenate([img, flo], axis=0)
34 |
35 | # import matplotlib.pyplot as plt
36 | # plt.imshow(img_flo / 255.0)
37 | # plt.show()
38 |
39 | # cv2.imshow('image', img_flo[:, :, [2,1,0]]/255.0)
40 | # cv2.waitKey()
41 | plt.imshow(img_flo/255.0)
42 | plt.show(block=True)
43 |
44 |
45 | def demo(args):
46 | model = torch.nn.DataParallel(RAFT(args))
47 | model.load_state_dict(torch.load(args.model))
48 |
49 | model = model.module
50 | model.to(DEVICE)
51 | model.eval()
52 |
53 | with torch.no_grad():
54 | images = glob.glob(os.path.join(args.path, '*.png')) + \
55 | glob.glob(os.path.join(args.path, '*.jpg'))
56 |
57 | images = sorted(images)
58 | for imfile1, imfile2 in zip(images[:-1], images[1:]):
59 | image1 = load_image(imfile1)
60 | image2 = load_image(imfile2)
61 |
62 | padder = InputPadder(image1.shape)
63 | image1, image2 = padder.pad(image1, image2)
64 |
65 | flow_low, flow_up = model(image1, image2, iters=20, test_mode=True)
66 | viz(image1, flow_up)
67 |
68 |
69 | if __name__ == '__main__':
70 | parser = argparse.ArgumentParser()
71 | parser.add_argument('--model', help="restore checkpoint")
72 | parser.add_argument('--path', help="dataset for evaluation")
73 | parser.add_argument('--small', action='store_true', help='use small model')
74 | parser.add_argument('--mixed_precision', action='store_true', help='use mixed precision')
75 | parser.add_argument('--alternate_corr', action='store_true', help='use efficient correlation implementation')
76 | args = parser.parse_args()
77 |
78 | demo(args)
79 |
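Example invocation against a directory of ordered .png/.jpg frames (the frames directory is a placeholder):

python demo.py --model models/raft-sintel.pth --path path/to/frames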
--------------------------------------------------------------------------------
/afd/RAFT/download_models.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | wget https://www.dropbox.com/s/4j4z58wuv8o0mfz/models.zip
3 | unzip models.zip
4 |
--------------------------------------------------------------------------------
/afd/RAFT/models/raft-chairs.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/afd/RAFT/models/raft-chairs.pth
--------------------------------------------------------------------------------
/afd/RAFT/models/raft-kitti.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/afd/RAFT/models/raft-kitti.pth
--------------------------------------------------------------------------------
/afd/RAFT/models/raft-sintel.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/afd/RAFT/models/raft-sintel.pth
--------------------------------------------------------------------------------
/afd/RAFT/models/raft-small.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/afd/RAFT/models/raft-small.pth
--------------------------------------------------------------------------------
/afd/RAFT/models/raft-things.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/afd/RAFT/models/raft-things.pth
--------------------------------------------------------------------------------
/afd/RAFT/train_mixed.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | mkdir -p checkpoints
3 | python -u train.py --name raft-chairs --stage chairs --validation chairs --gpus 0 --num_steps 120000 --batch_size 8 --lr 0.00025 --image_size 368 496 --wdecay 0.0001 --mixed_precision
4 | python -u train.py --name raft-things --stage things --validation sintel --restore_ckpt checkpoints/raft-chairs.pth --gpus 0 --num_steps 120000 --batch_size 5 --lr 0.0001 --image_size 400 720 --wdecay 0.0001 --mixed_precision
5 | python -u train.py --name raft-sintel --stage sintel --validation sintel --restore_ckpt checkpoints/raft-things.pth --gpus 0 --num_steps 120000 --batch_size 5 --lr 0.0001 --image_size 368 768 --wdecay 0.00001 --gamma=0.85 --mixed_precision
6 | python -u train.py --name raft-kitti --stage kitti --validation kitti --restore_ckpt checkpoints/raft-sintel.pth --gpus 0 --num_steps 50000 --batch_size 5 --lr 0.0001 --image_size 288 960 --wdecay 0.00001 --gamma=0.85 --mixed_precision
7 |
--------------------------------------------------------------------------------
/afd/RAFT/train_standard.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | mkdir -p checkpoints
3 | python -u train.py --name raft-chairs --stage chairs --validation chairs --gpus 0 1 --num_steps 100000 --batch_size 10 --lr 0.0004 --image_size 368 496 --wdecay 0.0001
4 | python -u train.py --name raft-things --stage things --validation sintel --restore_ckpt checkpoints/raft-chairs.pth --gpus 0 1 --num_steps 100000 --batch_size 6 --lr 0.000125 --image_size 400 720 --wdecay 0.0001
5 | python -u train.py --name raft-sintel --stage sintel --validation sintel --restore_ckpt checkpoints/raft-things.pth --gpus 0 1 --num_steps 100000 --batch_size 6 --lr 0.000125 --image_size 368 768 --wdecay 0.00001 --gamma=0.85
6 | python -u train.py --name raft-kitti --stage kitti --validation kitti --restore_ckpt checkpoints/raft-sintel.pth --gpus 0 1 --num_steps 50000 --batch_size 6 --lr 0.0001 --image_size 288 960 --wdecay 0.00001 --gamma=0.85
7 |
--------------------------------------------------------------------------------
/afd/SliceViewer.py:
--------------------------------------------------------------------------------
1 | from matplotlib import pyplot as plt
2 |
3 |
4 | class SliceViewer(object):
5 | # vol has to be shape T, H, W, C
6 | def __init__(
7 | self, vol, captions=None, cmap="gray", wrap_around=True, figsize=(16, 4)
8 | ):
9 | self.vol = vol
10 | self.captions = captions
11 | self.wrap_around = wrap_around
12 | self.slices = vol.shape[0]
13 | self.ind = 0
14 |
15 | self.fig, self.ax = plt.subplots(figsize=figsize)
16 | self.fig.canvas.mpl_connect("scroll_event", self.onscroll)
17 | self.im = self.ax.imshow(self.vol[self.ind, ...], cmap=cmap)
18 | self.update()
19 | plt.axis("off")
20 | plt.tight_layout()
21 | plt.show()
22 |
23 | def onscroll(self, event):
24 | if event.button == "up":
25 | if not self.wrap_around and self.ind + 1 == self.slices:
26 | return
27 | self.ind = (self.ind + 1) % self.slices
28 | else:
29 | if not self.wrap_around and self.ind == 0:
30 | return
31 | self.ind = (self.ind - 1) % self.slices
32 | self.update()
33 |
34 | def update(self):
35 | self.im.set_data(self.vol[self.ind, ...])
36 | title_string = f"SCROLL UP/DOWN: {self.ind}/{self.slices - 1}"
37 | self.ax.set_title(title_string)
38 | self.im.axes.figure.canvas.draw()
39 |
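A minimal sketch; any array indexable as (T, H, W, C) works, here a random volume (assuming afd/ is the working directory):

import numpy as np
from SliceViewer import SliceViewer

vol = np.random.rand(12, 64, 64, 3)  # T, H, W, C
SliceViewer(vol, figsize=(6, 6))     # opens a window; the scroll wheel steps through frames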
--------------------------------------------------------------------------------
/afd/afd_single_video.py:
--------------------------------------------------------------------------------
1 | import argparse
2 |
3 | import os
4 | from os import listdir, mkdir
5 | from os.path import isfile, join
6 | from pathlib import Path
7 | import flowiz as fz
8 |
9 | import matplotlib.pyplot as plt
10 | import torch
11 | from torchvision.io import read_video, write_video
12 |
13 | from RAFT.core.raft import RAFT
14 | from RAFT.core.utils.utils import InputPadder
15 | from SliceViewer import SliceViewer
16 | from util import batch_warp, upsample_flow, warp
17 | from util import flow_from_video, firstframe_warp
18 |
19 | parser = argparse.ArgumentParser()
20 | parser.add_argument(
21 | "videopath", type=str, help="Path to a video you want to see as AFD"
22 | )
23 | import torchvision
24 |
25 | torchvision.set_video_backend("video_reader")
26 | from save_helper import save_tensor_as_img, save_tensor_list_as_gif
27 |
28 |
29 | def main():
30 | parser.add_argument(
31 | "--images", type=str, default=None, help="Path where to save individual images"
32 | )
33 | parser.add_argument(
34 | "--gif", type=str, default=None, help="Path where to save output as gif"
35 | )
36 | parser.add_argument(
37 | "--upsample",
38 | type=int,
39 | default=1,
40 | help="Upsample image factor, if image too small to calculate optical flow reliably",
41 | )
42 |
43 | args = parser.parse_args()
44 | args.alternate_corr = False
45 | args.mixed_precision = True
46 | args.small = False
47 |
48 | model = torch.nn.DataParallel(RAFT(args))
49 | model.load_state_dict(torch.load("RAFT/models/raft-sintel.pth"))
50 | model = model.module
51 | model.cuda()
52 | model.eval()
53 |
54 | video_path = args.videopath
55 |
56 | frames = (read_video(video_path)[0]).float()
57 | frame_list = list(frames.permute(0, 3, 1, 2))[::2]
58 |
59 | video = torchvision.io.VideoReader(video_path, "video")
60 | fps = video.get_metadata()["video"]["fps"][0]
61 | frame_duration_ms = 50
62 |
63 | flow = flow_from_video(model, frame_list, upsample_factor=args.upsample)
64 | afd = firstframe_warp(
65 | flow,
66 | usecolor=False,
67 | # seed_image="/home/filip/projects/AppearanceFreeActionRecognition/tmp.jpg",
68 | )
69 | rgbflow = fz.video_flow2color(fz.video_normalized_flow(torch.stack(flow, dim=0)))
70 |
71 | tmp = torch.cat(
72 | [
73 | # frames[:-1, :, 50:-30] / 255.0,
74 | afd[:-1, :, 50:-30] / 255.0,
75 | # torch.from_numpy(rgbflow[:, :, 50:-30])
76 | # / 255.0,
77 | ],
78 | dim=2,
79 | )
80 | if args.gif is not None:
81 | save_tensor_list_as_gif(
82 | list(tmp.permute(0, 3, 1, 2)), path=args.gif, duration=frame_duration_ms
83 | )
84 |
85 | if args.images is not None:
86 | print(
87 | "to make a high quality gif of the generated images: gifski --fps 30 -o file.gif --quality 100 *.png"
88 | )
89 | Path(join(args.images)).mkdir(parents=True, exist_ok=True)
90 | for i in range(tmp.shape[0]):
91 | save_tensor_as_img(
92 | tmp[i].permute(2, 0, 1), path=f"{args.images}/{i:04d}.png"
93 | )
94 |
95 | # SliceViewer(tmp, figsize=(15, 5))
96 |
97 |
98 | if __name__ == "__main__":
99 | main()
100 |
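Example invocation (the video path is a placeholder); --upsample helps on clips too small for reliable flow:

python afd_single_video.py path/to/video.mp4 --gif out.gif --upsample 2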
--------------------------------------------------------------------------------
/afd/generate_ipn.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import glob
3 | import os
4 | from pathlib import Path
5 |
6 | import cv2
7 | import torch
8 | import torchvision
9 | from torchvision.datasets.utils import list_dir
10 | from tqdm import tqdm
11 |
12 | import flowiz as fz
13 | from RAFT.core.raft import RAFT
14 |
15 | # torchvision.set_video_backend("video_reader")
16 | from util import chunker, flow_from_video, set_seed, firstframe_warp
17 |
18 | parser = argparse.ArgumentParser(
19 | description="Create the motion consistent noise dataset"
20 | )
21 | parser.add_argument("--src", type=str, default="data/ipn")
22 |
23 | args = parser.parse_args()
24 |
25 |
26 | def main(list_of_dirs_to_process, src):
27 |
28 | args.alternate_corr = False
29 | args.mixed_precision = True
30 | args.small = False
31 |
32 | model = torch.nn.DataParallel(RAFT(args))
33 | model.load_state_dict(torch.load("afd/RAFT/models/raft-sintel.pth"))
34 | model = model.module
35 | model.cuda()
36 | model.eval()
37 |
38 | for clz in list_of_dirs_to_process:
39 | print(f"Processing {clz}")
40 | image_list = sorted(glob.glob(f"{src}/{clz}/*jpg"))
41 | flow_image_list = sorted(
42 | glob.glob(f"{src.replace('ipn', 'ipn_flow')}/{clz}/*jpg")
43 | )
44 | afd_image_list = sorted(
45 | glob.glob(f"{src.replace('ipn', 'ipn_afd')}/{clz}/*jpg")
46 | )
47 |
48 | if len(image_list) == len(flow_image_list) and len(image_list) == len(
49 | afd_image_list
50 | ):
51 | print(f"already done with {clz}")
52 | continue
53 |
54 | # process_class(clz)
55 | chunksize = 301
56 | for images in tqdm(
57 | chunker(image_list, chunksize),
58 | total=len(image_list) // chunksize,
59 | ):
60 | set_seed()
61 | frames = torch.stack(
62 | [torchvision.io.read_image(img).float() for img in images]
63 | )
64 | frame_list = list(frames) # .permute(0, 3, 1, 2))
65 |
66 | flow = flow_from_video(model, frame_list, upsample_factor=4)
67 |
68 | rgbflows = torch.from_numpy(
69 | fz.video_flow2color(
70 | fz.video_normalized_flow(
71 | torch.stack(flow, dim=0),
72 | )
73 | )
74 | )
75 | rgbflows = torch.cat([rgbflows, rgbflows[-1, ...].unsqueeze(0)], dim=0)
76 | afd = firstframe_warp(flow, usecolor=True)
77 |
78 | for im, impath in zip(afd.unbind(0), images):
79 | save_path = impath.replace("ipn", "ipn_afd")
80 | Path(os.path.dirname(save_path)).mkdir(parents=True, exist_ok=True)
81 | cv2.imwrite(
82 | save_path,
83 | im.numpy(),
84 | [cv2.IMWRITE_PNG_COMPRESSION, 9],
85 | )
86 |
87 | for im, impath in zip(rgbflows.unbind(0), images):
88 | save_path = impath.replace("ipn", "ipn_flow")
89 | Path(os.path.dirname(save_path)).mkdir(parents=True, exist_ok=True)
90 | cv2.imwrite(
91 | save_path,
92 | im.numpy(),
93 | [cv2.IMWRITE_PNG_COMPRESSION, 9],
94 | )
95 |
96 |
97 | if __name__ == "__main__":
98 | src = args.src
99 | classes = list(sorted(list_dir(src)))
100 |
101 | processes = []
102 | for cls in classes:
103 | main([cls], src)
104 |
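Typical invocation, assuming the extracted IPN frames live under data/ipn and the script is launched from the repository root (which is what the afd/RAFT/models/raft-sintel.pth checkpoint path above implies):

python afd/generate_ipn.py --src data/ipn

generate_kth.py and generate_sbu.py below follow the same pattern and differ only in source layout and file globs.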
--------------------------------------------------------------------------------
/afd/generate_kth.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import glob
3 | import os
4 | from pathlib import Path
5 |
6 | import cv2
7 | import flowiz as fz
8 | import torch
9 | import torchvision
10 | from RAFT.core.raft import RAFT
11 | from torchvision.datasets.utils import list_dir
12 | from tqdm import tqdm
13 | from util import firstframe_warp
14 |
15 | # torchvision.set_video_backend("video_reader")
16 |
17 | from util import chunker, flow_from_video, set_seed
18 |
19 |
20 | parser = argparse.ArgumentParser(
21 | description="Create the motion consistent noise dataset"
22 | )
23 | parser.add_argument("--src", type=str, default="data/kth")
24 |
25 | args = parser.parse_args()
26 |
27 |
28 | def main(list_of_dirs_to_process, src):
29 |
30 | args.alternate_corr = False
31 | args.mixed_precision = True
32 | args.small = False
33 |
34 | model = torch.nn.DataParallel(RAFT(args))
35 | model.load_state_dict(torch.load("RAFT/models/raft-sintel.pth"))
36 | model = model.module
37 | model.cuda()
38 | model.eval()
39 |
40 | for clz in list_of_dirs_to_process:
41 | print(f"Processing {clz}")
42 | image_list = sorted(glob.glob(f"{src}/{clz}/*png"))
43 | flow_image_list = sorted(
44 | glob.glob(f"{src.replace('kth', 'kth_flow')}/{clz}/*png")
45 | )
46 | afd_image_list = sorted(
47 | glob.glob(f"{src.replace('kth', 'kth_afd')}/{clz}/*png")
48 | )
49 |
50 | if len(image_list) == len(flow_image_list) and len(image_list) == len(
51 | afd_image_list
52 | ):
53 | print(f"already done with {clz}")
54 | continue
55 |
56 | # process_class(clz)
57 | chunksize = 301
58 | for images in tqdm(
59 | chunker(image_list, chunksize),
60 | total=len(image_list) // chunksize,
61 | ):
62 | set_seed()
63 | frames = torch.stack(
64 | [torchvision.io.read_image(img).float() for img in images]
65 | )
66 | frame_list = list(frames) # .permute(0, 3, 1, 2))
67 |
68 | flow = flow_from_video(model, frame_list, upsample_factor=4)
69 |
70 | rgbflows = torch.from_numpy(
71 | fz.video_flow2color(
72 | fz.video_normalized_flow(
73 | torch.stack(flow, dim=0),
74 | )
75 | )
76 | )
77 | rgbflows = torch.cat([rgbflows, rgbflows[-1, ...].unsqueeze(0)], dim=0)
78 | afd = firstframe_warp(flow, usecolor=True)
79 |
80 | for im, impath in zip(afd.unbind(0), images):
81 | save_path = impath.replace("kth", "kth_afd")
82 | Path(os.path.dirname(save_path)).mkdir(parents=True, exist_ok=True)
83 | cv2.imwrite(
84 | save_path,
85 | im.numpy(),
86 | [cv2.IMWRITE_PNG_COMPRESSION, 9],
87 | )
88 |
89 | for im, impath in zip(rgbflows.unbind(0), images):
90 | save_path = impath.replace("kth", "kth_flow")
91 | Path(os.path.dirname(save_path)).mkdir(parents=True, exist_ok=True)
92 | cv2.imwrite(
93 | save_path,
94 | im.numpy(),
95 | [cv2.IMWRITE_PNG_COMPRESSION, 9],
96 | )
97 |
98 |
99 | if __name__ == "__main__":
100 | parser = argparse.ArgumentParser(
101 | description="Create the motion consistent noise dataset"
102 | )
103 | parser.add_argument("--src", type=str, default="../data/kth")
104 | args = parser.parse_args()
105 |
106 | src = args.src
107 | classes = list(sorted(list_dir(src)))
108 |
109 | processes = []
110 | for cls in classes:
111 | main([cls], src)
112 |
--------------------------------------------------------------------------------
/afd/generate_sbu.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import glob
3 | import os
4 | from pathlib import Path
5 |
6 | import cv2
7 | import torch
8 | import torchvision
9 | from tqdm import tqdm
10 |
11 | import flowiz as fz
12 | from RAFT.core.raft import RAFT
13 | from util import firstframe_warp
14 |
15 | # torchvision.set_video_backend("video_reader")
16 |
17 | from util import chunker, flow_from_video, set_seed
18 |
19 | parser = argparse.ArgumentParser(
20 | description="Create the motion consistent noise dataset"
21 | )
22 | parser.add_argument("--src", type=str, default="data/ipn")
23 |
24 | args = parser.parse_args()
25 |
26 |
27 | def main(list_of_dirs_to_process, src):
28 | args.alternate_corr = False
29 | args.mixed_precision = True
30 | args.small = False
31 |
32 | model = torch.nn.DataParallel(RAFT(args))
33 | model.load_state_dict(torch.load("RAFT/models/raft-sintel.pth"))
34 | model = model.module
35 | model.cuda()
36 | model.eval()
37 |
38 | for clz in list_of_dirs_to_process:
39 | print(f"Processing {clz}")
40 | image_list = sorted(glob.glob(f"{clz}/rgb_*png"))
41 |
42 | fl_img_list = sorted(glob.glob(f"{clz.replace('sbu', 'sbu_flow')}/rgb_*png"))
43 | afd_image_list = sorted(glob.glob(f"{clz.replace('sbu', 'sbu_afd')}/rgb_*png"))
44 |
45 | if len(image_list) == len(fl_img_list) and len(image_list) == len(
46 | afd_image_list
47 | ):
48 | print(f"already done with {clz}")
49 | continue
50 |
51 | # process_class(clz)
52 | chunksize = 301
53 | for images in chunker(image_list, chunksize):
54 | set_seed()
55 | frames = torch.stack(
56 | [torchvision.io.read_image(img).float() for img in images]
57 | )
58 | frame_list = list(frames) # .permute(0, 3, 1, 2))
59 |
60 | flow = flow_from_video(model, frame_list)
61 |
62 | rgbflows = torch.from_numpy(
63 | fz.video_flow2color(
64 | fz.video_normalized_flow(
65 | torch.stack(flow, dim=0),
66 | )
67 | )
68 | )
69 | rgbflows = torch.cat([rgbflows, rgbflows[-1, ...].unsqueeze(0)], dim=0)
70 | afd = firstframe_warp(flow, usecolor=True)
71 |
72 | for im, impath in zip(afd.unbind(0), images):
73 | save_path = impath.replace("sbu", "sbu_afd")
74 | Path(os.path.dirname(save_path)).mkdir(parents=True, exist_ok=True)
75 | cv2.imwrite(
76 | save_path,
77 | im.numpy(),
78 | [cv2.IMWRITE_PNG_COMPRESSION, 9],
79 | )
80 |
81 | for im, impath in zip(rgbflows.unbind(0), images):
82 | save_path = impath.replace("sbu", "sbu_flow")
83 | Path(os.path.dirname(save_path)).mkdir(parents=True, exist_ok=True)
84 | cv2.imwrite(
85 | save_path,
86 | im.numpy(),
87 | [cv2.IMWRITE_PNG_COMPRESSION, 9],
88 | )
89 |
90 |
91 | if __name__ == "__main__":
92 | parser = argparse.ArgumentParser(
93 | description="Create the motion consistent noise dataset"
94 | )
95 | parser.add_argument("--src", type=str, default="../data/sbu")
96 | args = parser.parse_args()
97 |
98 | src = args.src
99 | classes = list(sorted(glob.glob(f"{src}/**/**/**/")))
100 |
101 | processes = []
102 | for cls in tqdm(classes):
103 | main([cls], src)
104 |
--------------------------------------------------------------------------------
/afd/save_helper.py:
--------------------------------------------------------------------------------
1 | import torchvision.transforms as T
2 |
3 | def save_tensor_list_as_gif(tensors, path='out.gif', duration=200, loop=0):
4 | """
5 | @param tensors: list of C,H,W tensors
6 | @param path: where to save the gif
7 | @param duration: duration of each frame in ms
8 | @param loop: 0 if it should loop forever
9 | """
10 | PIL_img_list = []
11 |
12 | for t in tensors:
13 | PIL_img_list.append(T.ToPILImage()(t))
14 |
15 | PIL_img_list[0].save(path, append_images=PIL_img_list[1:],save_all=True, duration=duration, loop=loop, allow_mixed=False)
16 |
17 | def save_tensor_as_img(tensor, path='out.png'):
18 | """
19 | @param tensor: C,H,W tensor
20 | """
21 | img = T.ToPILImage()(tensor)
22 | img.save(path)
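Both helpers accept whatever ToPILImage can convert, e.g. float C,H,W tensors in [0, 1]; a short sketch:

import torch
from save_helper import save_tensor_as_img, save_tensor_list_as_gif

frames = [torch.rand(3, 64, 64) for _ in range(8)]  # list of C,H,W tensors
save_tensor_list_as_gif(frames, path="out.gif", duration=100)
save_tensor_as_img(frames[0], path="frame.png")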
--------------------------------------------------------------------------------
/batch_eval.py:
--------------------------------------------------------------------------------
1 | import itertools
2 | import os
3 | from models.valid_models import valid_models
4 | from models.valid_models import action_models
5 | from models.valid_models import privacy_models
6 |
7 |
8 | """
9 | Skeleton showing how to evaluate the runs you want to compare.
10 | The resulting TensorBoard files contain all the necessary information,
11 | which you then parse out with the scripts in the `analysis` directory.
12 | """
13 |
14 |
15 | for k in action_models.keys():
16 | for db in ["ipn", "kth", "sbu"]:
17 | for downsample in ["16"]:
18 | run_string = f"CUDA_VISIBLE_DEVICES=0 python action_eval.py --architecture {k} --datasetname {db} -pretrained --batch_size 1 --downsample {downsample} --interpolation nearest"
19 | os.system(run_string)
20 |
21 | for k in privacy_models.keys():
22 | for db in ["ipn", "kth", "sbu"]:
23 | for downsample in ["16"]:
24 | run_string = f"CUDA_VISIBLE_DEVICES=0 python privacy_eval.py --architecture {k} --datasetname {db} -pretrained --batch_size 1 --downsample {downsample} --interpolation nearest -privacy"
25 | os.system(run_string)
26 |
--------------------------------------------------------------------------------
/batch_train.py:
--------------------------------------------------------------------------------
1 | import itertools
2 | import os
3 | from models.valid_models import valid_models
4 | from models.valid_models import action_models
5 | from models.valid_models import privacy_models
6 |
7 | for k in privacy_models.keys():
8 | for db in ["kth", "ipn", "sbu"]:
9 | for train_backbone in ["", "-train_backbone"]:
10 | to_run = f"CUDA_VISIBLE_DEVICES=0 python privacy_train.py --architecture {k} --datasetname {db} -pretrained {train_backbone} --batch_size 128 --num_epochs 500 --lr 3e-4 -privacy --num_workers 16"
11 | # print(to_run)
12 | os.system(to_run)
13 |
--------------------------------------------------------------------------------
/config.py:
--------------------------------------------------------------------------------
1 | import argparse
2 |
3 | parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
4 |
5 | # Training
6 | parser.add_argument("--datasetname", type=str)
7 | parser.add_argument("--weights_path", type=str, default=None)
8 | parser.add_argument("--num_epochs", type=int, default=80)
9 | parser.add_argument("--num_workers", type=int, default=8)
10 | parser.add_argument("--batch_size", type=int, default=64)
11 | parser.add_argument("--num_frames", type=int, default=0)
12 | parser.add_argument("--lr", type=float, default=3e-4)
13 | parser.add_argument("-pretrained", action="store_true")
14 | parser.add_argument("-train_backbone", action="store_true")
15 | parser.add_argument("-privacy", action="store_true") # Default is ACTION!
16 | # parser.add_argument("-gpu", action="store_true")
17 | parser.add_argument("--gpus", nargs="+", type=int, default=[])
18 | parser.add_argument("--accumulate_grad_batches", type=int, default=1)
19 |
20 |
21 | # Evaluation
22 | parser.add_argument("--masked", default=None) # person / background
23 | parser.add_argument("--downsample", type=int, default=None)
24 | parser.add_argument("--interpolation", default=None) # nearest / bilinear / ...
25 | parser.add_argument("--blur", default=None) # weak / strong / None
26 | parser.add_argument("--afd_combine_level", type=int, default=None)
27 | parser.add_argument("-combine_masked", action="store_true")
28 | parser.add_argument("--downsample_masked", type=int, default=None)
29 | parser.add_argument("--interpolation_masked", default=None) # nearest / bilinear / ...
30 | parser.add_argument("-mean_fill", action="store_true")
31 |
32 | # Our method
33 | parser.add_argument("-selectively_mask", action="store_true")
34 | parser.add_argument("--obfuscate", nargs="+", type=str, default=[])
35 | parser.add_argument("-iid", action="store_true")
36 |
37 | cfg = dict()
38 |
39 |
40 | def build_cfg():
41 | args = parser.parse_args()
42 | cfg = args.__dict__.copy()
43 | print(f"-----------------------------------\n")
44 | print(f"Running Config:")
45 | for k, v in cfg.items():
46 | print(f"{k}: {v}")
47 | print(f"-----------------------------------\n")
48 | return cfg
49 |
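build_cfg() is meant to be called once at entry-point startup; a sketch with hypothetical flags:

# e.g. python action_train.py --datasetname kth --batch_size 32 -pretrained
from config import build_cfg

cfg = build_cfg()                     # parses sys.argv, prints the config, returns a dict
print(cfg["datasetname"], cfg["lr"])  # -> kth 0.0003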
--------------------------------------------------------------------------------
/create_table_action.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 | from tbparse import SummaryReader
4 | import seaborn as sns
5 | import matplotlib.pyplot as plt
6 | import glob
7 | import pandas as pd
8 |
9 | sns.set_style("whitegrid")
10 | sns.set_style("darkgrid")
11 |
12 |
13 | df_all = pd.DataFrame()
14 |
15 | # datasets = ["kth", "ipn", "sbu"]
16 | datasets = ["sbu"]
17 |
18 | for dataset in datasets:
19 | base_dir = f"runs/action_eval/{dataset}/"
20 |
21 | tbfiles = []
22 | for filename in glob.glob(f"{base_dir}/**/events.out.tfevents.*", recursive=True):
23 | tbfiles.append(filename)
24 |
25 | for tbfile in tbfiles:
26 | name = tbfile.replace(base_dir, "").split("/")[1].split("__")[0]
27 | experiment = tbfile.replace(base_dir, "").split("/")[0]
28 | experiment = experiment.replace(
29 | "____pretrained__True____train_backbone__True", ""
30 | )
31 |
32 | experiment = experiment.replace("_", " ")
33 | experiment = experiment.replace("person", "person\n")
34 | experiment = experiment.replace("background", "background\n")
35 | experiment = experiment.replace("downsample masked", "downsample")
36 | experiment = experiment.replace("afd combine level", "afd level")
37 | experiment = experiment.replace(" ", " ")
38 | reader = SummaryReader(tbfile)
39 | df = reader.scalars
40 | df["name"] = name
41 | df["experiment"] = experiment
42 | df["dataset"] = dataset
43 |
44 | print(f"{dataset} \t {name} \t {experiment}")
45 |
46 | t = df[df.tag == "test/top1"]
47 | # pick the epoch with the best test accuracy
48 | t = t.loc[t["value"].idxmax()]
49 |
50 | bestepoch = (
51 | "/".join(tbfile.split("/")[:-2]) + f"/models/checkpoint_epoch{t.step}.pt"
52 | )
53 | bestpt = "/".join(tbfile.split("/")[:-3]) + "/best.pt"
54 |
55 | t["path"] = bestepoch
56 | if not os.path.exists(bestepoch):
57 | bestepoch = (
58 | "/".join(tbfile.split("/")[:-2])
59 | + f"/models/checkpoint_epoch{t.step+5}.pt"
60 | )
61 |
62 | if os.path.exists(bestepoch):
63 | os.system(f"cp {bestepoch} {bestpt}")
64 | print(f"\t \t {t.step} -> best.pt")
65 | else:
66 | print(f"{bestepoch} does not exist")
67 |
68 | df_all = pd.concat([df_all, pd.DataFrame([dict(t)])])
69 |
70 | # tmp.loc[tmp['value'].idxmax()]
71 | tmp = df_all[(df_all.tag == "test/top1")]
72 | individual = tmp.groupby(["experiment", "dataset"]).max()[
73 | "value"
74 | ] # for each experiment, get the max value for each dataset
75 |
76 | averages = (
77 | tmp.groupby(["experiment", "dataset"]).max()["value"].groupby("dataset").mean()
78 | ) # mean over datasets
79 |
80 |
81 | LATEX_individual = pd.pivot_table(
82 | pd.DataFrame(individual), index="experiment", columns="dataset", values="value"
83 | )
84 |
85 | averages = (
86 | tmp.groupby(["experiment", "dataset"]).max()["value"].groupby("dataset").mean()
87 | )
88 | LATEX_averages = pd.pivot_table(
89 | pd.DataFrame(averages), columns="dataset", values="value"
90 | )
91 |
92 | print(
93 | LATEX_individual.to_latex(
94 | formatters={"name": str.upper},
95 | float_format="{:.2f}".format,
96 | )
97 | )
98 |
99 | print(
100 | LATEX_averages.to_latex(
101 | formatters={"name": str.upper},
102 | float_format="{:.2f}".format,
103 | )
104 | )
105 |
--------------------------------------------------------------------------------
/create_table_privacy.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 | from tbparse import SummaryReader
4 | import seaborn as sns
5 | import matplotlib.pyplot as plt
6 | import glob
7 | import pandas as pd
8 |
9 | sns.set_style("whitegrid")
10 | sns.set_style("darkgrid")
11 |
12 |
13 | df_all = pd.DataFrame()
14 |
15 | # datasets = ["kth", "ipn", "sbu"]
16 | # datasets = ["ipn", "sbu"]
17 | # datasets = ["kth"]
18 | datasets = ["ipn"]
19 |
20 | for dataset in datasets:
21 | base_dir = f"runs/privacy/{dataset}"
22 |
23 | tbfiles = []
24 | for filename in glob.glob(f"{base_dir}/**/events.out.tfevents.*", recursive=True):
25 | tbfiles.append(filename)
26 |
27 | print("alive")
28 |
29 | for tbfile in tbfiles:
30 | name = tbfile.replace(base_dir, "").split("/")[1].split("__")[0]
31 | experiment = tbfile.replace(base_dir, "").split("/")[1]
32 | experiment = experiment.replace(
33 | "____pretrained__True____train_backbone__True", ""
34 | )
35 | experiment = experiment.replace(
36 | "____pretrained__True____train_backbone__False", ""
37 | )
38 |
39 | experiment = experiment.replace("_", " ")
40 | # experiment = experiment.replace("person", "person\n")
41 | # experiment = experiment.replace("background", "background\n")
42 | # experiment = experiment.replace("downsample masked", "downsample")
43 | # experiment = experiment.replace("interpolation maskednearest", "")
44 | # experiment = experiment.replace("afd combine level", "afd level")
45 | # experiment = experiment.replace(" ", " ")
46 |
47 | reader = SummaryReader(tbfile)
48 | df = reader.scalars
49 | df["name"] = name
50 | df["experiment"] = experiment
51 | df["dataset"] = dataset
52 |
53 | t = df[df.tag == "test/top1"]
54 | t = t.loc[t["value"].idxmax()]
55 |
56 | bestepoch = (
57 | "/".join(tbfile.split("/")[:-2]) + f"/models/checkpoint_epoch{t.step}.pt"
58 | )
59 | bestpt = "/".join(tbfile.split("/")[:-3]) + "/best.pt"
60 |
61 | t["path"] = bestepoch
62 | if not os.path.exists(bestepoch):
63 | for i in np.arange(1, 100, 1):
64 | bestepoch = (
65 | "/".join(tbfile.split("/")[:-2])
66 | + f"/models/checkpoint_epoch{t.step+i}.pt"
67 | )
68 | if os.path.exists(bestepoch):
69 | break
70 |
71 | print(f"{dataset} \t {name} \t {experiment} \t {t.value:.2f}")
72 | if os.path.exists(bestepoch):
73 | os.system(f"cp {bestepoch} {bestpt}")
74 | print(f"\t \t {t.step} -> best.pt")
75 | else:
76 | print(f"{bestepoch} does not exist")
77 |
78 | df_all = pd.concat([df_all, pd.DataFrame([dict(t)])])
79 |
80 | tmp = df_all[(df_all.tag == "test/top1")]
81 | individual = tmp.groupby(["experiment", "dataset"]).max()[
82 | "value"
83 | ] # for each experiment, get the max value for each dataset
84 |
85 | averages = (
86 | tmp.groupby(["experiment", "dataset"]).max()["value"].groupby("dataset").mean()
87 | ) # mean over datasets
88 |
89 | LATEX_individual = pd.pivot_table(
90 | pd.DataFrame(individual), index="experiment", columns="dataset", values="value"
91 | )
92 |
93 | averages = (
94 | tmp.groupby(["experiment", "dataset"]).max()["value"].groupby("dataset").mean()
95 | )
96 | LATEX_averages = pd.pivot_table(
97 | pd.DataFrame(averages), columns="dataset", values="value"
98 | )
99 |
100 | print(
101 | LATEX_individual.to_latex(
102 | formatters={"name": str.upper},
103 | float_format="{:.2f}".format,
104 | )
105 | )
106 | print(
107 | LATEX_averages.to_latex(
108 | formatters={"name": str.upper},
109 | float_format="{:.2f}".format,
110 | )
111 | )
112 |
--------------------------------------------------------------------------------
/data/splits/ipn/Video_TestList.txt:
--------------------------------------------------------------------------------
1 | 1CM1_1_R_#217 3855
2 | 1CM1_1_R_#218 3656
3 | 1CM1_1_R_#219 3922
4 | 1CM1_1_R_#220 3890
5 | 1CM1_2_R_#221 3769
6 | 1CM1_2_R_#222 3651
7 | 1CM1_2_R_#223 3989
8 | 1CM1_2_R_#224 3823
9 | 1CM1_3_R_#225 4261
10 | 1CM1_3_R_#226 4795
11 | 1CM1_3_R_#227 5038
12 | 1CM1_3_R_#228 4176
13 | 1CM42_15_R_#197 4124
14 | 1CM42_15_R_#198 4465
15 | 1CM42_15_R_#199 4243
16 | 1CM42_15_R_#200 4583
17 | 1CM42_30_R_#145 3760
18 | 1CM42_30_R_#146 3754
19 | 1CM42_30_R_#147 3691
20 | 1CM42_30_R_#148 3672
21 | 1CM42_31_R_#129 4043
22 | 1CM42_31_R_#130 4465
23 | 1CM42_31_R_#131 4226
24 | 1CM42_31_R_#132 4412
25 | 1CV12_12_R_#89 4395
26 | 1CV12_12_R_#90 4723
27 | 1CV12_12_R_#91 4465
28 | 1CV12_12_R_#92 4772
29 | 1CV12_15_R_#101 3623
30 | 1CV12_15_R_#102 3743
31 | 1CV12_15_R_#103 3634
32 | 1CV12_15_R_#104 4047
33 | 1CV12_21_R_#109 4776
34 | 1CV12_21_R_#110 4491
35 | 1CV12_21_R_#111 4684
36 | 1CV12_21_R_#112 4853
37 | 4CM11_13_R_#29 3841
38 | 4CM11_13_R_#30 3953
39 | 4CM11_13_R_#31 3945
40 | 4CM11_13_R_#32 3881
41 | 4CM11_18_R_#45 3664
42 | 4CM11_18_R_#46 3739
43 | 4CM11_18_R_#47 3691
44 | 4CM11_18_R_#48 3697
45 | 4CM11_20_R_#41 3746
46 | 4CM11_20_R_#42 3743
47 | 4CM11_20_R_#43 4005
48 | 4CM11_20_R_#44 4120
49 | 4CM11_24_L_#61 4026
50 | 4CM11_24_L_#62 3751
51 | 4CM11_24_L_#63 4000
52 | 4CM11_24_L_#64 3920
--------------------------------------------------------------------------------
/data/splits/ipn/Video_TrainList.txt:
--------------------------------------------------------------------------------
1 | 1CM1_4_R_#229 3751
2 | 1CM1_4_R_#230 3684
3 | 1CM1_4_R_#231 3747
4 | 1CM1_4_R_#232 3858
5 | 1CM42_11_R_#205 3686
6 | 1CM42_11_R_#206 4682
7 | 1CM42_11_R_#207 3682
8 | 1CM42_11_R_#208 3664
9 | 1CM42_12_R_#157 4287
10 | 1CM42_12_R_#158 4476
11 | 1CM42_12_R_#159 4383
12 | 1CM42_12_R_#160 4434
13 | 1CM42_13_R_#141 3687
14 | 1CM42_13_R_#142 3747
15 | 1CM42_13_R_#143 3786
16 | 1CM42_13_R_#144 3859
17 | 1CM42_17_R_#189 3616
18 | 1CM42_17_R_#190 3601
19 | 1CM42_17_R_#191 3608
20 | 1CM42_17_R_#192 3613
21 | 1CM42_18_R_#177 3675
22 | 1CM42_18_R_#178 3665
23 | 1CM42_18_R_#179 3686
24 | 1CM42_18_R_#180 3618
25 | 1CM42_21_R_#153 4188
26 | 1CM42_21_R_#154 4543
27 | 1CM42_21_R_#155 4709
28 | 1CM42_21_R_#156 4241
29 | 1CM42_26_R_#173 3836
30 | 1CM42_26_R_#174 3655
31 | 1CM42_26_R_#175 3811
32 | 1CM42_26_R_#176 3743
33 | 1CM42_3_R_#193 4088
34 | 1CM42_3_R_#194 3976
35 | 1CM42_3_R_#195 4188
36 | 1CM42_3_R_#196 3927
37 | 1CM42_32_R_#169 4765
38 | 1CM42_32_R_#170 4543
39 | 1CM42_32_R_#171 4242
40 | 1CM42_32_R_#172 4511
41 | 1CM42_4_R_#185 3637
42 | 1CM42_4_R_#186 3614
43 | 1CM42_4_R_#187 3771
44 | 1CM42_4_R_#188 3650
45 | 1CM42_6_R_#161 4077
46 | 1CM42_6_R_#162 4026
47 | 1CM42_6_R_#163 3866
48 | 1CM42_6_R_#164 4444
49 | 1CM42_7_L_#201 4291
50 | 1CM42_7_L_#202 4014
51 | 1CM42_7_L_#203 3882
52 | 1CM42_7_L_#204 3846
53 | 1CM42_9_R_#165 3988
54 | 1CM42_9_R_#166 3710
55 | 1CM42_9_R_#167 3792
56 | 1CM42_9_R_#168 3988
57 | 1CV12_1_R_#65 4206
58 | 1CV12_1_R_#66 4150
59 | 1CV12_1_R_#67 4121
60 | 1CV12_1_R_#68 4242
61 | 1CV12_13_R_#93 4523
62 | 1CV12_13_R_#94 4596
63 | 1CV12_13_R_#95 4066
64 | 1CV12_13_R_#96 4556
65 | 1CV12_16_R_#105 4211
66 | 1CV12_16_R_#106 4014
67 | 1CV12_16_R_#107 4476
68 | 1CV12_16_R_#108 4649
69 | 1CV12_2_R_#69 3950
70 | 1CV12_2_R_#70 3856
71 | 1CV12_2_R_#71 4011
72 | 1CV12_2_R_#72 4029
73 | 1CV12_22_R_#113 3760
74 | 1CV12_22_R_#114 4055
75 | 1CV12_22_R_#115 4445
76 | 1CV12_22_R_#116 4048
77 | 1CV12_23_R_#117 3657
78 | 1CV12_23_R_#118 3721
79 | 1CV12_23_R_#119 3748
80 | 1CV12_23_R_#120 3748
81 | 1CV12_6_R_#77 3641
82 | 1CV12_6_R_#78 3931
83 | 1CV12_6_R_#79 3827
84 | 1CV12_6_R_#80 3905
85 | 1CV12_7_R_#81 3673
86 | 1CV12_7_R_#82 3637
87 | 1CV12_7_R_#83 3690
88 | 1CV12_7_R_#84 3931
89 | 1CV12_8_R_#85 4146
90 | 1CV12_8_R_#86 4624
91 | 1CV12_8_R_#87 4510
92 | 1CV12_8_R_#88 3856
93 | 4CM11_1_R_#13 3969
94 | 4CM11_1_R_#14 4046
95 | 4CM11_1_R_#15 3935
96 | 4CM11_1_R_#16 4209
97 | 4CM11_10_R_#53 3677
98 | 4CM11_10_R_#54 3729
99 | 4CM11_10_R_#55 3713
100 | 4CM11_10_R_#56 3695
101 | 4CM11_11_R_#1 4484
102 | 4CM11_11_R_#2 4709
103 | 4CM11_11_R_#3 3664
104 | 4CM11_11_R_#4 4366
105 | 4CM11_14_R_#21 3732
106 | 4CM11_14_R_#22 3736
107 | 4CM11_14_R_#23 3747
108 | 4CM11_14_R_#24 3723
109 | 4CM11_15_R_#25 3722
110 | 4CM11_15_R_#26 3937
111 | 4CM11_15_R_#27 3892
112 | 4CM11_15_R_#28 3966
113 | 4CM11_16_R_#209 3680
114 | 4CM11_16_R_#210 3659
115 | 4CM11_16_R_#211 3757
116 | 4CM11_16_R_#212 3667
117 | 4CM11_17_R_#13 4905
118 | 4CM11_17_R_#14 4552
119 | 4CM11_17_R_#15 4492
120 | 4CM11_17_R_#16 4762
121 | 4CM11_19_R_#17 3823
122 | 4CM11_19_R_#18 3756
123 | 4CM11_19_R_#19 3684
124 | 4CM11_19_R_#20 3718
125 | 4CM11_2_R_#37 4047
126 | 4CM11_2_R_#38 4081
127 | 4CM11_2_R_#39 3969
128 | 4CM11_2_R_#40 3924
129 | 4CM11_23_R_#25 3719
130 | 4CM11_23_R_#26 3699
131 | 4CM11_23_R_#27 3661
132 | 4CM11_23_R_#28 3755
133 | 4CM11_26_R_#5 3789
134 | 4CM11_26_R_#6 3863
135 | 4CM11_26_R_#7 3798
136 | 4CM11_26_R_#8 3965
137 | 4CM11_29_R_#57 3975
138 | 4CM11_29_R_#58 3916
139 | 4CM11_29_R_#59 3853
140 | 4CM11_29_R_#60 3897
141 | 4CM11_5_L_#10 3765
142 | 4CM11_5_L_#11 3706
143 | 4CM11_5_L_#12 3724
144 | 4CM11_5_L_#9 3712
145 | 4CM11_7_R_#33 3780
146 | 4CM11_7_R_#34 4199
147 | 4CM11_7_R_#35 4151
148 | 4CM11_7_R_#36 3994
--------------------------------------------------------------------------------
/data/splits/ipn/classIdx.txt:
--------------------------------------------------------------------------------
1 | id,label,readable
2 | 1,D0X,Non_gesture
3 | 2,B0A,Pointing_1_finger
4 | 3,B0B,Pointing_2_fingers
5 | 4,G01,Click_1_finger
6 | 5,G02,Click_2_fingers
7 | 6,G03,Throw_up
8 | 7,G04,Throw_down
9 | 8,G05,Throw_left
10 | 9,G06,Throw_right
11 | 10,G07,Open_twice
12 | 11,G08,Double_1_finger
13 | 12,G09,Double_2_fingers
14 | 13,G10,Zoom_in
15 | 14,G11,Zoom_out
16 |
--------------------------------------------------------------------------------
/data/splits/ipn/metadata.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/data/splits/ipn/metadata.xlsx
--------------------------------------------------------------------------------
/data/splits/ipn/metadata_test.csv:
--------------------------------------------------------------------------------
1 | video,frames,label,Hand,Background,Illumination,People in Scene,Background Motion,Set
2 | 1CM1_1_R_#217,3855,W,Right,Plain,Stable,Single,Static,test
3 | 1CM1_1_R_#218,3656,W,Right,Plain,Stable,Single,Static,test
4 | 1CM1_1_R_#219,3922,W,Right,Plain,Stable,Single,Static,test
5 | 1CM1_1_R_#220,3890,W,Right,Plain,Stable,Single,Static,test
6 | 1CM1_2_R_#221,3769,M,Right,Clutter,Stable,Single,Static,test
7 | 1CM1_2_R_#222,3651,M,Right,Clutter,Stable,Single,Static,test
8 | 1CM1_2_R_#223,3989,M,Right,Clutter,Stable,Single,Static,test
9 | 1CM1_2_R_#224,3823,M,Right,Clutter,Stable,Single,Static,test
10 | 1CM1_3_R_#225,4261,M,Right,Clutter ,Dark,Single,Static,test
11 | 1CM1_3_R_#226,4795,M,Right,Clutter ,Dark,Single,Static,test
12 | 1CM1_3_R_#227,5038,M,Right,Clutter ,Dark,Single,Static,test
13 | 1CM1_3_R_#228,4176,M,Right,Clutter,Dark,Single,Static,test
14 | 1CM42_15_R_#197,4124,W,Right,Clutter,Stable,Single,Static,test
15 | 1CM42_15_R_#198,4465,W,Right,Clutter,Stable,Single,Static,test
16 | 1CM42_15_R_#199,4243,W,Right,Clutter,Stable,Single,Static,test
17 | 1CM42_15_R_#200,4583,W,Right,Clutter,Stable,Single,Static,test
18 | 1CM42_30_R_#145,3760,M,Right,Clutter,Stable,Multi,Dynamic,test
19 | 1CM42_30_R_#146,3754,M,Right,Plain,Stable,Single,Static,test
20 | 1CM42_30_R_#147,3691,M,Right,Plain,Stable,Single,Static,test
21 | 1CM42_30_R_#148,3672,M,Right,Plain,Stable,Single,Static,test
22 | 1CM42_31_R_#129,4043,W,Right,Clutter,Stable,Single,Static,test
23 | 1CM42_31_R_#130,4465,W,Right,Plain,Stable,Single,Static,test
24 | 1CM42_31_R_#131,4226,W,Right,Plain,Stable,Single,Static,test
25 | 1CM42_31_R_#132,4412,W,Right,Plain,Stable,Single,Static,test
26 | 1CV12_12_R_#89,4395,W,Right,Clutter,Light,Multi,Dynamic,test
27 | 1CV12_12_R_#90,4723,W,Right,Plain,Stable,Single,Static,test
28 | 1CV12_12_R_#91,4465,W,Right,Clutter,Light,Single,Static,test
29 | 1CV12_12_R_#92,4772,W,Right,Plain,Stable,Single,Static,test
30 | 1CV12_15_R_#101,3623,M,Right,Clutter,Stable,Single,Static,test
31 | 1CV12_15_R_#102,3743,M,Right,Clutter,Stable,Single,Static,test
32 | 1CV12_15_R_#103,3634,M,Right,Plain,Stable,Single,Static,test
33 | 1CV12_15_R_#104,4047,M,Right,Plain,Stable,Single,Static,test
34 | 1CV12_21_R_#109,4776,W,Right,Plain,Stable,Single,Static,test
35 | 1CV12_21_R_#110,4491,W,Right,Clutter,Light,Single,Dynamic,test
36 | 1CV12_21_R_#111,4684,W,Right,Plain,Stable,Single,Static,test
37 | 1CV12_21_R_#112,4853,W,Right,Plain,Stable,Single,Static,test
38 | 4CM11_13_R_#29,3841,M,Right,Plain,Stable,Single,Dynamic,test
39 | 4CM11_13_R_#30,3953,M,Right,Plain,Stable,Single,Static,test
40 | 4CM11_13_R_#31,3945,M,Right,Plain,Stable,Single,Static,test
41 | 4CM11_13_R_#32,3881,M,Right,Plain,Stable,Single,Static,test
42 | 4CM11_18_R_#45,3664,M,Right,Plain,Stable,Single,Static,test
43 | 4CM11_18_R_#46,3739,M,Right,Plain,Stable,Single,Static,test
44 | 4CM11_18_R_#47,3691,M,Right,Plain,Stable,Single,Static,test
45 | 4CM11_18_R_#48,3697,M,Right,Plain,Stable,Single,Static,test
46 | 4CM11_20_R_#41,3746,M,Right,Plain,Stable,Single,Static,test
47 | 4CM11_20_R_#42,3743,M,Right,Plain,Stable,Single,Static,test
48 | 4CM11_20_R_#43,4005,M,Right,Plain,Stable,Single,Static,test
49 | 4CM11_20_R_#44,4120,M,Right,Plain,Stable,Single,Static,test
50 | 4CM11_24_L_#61,4026,M,Right,Clutter,Light,Multi,Dynamic,test
51 | 4CM11_24_L_#62,3751,M,Right,Clutter,Light,Single,Dynamic,test
52 | 4CM11_24_L_#63,4000,M,Right,Clutter,Light,Single,Static,test
53 | 4CM11_24_L_#64,3920,M,Right,Clutter,Light,Single,Static,test
54 |
--------------------------------------------------------------------------------
/data/splits/kth/create_metadata.py:
--------------------------------------------------------------------------------
1 | from os.path import join
2 | import pandas as pd
3 | import shutil
4 | from glob import glob
5 | import random
6 |
7 | root_dir = "data/kth/"
8 |
9 | # list of all directories
10 | dirs = sorted(glob(join(root_dir, "*")))
11 |
12 | df = pd.DataFrame(columns=["video", "action", "person", "background", "frames", "set"])
13 |
14 | actions = ["boxing", "handclapping", "handwaving", "jogging", "running", "walking"]
15 | persons = [f"person{i:02d}" for i in range(1, 26)]
16 | backgrounds = [f"d{i}" for i in range(1, 5)]
17 |
18 | rows_list = []
19 | array = ["train", "train", "train", "test"]
20 | for p in persons:
21 | for a in actions:
22 | for d, split in zip(backgrounds, random.sample(array, len(array))):
23 | # for d, split in zip(backgrounds, array):
24 | path = f"{p}_{a}_{d}_uncomp"
25 | n_frames = len(glob(join(root_dir, path, "*.png")))
26 | rows_list.append(
27 | {
28 | "video": path,
29 | "action": a,
30 | "person": p,
31 | "background": d,
32 | "frames": n_frames,
33 | "set": split,
34 | }
35 | )
36 |
37 | df = pd.DataFrame(rows_list)
38 | df.to_csv(join("data", "splits", "kth", "metadata.csv"), index=False)
39 |
40 | print(df)
41 |
--------------------------------------------------------------------------------
/data/splits/kth/download_kth.sh:
--------------------------------------------------------------------------------
1 | wget http://www.nada.kth.se/cvap/actions/boxing.zip
2 | wget http://www.nada.kth.se/cvap/actions/handclapping.zip
3 | wget http://www.nada.kth.se/cvap/actions/handwaving.zip
4 | wget http://www.nada.kth.se/cvap/actions/jogging.zip
5 | wget http://www.nada.kth.se/cvap/actions/running.zip
6 | wget http://www.nada.kth.se/cvap/actions/walking.zip
7 |
8 | unzip boxing.zip -d boxing
9 | unzip handclapping.zip -d handclapping
10 | unzip handwaving.zip -d handwaving
11 | unzip jogging.zip -d jogging
12 | unzip running.zip -d running
13 | unzip walking.zip -d walking
14 |
15 | rm *.zip
--------------------------------------------------------------------------------
/data/splits/kth/extract_kthframes.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Check if FFmpeg is installed
4 | if ! command -v ffmpeg &> /dev/null; then
5 | echo "FFmpeg is not installed. Please install it first."
6 | exit 1
7 | fi
8 |
9 | # Check if there are any video files in the current directory
10 | shopt -s nullglob
11 | video_files=(*.mp4 *.avi *.mkv)
12 |
13 | if [ ${#video_files[@]} -eq 0 ]; then
14 | echo "No video files found in the current directory."
15 | exit 1
16 | fi
17 |
18 | # Loop through video files
19 | for video_file in "${video_files[@]}"; do
20 | # Get the video file name without extension
21 | video_name=$(basename -- "${video_file%.*}")
22 |
23 | # Create a subfolder with the video name if it doesn't exist
24 | mkdir -p "$video_name"
25 |
26 | # Extract frames into the subfolder
27 | ffmpeg -i "$video_file" -vf "fps=25" "$video_name/frame%04d.png"
28 |
29 | echo "Extracted frames from $video_file and placed them in $video_name/"
30 | done
31 |
32 | echo "All frames extracted successfully."
--------------------------------------------------------------------------------
/data/splits/sbu/test.txt:
--------------------------------------------------------------------------------
1 | name,t_start,t_end,person,action
2 | s01s02/01/002,1,24,0,0
3 | s07s03/01/002,1,28,12,0
4 | s04s06/04/001,1,22,10,3
5 | s05s02/02/002,1,18,11,1
6 | s03s04/08/002,1,14,6,7
7 | s04s06/03/002,1,24,10,2
8 | s01s07/01/002,1,18,2,0
9 | s01s02/02/002,1,21,0,1
10 | s06s04/08/001,1,24,10,7
11 | s05s03/05/001,1,38,7,4
12 | s06s03/01/001,1,27,8,0
13 | s06s04/01/001,1,40,10,0
14 | s03s06/04/001,1,18,8,3
15 | s04s06/08/001,1,22,10,7
16 | s02s03/03/002,1,18,3,2
17 | s06s03/06/001,1,27,8,5
18 | s01s02/08/001,1,24,0,7
19 | s05s03/04/002,1,25,7,3
20 | s07s01/01/002,1,16,2,0
21 | s02s06/05/001,1,29,4,4
22 | s06s03/02/002,1,17,8,1
23 | s07s01/06/001,1,38,2,5
24 | s02s07/03/003,1,29,5,2
25 | s02s07/01/002,1,19,5,0
26 | s01s03/08/002,1,19,1,7
27 | s02s01/02/003,1,24,0,1
28 | s06s03/07/001,1,17,8,6
29 | s05s02/04/003,1,28,11,3
30 | s07s03/07/001,1,28,12,6
31 | s05s02/03/001,1,28,11,2
32 | s05s03/01/001,1,21,7,0
33 | s02s01/03/001,1,22,0,2
34 | s02s06/06/001,1,24,4,5
35 | s01s02/07/002,1,18,0,6
36 | s05s02/02/001,1,21,11,1
37 | s01s07/01/001,1,13,2,0
38 | s01s07/03/001,1,21,2,2
39 | s02s01/03/003,1,22,0,2
40 | s03s02/01/002,1,21,3,0
41 | s02s07/06/001,1,34,5,5
42 | s04s02/07/001,1,24,9,6
43 | s02s07/03/001,1,25,5,2
44 | s04s03/05/001,1,20,6,4
45 | s02s03/07/001,1,19,3,6
46 | s07s03/03/001,1,16,12,2
47 | s05s03/07/001,1,25,7,6
48 | s03s05/02/001,1,23,7,1
49 | s02s01/04/001,1,40,0,3
50 | s04s06/02/002,1,23,10,1
51 | s04s06/01/002,1,12,10,0
52 | s03s02/04/001,1,24,3,3
53 | s07s01/03/002,1,20,2,2
54 | s01s03/08/003,1,17,1,7
55 | s02s07/04/002,1,38,5,3
56 | s03s02/08/001,1,22,3,7
57 | s03s06/06/001,1,29,8,5
58 | s02s01/05/001,1,41,0,4
59 | s01s07/07/001,1,30,2,6
60 | s06s02/01/002,1,21,4,0
61 | s03s06/07/002,1,16,8,6
62 | s06s02/07/002,1,21,4,6
63 | s02s07/07/001,1,22,5,6
64 | s02s01/03/002,1,19,0,2
65 | s06s03/03/002,1,20,8,2
66 | s06s04/07/002,1,34,10,6
67 | s03s02/01/001,1,21,3,0
68 | s01s03/07/001,1,20,1,6
69 | s06s04/02/002,1,24,10,1
70 | s03s05/03/001,1,28,7,2
71 | s04s02/01/002,1,20,9,0
72 | s06s04/07/001,1,25,10,6
73 | s03s05/02/002,1,15,7,1
74 | s04s06/05/001,1,30,10,4
75 | s02s06/02/001,1,21,4,1
76 | s02s07/02/001,1,18,5,1
77 | s04s02/05/001,1,37,9,4
78 | s02s06/07/001,1,19,4,6
79 | s07s03/07/002,1,31,12,6
80 | s05s03/08/002,1,21,7,7
81 | s01s03/01/002,1,21,1,0
82 | s04s03/04/002,1,25,6,3
83 | s01s02/04/002,1,30,0,3
84 | s07s01/02/001,1,19,2,1
85 | s02s01/02/001,1,22,0,1
86 | s04s03/07/001,1,23,6,6
87 | s02s03/02/002,1,11,3,1
88 | s04s06/02/001,1,16,10,1
89 | s06s04/03/002,1,30,10,2
90 | s05s02/04/001,1,30,11,3
91 | s02s06/08/002,1,20,4,7
92 | s03s02/02/001,1,21,3,1
93 | s01s03/08/001,1,19,1,7
94 | s02s06/03/001,1,19,4,2
95 | s02s07/04/001,1,28,5,3
--------------------------------------------------------------------------------
/data/splits/sbu/train.txt:
--------------------------------------------------------------------------------
1 | name,t_start,t_end,person,action
2 | s03s06/01/002,1,26,8,0
3 | s01s07/02/002,1,17,2,1
4 | s06s02/05/001,1,28,4,4
5 | s03s05/08/002,1,20,7,7
6 | s05s03/03/001,1,25,7,2
7 | s06s04/02/001,1,17,10,1
8 | s07s03/04/001,1,32,12,3
9 | s04s02/08/002,1,23,9,7
10 | s02s01/01/001,1,19,0,0
11 | s01s07/08/002,1,34,2,7
12 | s06s03/05/001,1,32,8,4
13 | s07s01/07/002,1,34,2,6
14 | s03s05/07/001,1,28,7,6
15 | s03s06/03/001,1,16,8,2
16 | s02s03/08/001,1,19,3,7
17 | s03s02/06/001,1,26,3,5
18 | s01s03/03/002,1,19,1,2
19 | s02s06/01/002,1,21,4,0
20 | s07s01/07/001,1,42,2,6
21 | s01s03/02/001,1,21,1,1
22 | s02s03/06/001,1,35,3,5
23 | s02s01/07/001,1,24,0,6
24 | s07s01/05/001,1,40,2,4
25 | s06s04/04/001,1,30,10,3
26 | s02s01/07/002,1,24,0,6
27 | s01s02/02/001,1,17,0,1
28 | s07s01/03/001,1,25,2,2
29 | s01s03/06/001,1,23,1,5
30 | s03s02/05/001,1,29,3,4
31 | s06s02/04/001,1,22,4,3
32 | s03s06/02/002,1,17,8,1
33 | s06s02/04/002,1,24,4,3
34 | s04s03/03/001,1,22,6,2
35 | s07s03/03/002,1,18,12,2
36 | s06s04/05/001,1,36,10,4
37 | s04s02/04/001,1,29,9,3
38 | s03s05/01/001,1,21,7,0
39 | s04s06/08/002,1,28,10,7
40 | s01s03/04/001,1,22,1,3
41 | s05s03/02/001,1,17,7,1
42 | s03s06/02/001,1,17,8,1
43 | s04s03/02/001,1,13,6,1
44 | s02s07/03/002,1,19,5,2
45 | s02s06/01/001,1,21,4,0
46 | s03s04/01/002,1,23,6,0
47 | s03s05/05/001,1,38,7,4
48 | s03s04/01/001,1,24,6,0
49 | s01s02/06/001,1,32,0,5
50 | s07s03/08/001,1,27,12,7
51 | s02s03/02/001,1,23,3,1
52 | s05s02/04/002,1,31,11,3
53 | s06s03/07/002,1,23,8,6
54 | s01s02/04/001,1,28,0,3
55 | s03s05/04/002,1,28,7,3
56 | s05s03/04/001,1,32,7,3
57 | s05s02/06/001,1,36,11,5
58 | s02s06/08/001,1,17,4,7
59 | s03s06/07/001,1,22,8,6
60 | s02s03/04/002,1,24,3,3
61 | s03s02/08/002,1,19,3,7
62 | s04s06/07/001,1,36,10,6
63 | s03s06/01/001,1,23,8,0
64 | s01s03/01/001,1,20,1,0
65 | s05s03/02/002,1,16,7,1
66 | s06s04/01/002,1,25,10,0
67 | s04s03/02/002,1,18,6,1
68 | s06s02/06/001,1,24,4,5
69 | s02s07/08/001,1,23,5,7
70 | s03s04/06/001,1,41,6,5
71 | s06s03/04/002,1,25,8,3
72 | s03s05/08/001,1,20,7,7
73 | s02s01/02/002,1,16,0,1
74 | s02s06/02/002,1,18,4,1
75 | s01s07/04/001,1,31,2,3
76 | s07s01/04/001,1,36,2,3
77 | s04s02/03/001,1,22,9,2
78 | s02s07/01/001,1,24,5,0
79 | s01s07/07/002,1,28,2,6
80 | s07s01/08/001,1,32,2,7
81 | s03s04/03/001,1,24,6,2
82 | s01s02/05/001,1,29,0,4
83 | s05s03/06/001,1,24,7,5
84 | s07s01/02/002,1,15,2,1
85 | s03s04/04/002,1,26,6,3
86 | s06s03/02/001,1,19,8,1
87 | s05s02/01/001,1,30,11,0
88 | s05s03/08/001,1,25,7,7
89 | s02s01/06/001,1,30,0,5
90 | s04s03/01/001,1,21,6,0
91 | s04s06/07/002,1,23,10,6
92 | s03s05/07/002,1,24,7,6
93 | s04s03/04/001,1,22,6,3
94 | s06s02/01/001,1,31,4,0
95 | s02s01/01/003,1,17,0,0
96 | s03s04/02/002,1,17,6,1
97 | s03s02/03/002,1,19,3,2
98 | s02s07/08/002,1,25,5,7
99 | s03s04/03/002,1,17,6,2
100 | s02s03/03/001,1,21,3,2
101 | s04s03/08/002,1,22,6,7
102 | s01s03/04/002,1,24,1,3
103 | s07s01/04/002,1,38,2,3
104 | s07s03/04/002,1,23,12,3
105 | s04s06/06/001,1,26,10,5
106 | s03s02/07/001,1,23,3,6
107 | s01s02/08/002,1,10,0,7
108 | s04s03/06/001,1,26,6,5
109 | s03s06/04/002,1,26,8,3
110 | s06s02/08/001,1,27,4,7
111 | s02s03/01/002,1,33,3,0
112 | s03s04/04/001,1,36,6,3
113 | s01s02/07/003,1,45,0,6
114 | s01s02/01/001,1,45,0,0
115 | s04s06/04/002,1,23,10,3
116 | s01s07/04/002,1,33,2,3
117 | s07s01/01/001,1,23,2,0
118 | s06s02/02/002,1,19,4,1
119 | s02s07/05/001,1,46,5,4
120 | s05s02/08/002,1,24,11,7
121 | s03s02/02/002,1,16,3,1
122 | s02s01/08/001,1,18,0,7
123 | s05s02/08/001,1,22,11,7
124 | s06s03/03/001,1,15,8,2
125 | s02s07/07/002,1,30,5,6
126 | s04s02/01/001,1,23,9,0
127 | s04s06/01/001,1,20,10,0
128 | s03s02/03/001,1,27,3,2
129 | s04s02/02/002,1,19,9,1
130 | s03s04/08/001,1,24,6,7
131 | s07s03/02/001,1,23,12,1
132 | s06s04/03/001,1,19,10,2
133 | s02s01/01/002,1,17,0,0
134 | s02s07/02/002,1,13,5,1
135 | s04s03/08/001,1,21,6,7
136 | s06s02/07/001,1,24,4,6
137 | s04s02/08/001,1,27,9,7
138 | s03s06/08/001,1,18,8,7
139 | s03s04/02/001,1,16,6,1
140 | s03s05/03/002,1,20,7,2
141 | s06s03/04/001,1,27,8,3
142 | s07s03/08/002,1,26,12,7
143 | s07s03/02/002,1,21,12,1
144 | s04s02/06/001,1,23,9,5
145 | s01s07/05/001,1,32,2,4
146 | s03s02/07/002,1,22,3,6
147 | s07s03/06/001,1,29,12,5
148 | s05s02/03/002,1,28,11,2
149 | s03s06/08/002,1,19,8,7
150 | s06s02/03/001,1,17,4,2
151 | s06s04/04/002,1,28,10,3
152 | s02s06/03/002,1,23,4,2
153 | s01s03/02/002,1,21,1,1
154 | s02s03/04/001,1,26,3,3
155 | s05s03/01/002,1,24,7,0
156 | s06s04/06/001,1,40,10,5
157 | s07s03/05/001,1,40,12,4
158 | s07s01/08/002,1,25,2,7
159 | s02s03/07/002,1,27,3,6
160 | s01s03/05/001,1,22,1,4
161 | s03s04/07/001,1,20,6,6
162 | s06s04/08/002,1,24,10,7
163 | s05s02/01/002,1,28,11,0
164 | s01s07/02/001,1,12,2,1
165 | s01s03/03/001,1,17,1,2
166 | s04s02/04/002,1,28,9,3
167 | s04s03/07/002,1,22,6,6
168 | s04s02/07/002,1,26,9,6
169 | s01s02/03/001,1,17,0,2
170 | s06s02/02/001,1,16,4,1
171 | s03s05/06/001,1,32,7,5
172 | s02s06/04/002,1,24,4,3
173 | s03s05/04/001,1,32,7,3
174 | s07s03/01/001,1,24,12,0
175 | s06s03/01/002,1,24,8,0
176 | s02s03/01/001,1,34,3,0
177 | s03s04/05/001,1,35,6,4
178 | s02s06/07/002,1,19,4,6
179 | s01s07/06/001,1,44,2,5
180 | s04s03/03/002,1,18,6,2
181 | s01s07/03/002,1,26,2,2
182 | s03s04/07/002,1,24,6,6
183 | s04s03/01/002,1,23,6,0
184 | s01s02/03/002,1,16,0,2
185 | s04s06/03/001,1,16,10,2
186 | s04s02/02/001,1,10,9,1
187 | s01s02/07/001,1,22,0,6
188 | s02s06/04/001,1,23,4,3
189 | s01s07/08/001,1,20,2,7
--------------------------------------------------------------------------------
/dataset/__init__.py:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/dataset/db_factory.py:
--------------------------------------------------------------------------------
1 | from matplotlib import pyplot as plt
2 |
3 | from config import build_cfg
4 | from dataset.ipn import IPN
5 | from dataset.kth import KTH
6 | from dataset.sbu import SBU
7 | from transforms.transform import InverseNormalizeVideo, transform_default
8 | from utils.VideoTensorViewer import VideoTensorViewer
9 |
10 |
11 | def DBfactory(dbname, set_split, config):
12 | if dbname in ["ipn"]:
13 | T = transform_default(config, set_split)
14 | T_inv = InverseNormalizeVideo(config)
15 | data_root = f"data/{dbname}"
16 | annot_root = f"data/splits/ipn/"
17 | db = IPN(data_root, annot_root, set_split, T, T_inv, config)
18 | elif dbname in ["kth"]:
19 | T = transform_default(config, set_split)
20 | T_inv = InverseNormalizeVideo(config)
21 | data_root = f"data/{dbname}"
22 | annot_root = f"data/splits/kth/"
23 | db = KTH(data_root, annot_root, set_split, T, T_inv, config)
24 | elif dbname in ["sbu"]:
25 | T = transform_default(config, set_split)
26 | T_inv = InverseNormalizeVideo(config)
27 | data_root = f"data/{dbname}"
28 | annot_root = f"data/splits/sbu/"
29 | db = SBU(data_root, annot_root, set_split, T, T_inv, config)
30 | else:
31 | raise ValueError(f"Invalid Database name {dbname}")
32 | return db
33 |
34 |
35 | def show_single_videovolume():
36 | cfg = build_cfg()
37 | cfg["num_frames"] = 30
38 | cfg["sampling_rate"] = 1
39 | cfg["architecture"] = "x3d_s"
40 |
41 | # cfg = None
42 | dl = DBfactory(cfg["datasetname"], set_split="train", config=cfg)
43 |
44 | for step, (s, masks, flows, labels) in enumerate(dl):
45 | sample = dl.inverse_normalise(s)
46 | # VideoTensorViewer(torch.stack([sample, masks], dim=3))
47 | VideoTensorViewer(sample)
48 | plt.show(block=True)
49 |
50 |
51 | if __name__ == "__main__":
52 | show_single_videovolume()
53 |
--------------------------------------------------------------------------------
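A minimal usage sketch for DBfactory above. It assumes the three datasets implement the standard torch Dataset protocol, yield fixed-size clips, and return the same (clip, masks, flows, labels) 4-tuple that show_single_videovolume unpacks; the batch size and the "sbu" choice are illustrative.

from torch.utils.data import DataLoader

from config import build_cfg
from dataset.db_factory import DBfactory

cfg = build_cfg()
cfg["num_frames"] = 30
cfg["sampling_rate"] = 1
cfg["architecture"] = "x3d_s"

db = DBfactory("sbu", set_split="train", config=cfg)  # "ipn" | "kth" | "sbu"
loader = DataLoader(db, batch_size=4, shuffle=True)
clips, masks, flows, labels = next(iter(loader))      # same 4-tuple as above
--------------------------------------------------------------------------------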
/dataset/db_stats.py:
--------------------------------------------------------------------------------
1 | db_stats = {
2 | "ipn": {
3 | "mean": (0.0414, 0.0022, 0.1532),
4 | "std": (1.1118, 1.1657, 1.1551),
5 | },
6 | "kth": {
7 | "mean": (0.5705, 0.5705, 0.5705),
8 | "std": (0.1864, 0.1864, 0.1864),
9 | },
10 | "sbu": {
11 | "mean": (0.6463, 0.6214, 0.5943),
12 | "std": (0.2310, 0.2331, 0.2331),
13 | },
14 | "ipn_flow": {
15 | "mean": (0.9678, 0.9637, 0.9713),
16 | "std": (0.0770, 0.0773, 0.0691),
17 | },
18 | "kth_flow": {
19 | "mean": (0.9678, 0.9637, 0.9713),
20 | "std": (0.0770, 0.0773, 0.0691),
21 | },
22 | "sbu_flow": {
23 | "mean": (0.9678, 0.9637, 0.9713),
24 | "std": (0.0770, 0.0773, 0.0691),
25 | },
26 | "ipn_afd": {
27 | "mean": (0.5, 0.5, 0.5),
28 | "std": (0.2126, 0.2126, 0.2126),
29 | },
30 | "kth_afd": {
31 | "mean": (0.5, 0.5, 0.5),
32 | "std": (0.2126, 0.2126, 0.2126),
33 | },
34 | "sbu_afd": {
35 | "mean": (0.5, 0.5, 0.5),
36 | "std": (0.2126, 0.2126, 0.2126),
37 | },
38 | }
39 |
--------------------------------------------------------------------------------
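For reference, a sketch of how these per-dataset statistics are typically consumed; the transforms.Normalize wiring below is illustrative (the repo's own transform_default presumably does the equivalent), and the dummy frame is arbitrary.

import torch
from torchvision import transforms
from dataset.db_stats import db_stats

mean, std = db_stats["kth"]["mean"], db_stats["kth"]["std"]
normalize = transforms.Normalize(mean=mean, std=std)

frame = torch.rand(3, 224, 224)   # dummy RGB frame in [0, 1]
normed = normalize(frame)         # per-channel (frame - mean) / std
# Inverting the normalization recovers the original frame:
restored = normed * torch.tensor(std).view(3, 1, 1) + torch.tensor(mean).view(3, 1, 1)
assert torch.allclose(restored, frame, atol=1e-5)
--------------------------------------------------------------------------------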
/matcher/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 |
84 | # pyenv
85 | .python-version
86 |
87 | # pipenv
88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
91 | # install all needed dependencies.
92 | #Pipfile.lock
93 |
94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95 | __pypackages__/
96 |
97 | # Celery stuff
98 | celerybeat-schedule
99 | celerybeat.pid
100 |
101 | # SageMath parsed files
102 | *.sage.py
103 |
104 | # Environments
105 | .env
106 | .venv
107 | env/
108 | venv/
109 | ENV/
110 | env.bak/
111 | venv.bak/
112 |
113 | # Spyder project settings
114 | .spyderproject
115 | .spyproject
116 |
117 | # Rope project settings
118 | .ropeproject
119 |
120 | # mkdocs documentation
121 | /site
122 |
123 | # mypy
124 | .mypy_cache/
125 | .dmypy.json
126 | dmypy.json
127 |
128 | # Pyre type checker
129 | .pyre/
130 |
--------------------------------------------------------------------------------
/matcher/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2022 Shir Amir
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/matcher/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/__init__.py
--------------------------------------------------------------------------------
/matcher/compute_similarities_from_descriptor.py:
--------------------------------------------------------------------------------
1 | import argparse
2 |
3 | import torch
4 | import torchvision
5 | from torchvision import transforms
6 | from torchvision.transforms import Resize, ToTensor
7 |
8 | # torchvision.set_video_backend("video_reader")
9 | from pathlib import Path
10 |
11 | from PIL import Image
12 |
13 | from helpers import (
14 | get_best_matching_descriptor,
15 | save_similarity_from_descriptor,
16 | str2bool,
17 | )
18 |
19 |
20 | def parse():
21 | parser = argparse.ArgumentParser(
22 | description="Facilitate similarity inspection between two images."
23 | )
24 |
25 | parser.add_argument(
26 | "-descriptorpath",
27 | type=str,
28 | default="output/descriptors/test.pt",
29 | help="The descriptor to compare to",
30 | )
31 |
32 | parser.add_argument(
33 | "-imagesdir",
34 | type=str,
35 | default="input/",
36 | help="Directory with images to compare to compare the descriptor to and compute the smiliarity",
37 | )
38 |
39 | parser.add_argument(
40 | "--model_type",
41 | default="dino_vits8",
42 | type=str,
43 | help="""type of model to extract.
44 | Choose from [dino_vits8 | dino_vits16 | dino_vitb8 | dino_vitb16 | vit_small_patch8_224 |
45 | vit_small_patch16_224 | vit_base_patch8_224 | vit_base_patch16_224]""",
46 | )
47 | parser.add_argument(
48 | "--facet",
49 | default="key",
50 | type=str,
51 | help="""facet to create descriptors from.
52 | options: ['key' | 'query' | 'value' | 'token']""",
53 | )
54 | parser.add_argument(
55 | "--layer",
56 | default=11,
57 | type=int,
58 | help="layer to create descriptors from.",
59 | )
60 |
61 | parser.add_argument(
62 | "--use_targeted",
63 | default="False",
64 | type=str2bool,
65 | help="Match descriptor to first image in sequence, or use raw.",
66 | )
67 |
68 | args = parser.parse_args()
69 | return args
70 |
71 |
72 | if __name__ == "__main__":
73 | args = parse()
74 | load_size = 224
75 | bin = False
76 | stride = 4
77 |
78 | images = []
79 | for ext in ("*.jpg", "*.png"):
80 | images.extend(list(Path(args.imagesdir).glob(ext)))
81 |
82 | image_strings = [str(p) for p in sorted(images)]
83 |
84 | frame_list = [
85 | ToTensor()(
86 | Resize(load_size, interpolation=transforms.InterpolationMode.LANCZOS)(
87 | Image.open(i).convert("RGB")
88 | )
89 | )
90 | for i in image_strings
91 | ]
92 |
93 | videoname = args.imagesdir.split("/")[-2]
94 | descriptor = torch.load(args.descriptorpath)
95 | descriptor_name = args.descriptorpath.split("/")[-1].split(".")[0]
96 |
97 | if args.use_targeted:
98 | descriptor = get_best_matching_descriptor(descriptor, image_strings[0])
99 |
100 | with torch.no_grad():
101 | save_similarity_from_descriptor(
102 | descriptor,
103 | videoname,
104 | image_strings,
105 | load_size,
106 | args.layer,
107 | args.facet,
108 | bin,
109 | stride,
110 | args.model_type,
111 | prefix_savedir="output/similarities/",
112 | name=descriptor_name,
113 | )
114 |
--------------------------------------------------------------------------------
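A typical invocation of the script above, spelling out its argparse defaults; note that the trailing slash on -imagesdir matters, because videoname is derived from imagesdir.split("/")[-2].

python compute_similarities_from_descriptor.py \
    -descriptorpath output/descriptors/test.pt \
    -imagesdir input/testvideo/ \
    --use_targeted True
--------------------------------------------------------------------------------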
/matcher/helpers.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import numpy as np
4 | import PIL
5 | import torch
6 | from torchvision import transforms
7 | from torchvision.utils import save_image
8 | from tqdm import tqdm
9 |
10 | from matcher.extractor import ViTExtractor
11 |
12 |
13 | def chunk_cosine_sim(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
14 | """Computes cosine similarity between all possible pairs in two sets of vectors.
15 | Operates on chunks so no large amount of GPU RAM is required.
16 | :param x: a tensor of descriptors of shape Bx1x(t_x)xd' where d' is the dimensionality of the descriptors and t_x
17 | is the number of tokens in x.
18 | :param y: a tensor of descriptors of shape Bx1x(t_y)xd' where d' is the dimensionality of the descriptors and t_y
19 | is the number of tokens in y.
20 | :return: cosine similarity between all descriptors in x and all descriptors in y. Has shape of Bx1x(t_x)x(t_y)
21 | """
22 | result_list = []
23 | num_token_x = x.shape[2]
24 | for token_idx in range(num_token_x):
25 | token = x[:, :, token_idx, :].unsqueeze(dim=2) # Bx1x1xd'
26 | result_list.append(torch.nn.CosineSimilarity(dim=3)(token, y)) # Bx1x(t_y)
27 | return torch.stack(result_list, dim=2) # Bx1x(t_x)x(t_y)
28 |
29 |
30 | def str2bool(v):
31 | if isinstance(v, bool):
32 | return v
33 | if v.lower() in ("yes", "true", "t", "y", "1"):
34 | return True
35 | elif v.lower() in ("no", "false", "f", "n", "0"):
36 | return False
37 | else:
38 | raise ValueError("Cannot convert to boolean")
39 |
40 |
41 | # get the best matching descriptor in the first image of the video to the descriptor
42 | def get_best_matching_descriptor(descriptor, image_path):
43 | with torch.no_grad():
44 | # extract descriptors
45 | device = "cuda" if torch.cuda.is_available() else "cpu"
46 | extractor = ViTExtractor(device=device)
47 | image_batch, _ = extractor.preprocess(image_path, load_size=224)
48 |
49 | descs = extractor.extract_descriptors(
50 | image_batch.to(device), layer=11, facet="key", bin=False, include_cls=False
51 | )
52 |
53 | # compute similarity
54 | sim = chunk_cosine_sim(descriptor[None, None, None], descs)
55 | sim_image = sim.reshape(extractor.num_patches)
56 | sim_image = sim_image.cpu().numpy()
57 |
58 | # get best matching descriptor
59 | best_matching_descriptor = np.argmax(sim_image)
60 | return descs[:, :, best_matching_descriptor].squeeze()
61 |
62 |
63 | def save_similarity_from_descriptor(
64 | descriptor,
65 | videoname: str,
66 | images: list,
67 | load_size: int = 224,
68 | layer: int = 11,
69 | facet: str = "key",
70 | bin: bool = False,
71 | stride: int = 4,
72 | model_type: str = "dino_vits8",
73 | prefix_savedir="output/similarities/",
74 | name=None,
75 | ):
76 | device = "cuda" if torch.cuda.is_available() else "cpu"
77 | extractor = ViTExtractor(model_type, stride, device=device)
78 | patch_size = extractor.model.patch_embed.patch_size
79 | img_size = PIL.Image.open(images[0]).size[::-1]
80 |
81 | similarities = []
82 |
83 | for en, image_path_b in enumerate(images):
84 | print(f"Computing Descriptors {en}")
85 | image_batch_b, image_pil_b = extractor.preprocess(image_path_b, load_size)
86 | descs_b = extractor.extract_descriptors(
87 | image_batch_b.to(device), layer, facet, bin, include_cls=False
88 | )
89 | num_patches_b, load_size_b = extractor.num_patches, extractor.load_size
90 | sim = chunk_cosine_sim(descriptor[None, None, None], descs_b)
91 | similarities.append(sim)
92 |
93 | sim_image = sim.reshape(num_patches_b)
94 | os.makedirs(prefix_savedir + f"/{name}_{videoname}", exist_ok=True)
95 | sim_image = transforms.Resize(img_size, antialias=True)(sim_image.unsqueeze(0))
96 | save_image(sim_image, f"{prefix_savedir}/{name}_{videoname}/{en:04d}.png")
97 |
98 |
99 | def similarity_from_descriptor(
100 | descriptor,
101 | images: list,
102 | load_size: int = 224,
103 | layer: int = 11,
104 | facet: str = "key",
105 | bin: bool = False,
106 | stride: int = 4,
107 | model_type: str = "dino_vits8",
108 | ):
109 | device = "cuda" if torch.cuda.is_available() else "cpu"
110 | extractor = ViTExtractor(model_type, stride, device=device)
111 | similarities = []
112 | img_size = PIL.Image.open(images[0]).size[::-1]
113 |
114 | ret = []
115 | for en, image_path_b in tqdm(enumerate(images), total=len(images), position=1):
116 | # print(f"Computing Descriptors {en}")
117 | image_batch_b, image_pil_b = extractor.preprocess(image_path_b, load_size)
118 | descs_b = extractor.extract_descriptors(
119 | image_batch_b.to(device), layer, facet, bin, include_cls=False
120 | )
121 | num_patches_b, load_size_b = extractor.num_patches, extractor.load_size
122 | sim = chunk_cosine_sim(descriptor[None, None, None], descs_b)
123 | similarities.append(sim)
124 |
125 | sim_image = sim.reshape(num_patches_b)
126 | ret_img = transforms.Resize(img_size, antialias=True)(sim_image.unsqueeze(0))
127 |
128 | ret.append(ret_img.squeeze())
129 | return ret
130 |
--------------------------------------------------------------------------------
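A quick shape check for chunk_cosine_sim above, matching its docstring; the token counts (10, 20) and the descriptor dimension (384) are arbitrary.

import torch
from matcher.helpers import chunk_cosine_sim

x = torch.randn(1, 1, 10, 384)      # Bx1x(t_x)xd'
y = torch.randn(1, 1, 20, 384)      # Bx1x(t_y)xd'
sim = chunk_cosine_sim(x, y)
assert sim.shape == (1, 1, 10, 20)  # Bx1x(t_x)x(t_y)
--------------------------------------------------------------------------------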
/matcher/input/ipn1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/ipn1.jpg
--------------------------------------------------------------------------------
/matcher/input/ipn2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/ipn2.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000100.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000100.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000101.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000101.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000102.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000102.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000103.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000103.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000104.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000104.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000105.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000105.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000106.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000106.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000107.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000107.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000108.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000108.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000109.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000109.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000110.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000110.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000111.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000111.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000112.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000112.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000113.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000113.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000114.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000114.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000115.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000115.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000116.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000116.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000117.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000117.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000118.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000118.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000119.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000119.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000120.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000120.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000121.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000121.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000122.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000122.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000123.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000123.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000124.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000124.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000125.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000125.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000126.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000126.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000127.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000127.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000128.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000128.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000129.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000129.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000130.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000130.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000131.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000131.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000132.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000132.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000133.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000133.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000134.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000134.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000135.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000135.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000136.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000136.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000137.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000137.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000138.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000138.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000139.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000139.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000140.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000140.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000141.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000141.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000142.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000142.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000143.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000143.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000144.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000144.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000145.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000145.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000146.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000146.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000147.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000147.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000148.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000148.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000149.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000149.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000150.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000150.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000151.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000151.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000152.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000152.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000153.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000153.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000154.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000154.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000155.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000155.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000156.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000156.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000157.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000157.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000158.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000158.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000159.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000159.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000160.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000160.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000161.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000161.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000162.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000162.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000163.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000163.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000164.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000164.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000165.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000165.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000166.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000166.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000167.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000167.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000168.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000168.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000169.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000169.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000170.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000170.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000171.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000171.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000172.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000172.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000173.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000173.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000174.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000174.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000175.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000175.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000176.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000176.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000177.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000177.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000178.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000178.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000179.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000179.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000180.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000180.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000181.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000181.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000182.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000182.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000183.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000183.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000184.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000184.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000185.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000185.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000186.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000186.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000187.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000187.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000188.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000188.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000189.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000189.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000190.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000190.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000191.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000191.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000192.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000192.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000193.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000193.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000194.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000194.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000195.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000195.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000196.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000196.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000197.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000197.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000198.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000198.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000199.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000199.jpg
--------------------------------------------------------------------------------
/matcher/input/testvideo/4CM11_7_R_#34_000200.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/matcher/input/testvideo/4CM11_7_R_#34_000200.jpg
--------------------------------------------------------------------------------
/matcher/run_batch.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | for descr in [
4 | "arm",
5 | "cheek",
6 | "eyes",
7 | "forehead",
8 | "hair",
9 | "hand",
10 | "lips",
11 | "torso",
12 | "leg",
13 | ]:
14 | DRY_RUN = True
15 |
16 | print(f"{'*'*20} WORKING ON: {descr} {'*'*20}")
17 | run_str = f"CUDA_VISIBLE_DEVICES=0 python similarity_from_template_for_dataset.py -descriptorpath output/descriptors/{descr}.pt --use_targeted False"
18 |
19 | if DRY_RUN:
20 | print(run_str)
21 | else:
22 | os.system(run_str)
23 |
--------------------------------------------------------------------------------
/matcher/save_descriptor.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 |
4 | import matplotlib.pyplot as plt
5 | import numpy as np
6 | import torch
7 | import torchvision
8 | from torchvision.io import read_image
9 | from extractor import ViTExtractor
10 | from helpers import str2bool
11 |
12 | # torchvision.set_video_backend("video_reader")
13 |
14 |
15 | def parse():
16 | parser = argparse.ArgumentParser(
17 | description="Save descriptor with clicking on the image"
18 | )
19 | parser.add_argument(
20 | "-template",
21 | type=str,
22 | help="The template image from which to extract the descriptor",
23 | )
24 | parser.add_argument(
25 | "-descriptorname",
26 | type=str,
27 | help="Name of the descriptor to save in output/descritors/",
28 | )
29 |
30 | parser.add_argument(
31 | "--model_type",
32 | default="dino_vits8",
33 | type=str,
34 | help="""type of model to extract.
35 | Choose from [dino_vits8 | dino_vits16 | dino_vitb8 | dino_vitb16 | vit_small_patch8_224 |
36 | vit_small_patch16_224 | vit_base_patch8_224 | vit_base_patch16_224]""",
37 | )
38 |
39 | parser.add_argument(
40 | "--facet",
41 | default="key",
42 | type=str,
43 | help="""facet to create descriptors from.
44 | options: ['key' | 'query' | 'value' | 'token']""",
45 | )
46 |
47 | parser.add_argument(
48 | "--layer", default=11, type=int, help="layer to create descriptors from."
49 | )
50 |
51 | args = parser.parse_args()
52 | return args
53 |
54 |
55 | def get_descriptor(
56 | image_path_a: str,
57 | load_size: int = 224,
58 | layer: int = 11,
59 | facet: str = "key",
60 | bin: bool = False,
61 | stride: int = 4,
62 | model_type: str = "dino_vits8",
63 | descriptorname="testing",
64 | prefix_savepath="output/descriptors/",
65 | ):
66 | # extract descriptors
67 | device = "cuda" if torch.cuda.is_available() else "cpu"
68 | extractor = ViTExtractor(model_type, stride, device=device)
69 | patch_size = extractor.model.patch_embed.patch_size
70 | image_batch_a, image_pil_a = extractor.preprocess(image_path_a, load_size)
71 |
72 | descr = extractor.extract_descriptors(
73 | image_batch_a.to(device), layer, facet, bin, include_cls=False
74 | )
75 |
76 | num_patches_a, load_size_a = extractor.num_patches, extractor.load_size
77 |
78 | # plot
79 | fig, axes = plt.subplots(1, 1)
80 | fig.suptitle("Double click to save descriptor, Right click to exit.")
81 | visible_patches = []
82 | radius = patch_size // 2
83 |
84 |     # plot image_a and the chosen patch. If nothing is marked, the chosen patch is the cls patch.
85 | axes.imshow(image_pil_a)
86 | pts = np.asarray(
87 | plt.ginput(1, timeout=-1, mouse_stop=plt.MouseButton.RIGHT, mouse_pop=None)
88 | )
89 |
90 | while len(pts) == 1:
91 | y_coor, x_coor = int(pts[0, 1]), int(pts[0, 0])
92 | new_H = patch_size / stride * (load_size_a[0] // patch_size - 1) + 1
93 | new_W = patch_size / stride * (load_size_a[1] // patch_size - 1) + 1
94 | y_descs_coor = int(new_H / load_size_a[0] * y_coor)
95 | x_descs_coor = int(new_W / load_size_a[1] * x_coor)
96 |
97 | # reset previous marks
98 | for patch in visible_patches:
99 | patch.remove()
100 | visible_patches = []
101 |
102 | # draw chosen point
103 | center = (
104 | (x_descs_coor - 1) * stride + stride + patch_size // 2 - 0.5,
105 | (y_descs_coor - 1) * stride + stride + patch_size // 2 - 0.5,
106 | )
107 | patch = plt.Circle(center, radius, color=(1, 0, 0, 0.75))
108 | axes.add_patch(patch)
109 | visible_patches.append(patch)
110 |
111 | # get and draw current similarities
112 | raveled_desc_idx = num_patches_a[1] * y_descs_coor + x_descs_coor
113 | point_descriptor = descr[0, 0, raveled_desc_idx]
114 |
115 | pts = np.asarray(
116 | plt.ginput(1, timeout=-1, mouse_stop=plt.MouseButton.RIGHT, mouse_pop=None)
117 | )
118 | os.makedirs(prefix_savepath, exist_ok=True)
119 |
120 | torch.save(point_descriptor, os.path.join(prefix_savepath, descriptorname))
121 | print("Saved descriptor to: ", os.path.join(prefix_savepath, descriptorname))
122 |
123 |     exit()  # Do it only once for the release version so no one gets confused...
124 |
125 |
126 | if __name__ == "__main__":
127 |     args = parse()
128 |
129 |     load_size = 224
130 |     bin = False
131 |     stride = 4
132 |     with torch.no_grad():
133 |         descr = get_descriptor(
134 |             args.template,
135 |             load_size,
136 |             args.layer,
137 |             args.facet,
138 |             bin,
139 |             stride,
140 |             args.model_type,
141 |             args.descriptorname,
142 |             prefix_savepath="output/descriptors/",
143 |         )
144 |
--------------------------------------------------------------------------------
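A minimal invocation sketch (run from the matcher/ directory, matching how run_batch.py invokes its sibling scripts; the template image ships with the repository, the descriptor name is illustrative). Double-clicking a point in the template writes that patch's descriptor to output/descriptors/:

    python save_descriptor.py -template input/ipn1.jpg -descriptorname hair.pt
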
/matcher/similarity_from_template_for_dataset.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | import torch
4 | from torchvision.utils import save_image
5 | import torchvision
6 | from tqdm import tqdm
7 | from glob import glob
8 |
9 | # torchvision.set_video_backend("video_reader")
10 | from pathlib import Path
11 |
12 | from matcher.helpers import (
13 | str2bool,
14 | get_best_matching_descriptor,
15 | similarity_from_descriptor,
16 | )
17 |
18 |
19 | def check_integrity(src_dir, dst_dir):
20 | src_images = sorted(glob(f"{src_dir}/*.png")) + sorted(glob(f"{src_dir}/*.jpg"))
21 |     dst_images = sorted(glob(f"{dst_dir}/*.png")) + sorted(glob(f"{dst_dir}/*.jpg"))
22 | return len(src_images) == len(dst_images)
23 |
24 |
25 | def process_video(args, imagesdir, ds):
26 | images = []
27 | for ext in ("*.jpg", "*.png"):
28 | images.extend(list(Path(imagesdir).glob(ext)))
29 |
30 | image_strings = [str(p) for p in sorted(images)]
31 |
32 | descriptor = torch.load(args.descriptorpath)
33 | if args.use_targeted:
34 | targeted_descr = get_best_matching_descriptor(descriptor, image_strings[0])
35 | else:
36 | targeted_descr = descriptor
37 | name = args.descriptorpath.split("/")[-1].split(".")[0]
38 |
39 | ok = False
40 | if os.path.isdir(os.path.dirname(image_strings[0].replace(ds, f"{ds}_{name}_sim"))):
41 | ok = check_integrity(
42 | os.path.dirname(image_strings[0]),
43 | os.path.dirname(image_strings[0]).replace(ds, f"{ds}_{name}_sim"),
44 | )
45 |
46 |     if ok:
47 |         print(f"[ OK ] {imagesdir}")
48 |         return
49 |
50 |     # Not intact (or missing): recompute and rewrite the similarity maps below.
51 |     print(f"[ FIXING ] {imagesdir}")
52 |
53 | with torch.no_grad():
54 | images = similarity_from_descriptor(
55 | targeted_descr,
56 | image_strings,
57 | args.load_size,
58 | args.layer,
59 | args.facet,
60 | args.bin,
61 | args.stride,
62 | )
63 |
64 | for image, path in zip(images, image_strings):
65 | out_path = path.replace(ds, f"{ds}_{name}_sim")
66 | os.makedirs(os.path.dirname(out_path), exist_ok=True)
67 | save_image(
68 | image,
69 | out_path,
70 | )
71 |
72 |
73 | if __name__ == "__main__":
74 | parser = argparse.ArgumentParser()
75 |
76 | parser.add_argument(
77 | "-descriptorpath",
78 | type=str,
79 | default="output/descriptors/eyes.pt",
80 | help="The descriptor to compare to",
81 | )
82 |
83 | parser.add_argument(
84 | "--load_size", default=224, type=int, help="load size of the input image."
85 | )
86 |
87 | parser.add_argument(
88 | "--stride",
89 | default=4,
90 | type=int,
91 | help="stride of first convolution layer.small stride -> higher resolution.",
92 | )
93 |
94 | parser.add_argument(
95 | "--model_type",
96 | default="dino_vits8",
97 | type=str,
98 | help="type of model to extract. Choose from [dino_vits8 | dino_vits16 | dino_vitb8 | dino_vitb16 | vit_small_patch8_224 | vit_small_patch16_224 | vit_base_patch8_224 | vit_base_patch16_224]",
99 | )
100 | parser.add_argument(
101 | "--facet",
102 | default="key",
103 | type=str,
104 | help="""facet to create descriptors from. options: ['key' | 'query' | 'value' | 'token']""",
105 | )
106 | parser.add_argument(
107 | "--layer", default=11, type=int, help="layer to create descriptors from."
108 | )
109 | parser.add_argument(
110 | "--bin",
111 | default="False",
112 | type=str2bool,
113 | help="create a binned descriptor if True.",
114 | )
115 |
116 | parser.add_argument(
117 | "--use_targeted",
118 | default="False",
119 | type=str2bool,
120 |         help="Match to first image in sequence if True.",
121 | )
122 |
123 |     parser.add_argument(
124 |         "--dataset",
125 |         default="ipn",
126 |         type=str,
127 |         help="""options: [ 'ipn' | 'kth' | 'sbu' ]""",
128 |     )
129 |
130 | args = parser.parse_args()
131 | ds = args.dataset
132 | root_dir = f"data/{ds}/"
133 |
134 | tmp = os.listdir(root_dir)
135 | all_images_dirs = [os.path.join(root_dir, x) for x in tmp]
136 | for imagesdir in tqdm(all_images_dirs, position=0):
137 | process_video(args, imagesdir, ds)
138 |
--------------------------------------------------------------------------------
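For reference, run_batch.py above emits commands of exactly this shape; --dataset selects the split root data/<ds>/ (ipn | kth | sbu):

    CUDA_VISIBLE_DEVICES=0 python similarity_from_template_for_dataset.py -descriptorpath output/descriptors/eyes.pt --use_targeted False --dataset ipn
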
/models/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/models/__init__.py
--------------------------------------------------------------------------------
/models/valid_models.py:
--------------------------------------------------------------------------------
1 | privacy_models = {
2 | "resnet18": {
3 | "num_frames": 1,
4 | "sample_rate": 1,
5 | "crop_size": 224,
6 | },
7 | "resnet50": {
8 | "num_frames": 1,
9 | "sample_rate": 1,
10 | "crop_size": 224,
11 | },
12 | "resnet101": {
13 | "num_frames": 1,
14 | "sample_rate": 1,
15 | "crop_size": 224,
16 | },
17 | "vit": {
18 | "num_frames": 1,
19 | "sample_rate": 1,
20 | "crop_size": 224,
21 | },
22 | "vit_b_32": {
23 | "num_frames": 1,
24 | "sample_rate": 1,
25 | "crop_size": 224,
26 | },
27 | }
28 |
29 | action_models = {
30 | "x3d_s": {
31 | "crop_size": 182,
32 | "num_frames": 13,
33 | "sample_rate": 6,
34 | },
35 | "x3d_m": {
36 | "crop_size": 224,
37 | "num_frames": 16,
38 | "sample_rate": 5,
39 | },
40 | "x3d_l": {
41 | "crop_size": 312,
42 | "num_frames": 16,
43 | "sample_rate": 5,
44 | },
45 | "slowfast_r50": {
46 | "crop_size": 224,
47 | "num_frames": 32,
48 | "sample_rate": 2,
49 | "frames_per_second": 30,
50 | "slowfast_alpha": 4,
51 | },
52 | "slow_r50": {
53 | "crop_size": 224,
54 | "num_frames": 8,
55 | "sample_rate": 8,
56 | },
57 | "i3d_r50": {
58 | "crop_size": 224,
59 | "num_frames": 8,
60 | "sample_rate": 8,
61 | },
62 | "c2d_r50": {
63 | "crop_size": 224,
64 | "num_frames": 8,
65 | "sample_rate": 8,
66 | },
67 | "csn_r101": {
68 | "crop_size": 224,
69 | "num_frames": 32,
70 | "sample_rate": 2,
71 | },
72 | "r2plus1d_r50": {
73 | "crop_size": 224,
74 | "num_frames": 16,
75 | "sample_rate": 4,
76 | },
77 | "mvit_base_16x4": {
78 | "crop_size": 224,
79 | "num_frames": 16,
80 | "sample_rate": 4,
81 | },
82 | "e2s_x3d_s": {
83 | "crop_size": 182,
84 | "num_frames": 13,
85 | "sample_rate": 6,
86 | },
87 | "e2s_x3d_m": {
88 | "crop_size": 224,
89 | "num_frames": 16,
90 | "sample_rate": 5,
91 | },
92 | "e2s_x3d_l": {
93 | "crop_size": 312,
94 | "num_frames": 16,
95 | "sample_rate": 5,
96 | },
97 | }
98 |
99 | valid_models = dict(list(privacy_models.items()) + list(action_models.items()))
100 |
--------------------------------------------------------------------------------
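A minimal sketch of how these lookup tables can be consumed (the clip-span arithmetic is illustrative, not code from this repository):

    from models.valid_models import action_models, valid_models

    cfg = valid_models["x3d_m"]
    # frames of raw video covered by one sampled clip: num_frames * sample_rate
    clip_span = cfg["num_frames"] * cfg["sample_rate"]  # 16 * 5 = 80 frames
    is_action_model = "x3d_m" in action_models  # True
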
/output/descriptors/arm.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/output/descriptors/arm.pt
--------------------------------------------------------------------------------
/output/descriptors/cheek.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/output/descriptors/cheek.pt
--------------------------------------------------------------------------------
/output/descriptors/eyes.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/output/descriptors/eyes.pt
--------------------------------------------------------------------------------
/output/descriptors/forehead.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/output/descriptors/forehead.pt
--------------------------------------------------------------------------------
/output/descriptors/hair.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/output/descriptors/hair.pt
--------------------------------------------------------------------------------
/output/descriptors/hand.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/output/descriptors/hand.pt
--------------------------------------------------------------------------------
/output/descriptors/leg.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/output/descriptors/leg.pt
--------------------------------------------------------------------------------
/output/descriptors/lips.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/output/descriptors/lips.pt
--------------------------------------------------------------------------------
/output/descriptors/mouth.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/output/descriptors/mouth.pt
--------------------------------------------------------------------------------
/output/descriptors/torso.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/output/descriptors/torso.pt
--------------------------------------------------------------------------------
/privacy_eval.py:
--------------------------------------------------------------------------------
1 | import json
2 | import sys
3 |
4 | from os.path import join
5 |
6 | import torch
7 | from torch import nn, optim
8 | from torch.utils.data import DataLoader
9 | from torch.utils.tensorboard import SummaryWriter
10 |
11 | from config import build_cfg, cfg, parser
12 | from dataset.db_factory import DBfactory
13 | from simulation.simulation import Simulation
14 | from utils.info_print import *
15 | from utils.model_utils import (
16 | build_info_name,
17 | build_model_name_singleframe,
18 | build_model_privacy,
19 | load_weights,
20 | )
21 | from utils.Trainer import Trainer
22 |
23 | # parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
24 | parser.add_argument("--architecture", type=str)
25 | cfg = build_cfg()
26 |
27 |
28 | def main():
29 | torch.backends.cudnn.benchmark = True
30 |
31 | datasetname = cfg["datasetname"]
32 | batch_size = cfg["batch_size"]
33 | num_workers = cfg["num_workers"]
34 | accum_every = cfg["accumulate_grad_batches"]
35 | gpus = cfg["gpus"]
36 | multigpu = len(gpus) > 1
37 |
38 | test_dataset = DBfactory(datasetname, set_split="test", config=cfg)
39 |
40 | test_dataloader = DataLoader(
41 | test_dataset,
42 | batch_size,
43 | num_workers=num_workers,
44 | shuffle=True,
45 | drop_last=False,
46 | )
47 |
48 | # ----------------- Setup Model & Load weights if supplied -----------------
49 |
50 | model = build_model_privacy(
51 | cfg["architecture"],
52 | cfg["pretrained"],
53 | test_dataset.num_classes,
54 | cfg["train_backbone"],
55 | )
56 | model.name = build_model_name_singleframe(cfg)
57 |
58 | if multigpu:
59 | model = nn.DataParallel(model, device_ids=gpus)
60 | model.name = model.module.name
61 |
62 | optimizer = optim.AdamW(model.parameters())
63 | print(
64 | "MAKE SURE YOU RUN create_table_action.py FIRST SO THAT ALL THE BEST.PT FILES ARE CREATED"
65 | )
66 | cfg["weights_path"] = f"runs/privacy/{datasetname}/{model.name}/best.pt"
67 | load_weights(model, optimizer, cfg["weights_path"])
68 |
69 | criterion = nn.CrossEntropyLoss().cuda()
70 |
71 | model = model.cuda()
72 | added_info = build_info_name(cfg)
73 |
74 | sim_name = f"privacy_eval_attributes/{datasetname}/{cfg['datasetname']}{added_info}/{model.name}"
75 | # sim_name = (
76 | # f"privacy_eval/{datasetname}/{cfg['datasetname']}{added_info}/{model.name}"
77 | # )
78 |
79 | with Simulation(sim_name=sim_name, output_root="runs") as sim:
80 | print(f'Running: python {" ".join(sys.argv)}\n\n\n')
81 | print_learnable_params(model)
82 | print_data_augmentation_transform(test_dataset.transform)
83 | print(f"Begin training: {model.name}")
84 |
85 | with open(join(sim.outdir, "cfg.txt"), "w") as f:
86 | json.dump(cfg, f, indent=2)
87 |
88 | writer = SummaryWriter(join(sim.outdir, "tensorboard"))
89 | trainer = Trainer(sim)
90 |
91 | trainer.do(
92 | "eval",
93 | model,
94 | test_dataloader,
95 | 0,
96 | criterion,
97 | None,
98 | writer,
99 | log_video=True,
100 | )
101 | print(f"\nRun {sim.outdir} finished\n")
102 |
103 |     writer.close()
104 |
105 |
106 | if __name__ == "__main__":
107 | main()
108 |
--------------------------------------------------------------------------------
/privacy_train.py:
--------------------------------------------------------------------------------
1 | import json
2 | import sys
3 |
4 | from os.path import join
5 |
6 | import torch
7 | from torch import nn, optim
8 | from torch.utils.data import DataLoader
9 | from torch.utils.tensorboard import SummaryWriter
10 |
11 | from config import build_cfg, cfg, parser
12 | from dataset.db_factory import DBfactory
13 | from simulation.simulation import Simulation
14 | from utils.info_print import *
15 | from utils.model_utils import (
16 | build_model_name_singleframe,
17 | build_model_privacy,
18 | load_weights,
19 | )
20 | from utils.Trainer import Trainer
21 |
22 | # parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
23 | parser.add_argument("--architecture", type=str)
24 | cfg = build_cfg()
25 |
26 |
27 | def main():
28 | torch.backends.cudnn.benchmark = True
29 |
30 | datasetname = cfg["datasetname"]
31 | batch_size = cfg["batch_size"]
32 | num_workers = cfg["num_workers"]
33 | accum_every = cfg["accumulate_grad_batches"]
34 | gpus = cfg["gpus"]
35 | multigpu = len(gpus) > 1
36 |
37 | train_dataset = DBfactory(datasetname, set_split="train", config=cfg)
38 |
39 | test_dataset = DBfactory(datasetname, set_split="test", config=cfg)
40 |
41 | train_dataloader = DataLoader(
42 | train_dataset,
43 | batch_size,
44 | num_workers=num_workers,
45 | shuffle=True,
46 | drop_last=False,
47 | )
48 |
49 | test_dataloader = DataLoader(
50 | test_dataset,
51 | batch_size,
52 | num_workers=num_workers,
53 | shuffle=True,
54 | drop_last=False,
55 | )
56 |
57 | # ----------------- Setup Model & Load weights if supplied -----------------
58 |
59 | model = build_model_privacy(
60 | cfg["architecture"],
61 | cfg["pretrained"],
62 | train_dataset.num_classes,
63 | cfg["train_backbone"],
64 | )
65 | model.name = build_model_name_singleframe(cfg)
66 |
67 | if multigpu:
68 | model = nn.DataParallel(model, device_ids=gpus)
69 | model.name = model.module.name
70 |
71 | optimizer = optim.AdamW(model.parameters())
72 | load_weights(model, optimizer, cfg["weights_path"])
73 |
74 | criterion = nn.CrossEntropyLoss().cuda()
75 |
76 | model = model.cuda()
77 |
78 | sim_name = f"privacy/{datasetname}/{model.name}"
79 | best_top1 = 0
80 | with Simulation(sim_name=sim_name, output_root="runs") as sim:
81 | print(f'Running: python {" ".join(sys.argv)}\n\n\n')
82 | print_learnable_params(model)
83 | print_data_augmentation_transform(train_dataset.transform)
84 | print(f"Begin training: {model.name}")
85 |
86 | with open(join(sim.outdir, "cfg.txt"), "w") as f:
87 | json.dump(cfg, f, indent=2)
88 |
89 | writer = SummaryWriter(join(sim.outdir, "tensorboard"))
90 | trainer = Trainer(sim)
91 |
92 | # -------------- MAIN TRAINING LOOP ----------------------
93 | for epoch in range(cfg["num_epochs"]):
94 | trainer.do(
95 | "train",
96 | model,
97 | train_dataloader,
98 | epoch,
99 | criterion,
100 | optimizer,
101 | writer,
102 | log_video=False,
103 | )
104 |
105 | if epoch % 5 == 0 or epoch == cfg["num_epochs"] - 1:
106 | curr_top1 = trainer.do(
107 | "test",
108 | model,
109 | test_dataloader,
110 | epoch,
111 | criterion,
112 | None,
113 | writer,
114 | log_video=False,
115 | )
116 |
117 | if curr_top1 > best_top1:
118 | best_top1 = curr_top1
119 |
120 | checkpoint = {
121 | "epoch": epoch,
122 | "state_dict": (
123 | model.module.state_dict()
124 | if multigpu
125 | else model.state_dict()
126 | ),
127 | "optimizer": optimizer.state_dict(),
128 | }
129 | sim.save_pytorch(checkpoint, epoch=epoch)
130 |
131 | trainer.do(
132 | "test",
133 | model,
134 | test_dataloader,
135 | epoch,
136 | criterion,
137 | None,
138 | writer,
139 | )
140 | print(f"\nRun {sim.outdir} finished\n")
141 |
142 |     writer.close()
143 |
144 |
145 | if __name__ == "__main__":
146 | main()
147 |
--------------------------------------------------------------------------------
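The checkpoint dict saved above has keys epoch / state_dict / optimizer, so resuming or evaluating a finished run reduces to the following (a sketch, assuming model and optimizer are rebuilt exactly as in the script and <model_name> is filled in):

    import torch

    ckpt = torch.load("runs/privacy/ipn/<model_name>/best.pt")
    model.load_state_dict(ckpt["state_dict"])  # saved unwrapped even for DataParallel runs
    optimizer.load_state_dict(ckpt["optimizer"])
    start_epoch = ckpt["epoch"] + 1
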
/readme_assets/ablation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/readme_assets/ablation.png
--------------------------------------------------------------------------------
/readme_assets/baselines_qualitative.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/readme_assets/baselines_qualitative.png
--------------------------------------------------------------------------------
/readme_assets/hair_tiled.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/readme_assets/hair_tiled.png
--------------------------------------------------------------------------------
/readme_assets/iid.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/readme_assets/iid.png
--------------------------------------------------------------------------------
/readme_assets/lambda.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/readme_assets/lambda.png
--------------------------------------------------------------------------------
/readme_assets/method.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/readme_assets/method.png
--------------------------------------------------------------------------------
/readme_assets/orig_tiled.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/readme_assets/orig_tiled.png
--------------------------------------------------------------------------------
/readme_assets/saliency.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/readme_assets/saliency.png
--------------------------------------------------------------------------------
/readme_assets/table.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/readme_assets/table.png
--------------------------------------------------------------------------------
/readme_assets/teaser.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/readme_assets/teaser.png
--------------------------------------------------------------------------------
/readme_assets/vscodetargets.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/readme_assets/vscodetargets.png
--------------------------------------------------------------------------------
/simulation/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/simulation/__init__.py
--------------------------------------------------------------------------------
/simulation/helpers.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | from collections import OrderedDict
4 | import datetime
5 | import os
6 | import os.path
7 | from os.path import join
8 | import subprocess
9 |
10 |
11 | def setup_outdir(name, base=None):
12 | """
13 |     Set up an output directory.
14 |
15 | args:
16 | name name of the output directory
17 |         base    base directory (or file in base directory), if not passed,
18 | the parent directory of the directory in which this file is
19 | in is used (assuming that we are in a utils module which is
20 | a directory lying in the project base folder)
21 |
22 | Best practice usage:
23 |
24 | setup_outdir("out", __file__)
25 |
26 | to get subfolder in directory of main script or
27 |
28 | setup_outdir("out", os.path.curdir)
29 |
30 | to get subfolder of current working directory.
31 | """
32 |
33 | if base is not None:
34 | if os.path.isfile(base):
35 | base = os.path.dirname(base)
36 | elif os.path.isdir(base):
37 | pass
38 | else:
39 |             raise IOError("need to pass a base which is either a file or a "
40 |                           "directory.")
41 | else:
42 | base = os.path.dirname(os.path.dirname(__file__))
43 |
44 | outdir = join(base, name)
45 |
46 | if not os.path.exists(outdir):
47 | os.makedirs(outdir)
48 |
49 | return outdir
50 |
51 |
52 | def setup_numbered_outdir(basename, base=None, title=None):
53 | """
54 |     Set up a numbered output directory for an experiment.
55 |
56 | args:
57 |         basename  name of the output directory in which the numbered
58 |                   directory is created
59 |         base      base directory (or file in base directory), if not passed,
60 |                   the parent directory of the directory in which this file
61 |                   is in is used (assuming that we are in a utils module
62 |                   which is a directory lying in the project base folder)
63 |         title     additional string to append to the directory name
64 |
65 | Best practice usage: see documentation of setup_outdir().
66 | """
67 | outdir = setup_outdir(basename, base)
68 |
69 | dirs = [d for d in os.listdir(outdir) if os.path.isdir(join(outdir, d))]
70 | dirs_split = [d.split('_')[0] for d in dirs]
71 |
72 | dir_nums = []
73 |
74 | for d in dirs_split:
75 | try:
76 | num = int(d)
77 | dir_nums += [num]
78 | except ValueError:
79 | pass
80 |
81 | n = max(dir_nums) + 1 if len(dir_nums) > 0 else 0
82 |
83 | now = '{:%Y-%m-%d_%H-%M-%S}'.format(datetime.datetime.now())
84 | dirname = '{0:04d}_{1:s}{2:s}'.format(n, now, '_'+title if title else '')
85 | outdir = setup_outdir(join(outdir, dirname), base)
86 |
87 | return outdir
88 |
89 |
90 | def get_git_status():
91 | rev = subprocess.run(['git', 'rev-parse', 'HEAD'], stdout=subprocess.PIPE).stdout
92 | stat = subprocess.run(['git', 'status'], stdout=subprocess.PIPE).stdout
93 | diff = subprocess.run(['git', 'diff'], stdout=subprocess.PIPE).stdout
94 |
95 | s = '\n'.join(['revision: '+rev.decode('UTF-8'), stat.decode('UTF-8'), diff.decode('UTF-8')])
96 |
97 | return s
98 |
99 |
100 | def shell_exec(command, check=False):
101 | run = subprocess.run(command, check=check, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
102 |
103 | return run.stdout.decode('utf-8'), run.stderr.decode('utf-8'),
104 |
105 |
106 | def shell_exec_silent(command):
107 | return subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE).returncode
108 |
109 |
110 | def dict_to_string(d):
111 | assert type(d) in [dict, OrderedDict]
112 |
113 | if type(d) is OrderedDict:
114 | s = '_'.join(str(k)+str(v)
115 | for k, v in d.items())
116 | else:
117 | s = '_'.join(str(k)+str(d[k])
118 | for k in sorted(d.keys()))
119 |
120 | return s
121 |
122 |
--------------------------------------------------------------------------------
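A usage sketch of the numbered-directory helper ("runs" and "baseline" are illustrative names):

    from simulation.helpers import setup_numbered_outdir

    # Successive calls create runs/0000_<timestamp>_baseline, runs/0001_..., etc.,
    # following the '{0:04d}_{1:s}{2:s}' pattern above.
    outdir = setup_numbered_outdir("runs", title="baseline")
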
/transforms/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/transforms/__init__.py
--------------------------------------------------------------------------------
/utils/AverageMeter.py:
--------------------------------------------------------------------------------
1 | class AverageMeter(object):
2 | """Computes and stores the average and current value"""
3 | def __init__(self):
4 | self.reset()
5 |
6 | def reset(self):
7 | self.val = 0
8 | self.avg = 0
9 | self.sum = 0
10 | self.count = 0
11 |
12 | def update(self, val, n=1):
13 | self.val = val
14 | self.sum += val * n
15 | self.count += n
16 | self.avg = self.sum / self.count
--------------------------------------------------------------------------------
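A usage sketch: the meter keeps a running average weighted by the per-update count n.

    meter = AverageMeter()
    meter.update(0.9, n=32)  # e.g. mean loss over a 32-sample batch
    meter.update(0.7, n=32)
    print(meter.val, meter.avg)  # 0.7 0.8, since (0.9*32 + 0.7*32) / 64 = 0.8
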
/utils/ConfusionMatrix.py:
--------------------------------------------------------------------------------
1 | import io
2 |
3 | import torch
4 | import PIL.Image
5 | import matplotlib as mpl
6 |
7 | # mpl.use('Agg')
8 | import numpy as np
9 | import matplotlib.pyplot as plt
10 | from torchvision.transforms import ToTensor
11 |
12 |
13 | class ConfusionMatrix(object):
14 | def __init__(self, n_classes, labels=None):
15 | self.n_classes = n_classes
16 | self.mat = torch.zeros(n_classes, n_classes, requires_grad=False)
17 | self.labels = labels
18 |
19 | def update(self, preds, labels):
20 | for p, t in zip(preds, labels):
21 | self.mat[t, p] += 1
22 |
23 | def reset(self):
24 | self.mat = torch.zeros(self.n_classes, self.n_classes, requires_grad=False)
25 |
26 | def _create_figure(self, display_values, normalize, fontsize=5, label_angle=45):
27 | mat = self.mat
28 |
29 | if normalize:
30 | total_samples = mat.sum(axis=1)
31 | mat = mat / total_samples[:, None]
32 |
33 | fig = plt.figure()
34 | ax = fig.add_subplot(111)
35 | cax = ax.matshow(mat, cmap=plt.cm.Blues, vmin=0, vmax=1)
36 |
37 | if display_values:
38 | # threshold = mat.max() / 2.0
39 | for (i, j), z in np.ndenumerate(mat):
40 | if not np.isnan(z) and z > 0.0001:
41 | color = "white" if z > 0.5 else "black"
42 | ax.text(
43 | j,
44 | i,
45 | "{:.2f}".format(z),
46 | ha="center",
47 | va="center",
48 | color=color,
49 | fontsize=fontsize - 2,
50 | )
51 |
52 | if self.labels is not None:
53 | ax.set_xticks(range(0, self.n_classes))
54 | ax.set_xticklabels(self.labels, rotation=label_angle, fontsize=fontsize)
55 | ax.set_yticks(range(0, self.n_classes))
56 | ax.set_yticklabels(self.labels, fontsize=fontsize)
57 |
58 | ax.set_ylabel("True Class")
59 | ax.set_xlabel("Predicted Class")
60 | fig.colorbar(cax)
61 |
62 | return fig
63 |
64 | def _fig_to_img(self, fig, dpi):
65 | buf = io.BytesIO()
66 | fig.savefig(buf, dpi=dpi, format="png")
67 | buf.seek(0)
68 | image = PIL.Image.open(buf).convert("RGB")
69 | image = ToTensor()(image)
70 | plt.close(fig)
71 | return image
72 |
73 | def as_img(
74 | self, display_values=True, normalize=True, dpi=800, fontsize=5, label_angle=45
75 | ):
76 | fig = self._create_figure(display_values, normalize, fontsize, label_angle)
77 | image = self._fig_to_img(fig, dpi)
78 | return image
79 |
80 |
81 | if __name__ == "__main__":
82 | cm = ConfusionMatrix(3, list(range(3)))
83 | print(f"{cm.n_classes}, {cm.labels}")
84 | allfig, ax = plt.subplots(2, 3)
85 | ax = ax.flatten()
86 |
87 | for i in range(3):
88 | print(f"in iter {i}")
89 | predictions_random = np.floor(3 * np.random.rand(10)).astype(int)
90 | # predictions_random = [i] * 10
91 | labels_uniform = np.floor(3 * np.random.rand(10)).astype(int)
92 | cm.update(predictions_random, labels_uniform)
93 | I = cm.as_img(dpi=250, display_values=True, normalize=False).permute(1, 2, 0)
94 | ax[i].imshow(I)
95 | ax[i].axis("off")
96 |
97 |         # ConfusionMatrix has no as_histogram_img method; the bottom row
98 |         # of the demo grid is left empty so the demo no longer crashes here.
99 |         ax[i + 3].axis("off")
100 |
101 | plt.show()
102 |
--------------------------------------------------------------------------------
/utils/VideoTensorViewer.py:
--------------------------------------------------------------------------------
1 | from matplotlib import pyplot as plt
2 |
3 |
4 | class VideoTensorViewer(object):
5 | def __init__(self, vol, wrap_around=True, figsize=(8,6)):
6 | # vol has to be shape C,T, H, W
7 | self.vol = vol
8 | self.wrap_around = wrap_around
9 | self.slices = vol.shape[1]
10 | self.ind = 0
11 | self.fig, self.ax = plt.subplots(figsize=figsize)
12 | self.fig.canvas.mpl_connect('scroll_event', self.onscroll)
13 | self.im = self.ax.imshow(self.vol[:, self.ind, ...].permute(1,2,0))
14 | self.update()
15 | plt.axis('off')
16 | plt.tight_layout()
17 | plt.show()
18 |
19 | def onscroll(self, event):
20 | if event.button == 'up':
21 | if not self.wrap_around and self.ind+1 == self.slices:
22 | return
23 | self.ind = (self.ind + 1) % self.slices
24 | else:
25 | if not self.wrap_around and self.ind == 0:
26 | return
27 | self.ind = (self.ind - 1) % self.slices
28 | plt.title(f'{self.ind}')
29 | self.update()
30 |
31 | def update(self):
32 | self.im.set_data(self.vol[:, self.ind, :, :].permute(1,2,0))
33 | self.im.axes.figure.canvas.draw()
--------------------------------------------------------------------------------
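A usage sketch (a random tensor stands in for a real clip; the viewer expects shape C, T, H, W):

    import torch
    from utils.VideoTensorViewer import VideoTensorViewer

    # Scroll the mouse wheel over the window to step through the T=10 frames.
    VideoTensorViewer(torch.rand(3, 10, 64, 64))
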
/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/utils/__init__.py
--------------------------------------------------------------------------------
/utils/info_print.py:
--------------------------------------------------------------------------------
1 | import pprint
2 | import termplotlib as tpl
3 |
4 |
5 | def print_learnable_params(model, verbose=True):
6 | print("-----------------------------------------------------------")
7 | print("Printing parameters of model which have requires_grad==True")
8 | print("-----------------------------------------------------------")
9 |
10 | learnable_names = []
11 | learnable_num_param = []
12 |
13 | frozen_names = []
14 | frozen_num_params = []
15 |     for name, param in model.named_parameters():
16 |         if param.requires_grad:
17 |             learnable_names.append(name)
18 |             learnable_num_param.append(param.numel())
19 |         else:
20 |             frozen_names.append(name)
21 |             frozen_num_params.append(param.numel())
22 |
23 | num_learnable_params = sum(learnable_num_param)
24 | num_frozen_params = sum(frozen_num_params)
25 |     if verbose:
26 |         fig = tpl.figure()
27 |         fig.barh(learnable_num_param, learnable_names)
28 |         fig.show()
29 |     print("-----------------------------------------------------------")
30 |     print(f"Total number of parameters: {(num_learnable_params+num_frozen_params):.2E}")
31 |     print(f"Total number of learnable parameters: {num_learnable_params:.2E}")
32 |     print(f"Total number of frozen parameters: {num_frozen_params:.2E}")
33 |     print("-----------------------------------------------------------\n\n\n")
34 |
35 |
36 | def print_config_options(cfg):
37 | print("-----------------------------------------------------------")
38 | print("Printing configuration options")
39 | print("-----------------------------------------------------------")
40 | pprint.pprint(cfg)
41 | print("-----------------------------------------------------------\n\n\n")
42 |
43 |
44 | def print_data_augmentation_transform(transform):
45 | print("-----------------------------------------------------------")
46 | print("Printing Data Augmentation Transform")
47 | print("-----------------------------------------------------------")
48 |
49 | for i in range(len(transform.__dict__["transforms"])):
50 | pprint.pprint(transform.__dict__["transforms"][i].__dict__)
51 | print("-----------------------------------------------------------\n\n\n")
52 |
--------------------------------------------------------------------------------
/utils/metrics.py:
--------------------------------------------------------------------------------
1 | def accuracy(output, target, topk=(1,)):
2 |     """Computes the top-k accuracy (in percent) for the specified values of k"""
3 | maxk = max(topk)
4 | batch_size = target.size(0)
5 |
6 | _, pred = output.topk(maxk, 1, True, True)
7 | pred = pred.t()
8 | correct = pred.eq(target.view(1, -1).expand_as(pred))
9 |
10 | res = []
11 | for k in topk:
12 | correct_k = correct[:k].contiguous().view(-1).float().sum(0)
13 | res.append(correct_k.mul_(100.0 / batch_size))
14 | return res
--------------------------------------------------------------------------------
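A small worked example of the helper above:

    import torch
    from utils.metrics import accuracy

    output = torch.tensor([[0.1, 0.8, 0.1],   # sample 0: class 1 ranked first
                           [0.6, 0.3, 0.1]])  # sample 1: class 1 ranked second
    target = torch.tensor([1, 1])
    top1, top2 = accuracy(output, target, topk=(1, 2))
    print(top1.item(), top2.item())  # 50.0 100.0
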
/visualize_qualitative/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/visualize_qualitative/__init__.py
--------------------------------------------------------------------------------
/visualize_qualitative/baselines_qualitative_vis.py:
--------------------------------------------------------------------------------
1 | from torchvision.io import write_png
2 | import torch
3 | import matplotlib.pyplot as plt
4 |
5 |
6 |
7 |
8 | from torch.utils.data import DataLoader
9 |
10 |
11 | from dataset.db_factory import DBfactory
12 | from utils.info_print import *
13 | from config import parser
14 |
15 |
16 | from utils.model_utils import (
17 | set_seed,
18 | )
19 |
20 |
21 | import argparse
22 |
23 | parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
24 |
25 | parser.add_argument("--masked", default=None) # person / background
26 | parser.add_argument("--downsample", type=int, default=None)
27 | parser.add_argument("--interpolation", default=None) # nearest / bilinear / ...
28 | parser.add_argument("--blur", default=None) # weak / strong / None
29 | parser.add_argument("--afd_combine_level", type=int, default=None)
30 | parser.add_argument("-combine_masked", action="store_true")
31 | parser.add_argument("--downsample_masked", type=int, default=None)
32 | parser.add_argument("--interpolation_masked", default=None) # nearest / bilinear / ...
33 | args = parser.parse_args()
34 |
35 |
36 | def main():
37 | cfg = args.__dict__.copy()
38 | # seeds = [1, 90, 986576]
39 | seeds = [1123, 4, 986576]
40 | dir_locs = ["1", "2", "3"]
41 | zipped = list(zip(seeds, dir_locs))
42 |
43 |     id_sample = 1  # 0, 1, or 2 <------- Only touch this one
44 | seed = zipped[id_sample][0]
45 | dir_loc = zipped[id_sample][1]
46 |
47 | torch.backends.cudnn.benchmark = True
48 | cfg["datasetname"] = "ipn"
49 | cfg["privacy"] = True
50 | cfg["num_workers"] = 0
51 | cfg["batch_size"] = 1
52 | cfg["architecture"] = "resnet50"
53 | cfg["blur"] = None
54 | cfg["selectively_mask"] = False
55 |
56 | datasetname = cfg["datasetname"]
57 | batch_size = cfg["batch_size"]
58 | num_workers = cfg["num_workers"]
59 |
60 | images = []
61 |
62 | # ----------- Original -----------
63 | set_seed(seed)
64 | test_dataset = DBfactory(datasetname, set_split="test", config=cfg)
65 |
66 | test_dataloader = DataLoader(
67 | test_dataset,
68 | batch_size,
69 | num_workers=num_workers,
70 | shuffle=True,
71 | drop_last=False,
72 | )
73 |
74 | for i, (inputs, masks, flows, labels) in enumerate(test_dataloader):
75 | images.append(inputs[0])
76 | break
77 |
78 | # ----------- DOWNSAMPLE -----------
79 |
80 | for downsample in [4, 16]:
81 | set_seed(seed)
82 | cfg["downsample"] = downsample
83 | cfg["interpolation"] = "nearest"
84 |
85 | test_dataset = DBfactory(datasetname, set_split="test", config=cfg)
86 | test_dataloader = DataLoader(
87 | test_dataset,
88 | batch_size,
89 | num_workers=num_workers,
90 | shuffle=True,
91 | drop_last=False,
92 | )
93 |
94 | for i, (inputs, masks, flows, labels) in enumerate(test_dataloader):
95 | images.append(inputs[0])
96 | break
97 |
98 | # ----------- BLUR -----------
99 |
100 | for blur in ["weak", "strong"]:
101 | set_seed(seed)
102 | cfg["downsample"] = None
103 | cfg["blur"] = blur
104 |
105 | test_dataset = DBfactory(datasetname, set_split="test", config=cfg)
106 | test_dataloader = DataLoader(
107 | test_dataset,
108 | batch_size,
109 | num_workers=num_workers,
110 | shuffle=True,
111 | drop_last=False,
112 | )
113 |
114 | for i, (inputs, masks, flows, labels) in enumerate(test_dataloader):
115 | images.append(inputs[0])
116 | break
117 |
118 | # ----------- MASKED -----------
119 | set_seed(seed)
120 | cfg["downsample"] = None
121 | cfg["blur"] = None
122 | cfg["masked"] = "person"
123 | cfg["mean_fill"] = True
124 |
125 | test_dataset = DBfactory(datasetname, set_split="test", config=cfg)
126 | test_dataloader = DataLoader(
127 | test_dataset,
128 | batch_size,
129 | num_workers=num_workers,
130 | shuffle=True,
131 | drop_last=False,
132 | )
133 |
134 | for i, (inputs, masks, flows, labels) in enumerate(test_dataloader):
135 | images.append(inputs[0])
136 | break
137 |
138 | t_inv = test_dataset.inverse_normalise
139 | for en, img in enumerate(images):
140 | img = (t_inv(img.unsqueeze(1)) * 255).squeeze().byte()
141 | write_png(img, f"out/{dir_loc}/{en:03d}.png")
142 | print(f'Wrote to "out/{dir_loc}/{en:03d}.png"')
143 |
144 |
145 | if __name__ == "__main__":
146 | main()
147 |
--------------------------------------------------------------------------------
/visualize_qualitative/similarity_from_tempate_vis.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import matplotlib
3 | import matplotlib.pyplot as plt
4 | from einops import rearrange
5 | from torchvision.io import read_image
6 | from torchvision.utils import make_grid
7 | from matcher.helpers import similarity_from_descriptor
8 | from matcher.helpers import get_best_matching_descriptor
9 |
10 | import torch
11 |
12 | parser = argparse.ArgumentParser()
13 | parser.add_argument("--descriptor", type=str, required=True)
14 | parser.add_argument("--image", type=str, required=True)
15 | args = parser.parse_args()
16 |
17 | if __name__ == "__main__":
18 | images = [args.image]
19 | descriptor = torch.load(args.descriptor)
20 | # targeted_descr = get_best_matching_descriptor(descriptor, images[0])
21 |
22 | with torch.no_grad():
23 | hair = similarity_from_descriptor(descriptor, images)[0].cpu().squeeze() * 255.0
24 | orig = read_image(images[0]).squeeze().permute(1, 2, 0)
25 |
26 | hair = hair.clamp(80, 255)
27 |
28 |     # split into 48x48 patches
29 | new = orig.unfold(0, 48, 48).unfold(1, 48, 48)
30 | new = rearrange(new, "x y c h w -> (x y) c h w")
31 | plotable = make_grid(new, nrow=6, padding=10, pad_value=255)
32 | plt.figure()
33 | plt.axis("off")
34 | plt.imshow(plotable.permute(1, 2, 0))
35 | plt.tight_layout()
36 | plt.show()
37 | # plt.savefig("readme_assets/orig_tiled.png", dpi=300, bbox_inches="tight")
38 | # print("Saved image to readme_assets/orig_tiled.png")
39 |
40 |     # split into 48x48 patches
41 | new = hair.unfold(0, 48, 48).unfold(1, 48, 48)
42 | new = rearrange(new, "x y h w -> (x y) h w").unsqueeze(1)
43 | plotable = make_grid(new, nrow=6, padding=10, pad_value=255)
44 | plt.axis("off")
45 | plt.imshow(plotable.permute(1, 2, 0)[:, :, 0], cmap="hot")
46 | plt.tight_layout()
47 | plt.show()
48 | # plt.savefig("readme_assets/hair_tiled.png", dpi=300, bbox_inches="tight")
49 | # print("Saved image to readme_assets/hair_tiled.png")
50 |
--------------------------------------------------------------------------------
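An invocation sketch (both input files ship with the repository; run from the repository root so the matcher package resolves):

    python visualize_qualitative/similarity_from_tempate_vis.py --descriptor output/descriptors/hair.pt --image matcher/input/ipn1.jpg
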
/visualize_quantitative/Fig7_pgf_plots_average.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 | import seaborn as sns
3 | import pandas as pd
4 | import numpy as np
5 | from torchvision.io import read_image
6 | from torchvision.transforms import Resize, Compose, ToTensor, Normalize
7 | from os import path, listdir
8 | from os.path import join
9 | from glob import glob
10 | import random
11 | import torch
12 |
13 | from utils.model_utils import set_seed
14 |
15 | import matplotlib as mpl
16 |
17 | mpl.use("Agg")
18 |
19 | if __name__ == "__main__":
20 |
21 | def f(lam, a, p):
22 | return (1 - lam) * a + lam * (1 - p)
23 |
24 | labels = [
25 | "BDQ \cite{bqn}",
26 | "ALF \cite{pahmdb}",
27 | "ELR\cite{ryoo2018extremelowres} s=2",
28 | "ELR\cite{ryoo2018extremelowres} s=4",
29 | "ELR\cite{ryoo2018extremelowres} s=8",
30 | "ELR\cite{ryoo2018extremelowres} s=16",
31 | "ELR\cite{ryoo2018extremelowres} s=32",
32 | "ELR\cite{ryoo2018extremelowres} s=64",
33 | "Ours \\textdagger",
34 | "Ours",
35 | ]
36 | ipn_action = [81, 76, 82.31, 81.76, 79.48, 70.82, 52.96, 31.63, 87.11, 83.15]
37 | ipn_privacy = [59, 65, 80.04, 72.01, 70.08, 64.32, 63.29, 62.7, 55.38, 54.12]
38 |
39 | kth_action = [91.11, 85.89, 91.64, 92.99, 91.22, 91.22, 85.57, 56.21, 88.67, 82.70]
40 | kth_privacy = [7.15, 19.27, 91.82, 92.50, 91.58, 88.86, 82.56, 58.35, 5.46, 4.31]
41 |
42 | sbu_action = [84.04, 82.00, 97.93, 98.27, 98.47, 96.27, 92.42, 80.05, 84.04, 86.74]
43 | sbu_privacy = [34.18, 48.00, 85.1, 91.48, 84.04, 82.97, 64.89, 43.61, 11.7, 13.19]
44 |
45 | all_actions = torch.stack(
46 | [torch.Tensor(ipn_action), torch.Tensor(kth_action), torch.Tensor(sbu_action)]
47 | )
48 | all_actions = all_actions.mean(0).tolist()
49 |
50 | all_privacy = torch.stack(
51 | [
52 | torch.Tensor(ipn_privacy),
53 | torch.Tensor(kth_privacy),
54 | torch.Tensor(sbu_privacy),
55 | ]
56 | )
57 | all_privacy = all_privacy.mean(0).tolist()
58 |
59 | for lbl, a, p in zip(labels, all_actions, all_privacy):
60 | if lbl in [
61 | "ELR\cite{ryoo2018extremelowres} s=2",
62 | "ELR\cite{ryoo2018extremelowres} s=4",
63 | "ELR\cite{ryoo2018extremelowres} s=8",
64 | # "ELR\cite{ryoo2018extremelowres} s=16",
65 | # "ELR\cite{ryoo2018extremelowres} s=32",
66 | "ELR\cite{ryoo2018extremelowres} s=64",
67 | ]:
68 | continue
69 | lam = np.linspace(0, 1, 2)
70 |
71 | if lbl in ["Ours \\textdagger", "Ours"]:
72 | print("\\addplot+[style={mark=square*,ultra thick}]")
73 | else:
74 | print("\\addplot+[]")
75 | print("coordinates {")
76 | for l in lam:
77 | y = f(l, a / 100, p / 100)
78 | # print(f"({100*l:.2f}, {100*y:.2f})", end="")
79 | print(f"({l:.2f}, {y:.2f})", end="")
80 | print("};")
81 | print("\\addlegendentry{%s}" % lbl)
82 | print()
83 |
--------------------------------------------------------------------------------
/visualize_quantitative/Fig7_pgf_plots_individual.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 | import seaborn as sns
3 | import pandas as pd
4 | import numpy as np
5 | from torchvision.io import read_image
6 | from torchvision.transforms import Resize, Compose, ToTensor, Normalize
7 | from os import path, listdir
8 | from os.path import join
9 | from glob import glob
10 | import random
11 | import torch
12 |
13 | from utils.model_utils import set_seed
14 |
15 | import matplotlib as mpl
16 |
17 | mpl.use("Agg")
18 |
19 | if __name__ == "__main__":
20 |
21 | def f(lam, a, p):
22 | return (1 - lam) * a + lam * (1 - p)
23 |
24 | labels = [
25 | "BDQ \cite{bqn}",
26 | "ALF \cite{wu2020privacy}",
27 | "ELR\cite{ryoo2018extremelowres} s=2",
28 | "ELR\cite{ryoo2018extremelowres} s=4",
29 | "ELR\cite{ryoo2018extremelowres} s=8",
30 | "ELR\cite{ryoo2018extremelowres} s=16",
31 | "ELR\cite{ryoo2018extremelowres} s=32",
32 | "ELR\cite{ryoo2018extremelowres} s=64",
33 | "Ours \\textdagger",
34 | "Ours",
35 | ]
36 | ipn_action = [81, 76, 82.31, 81.76, 79.48, 70.82, 52.96, 31.63, 87.11, 83.15]
37 | ipn_privacy = [59, 65, 80.04, 72.01, 70.08, 64.32, 63.29, 62.7, 55.38, 54.12]
38 |
39 | kth_action = [91.11, 85.89, 91.64, 92.99, 91.22, 91.22, 85.57, 56.21, 88.67, 82.70]
40 | kth_privacy = [7.15, 19.27, 91.82, 92.50, 91.58, 88.86, 82.56, 58.35, 5.46, 4.31]
41 |
42 | sbu_action = [84.04, 82.00, 97.93, 98.27, 98.47, 96.27, 92.42, 80.05, 84.04, 86.74]
43 | sbu_privacy = [34.18, 48.00, 85.1, 91.48, 84.04, 82.97, 64.89, 43.61, 11.7, 13.19]
44 |
45 | # for lbl, a, p in zip(labels, ipn_action, ipn_privacy):
46 | # for lbl, a, p in zip(labels, kth_action, kth_privacy):
47 | for lbl, a, p in zip(labels, sbu_action, sbu_privacy):
48 | if lbl in [
49 | "ELR\cite{ryoo2018extremelowres} s=2",
50 | "ELR\cite{ryoo2018extremelowres} s=4",
51 | "ELR\cite{ryoo2018extremelowres} s=8",
52 | "ELR\cite{ryoo2018extremelowres} s=16",
53 | # "ELR\cite{ryoo2018extremelowres} s=32",
54 | # "ELR\cite{ryoo2018extremelowres} s=64",
55 | ]:
56 | continue
57 | lam = np.linspace(0, 1, 2)
58 |
59 | if lbl in ["Ours \\textdagger", "Ours"]:
60 | print("\\addplot+[style={mark=square*,ultra thick}]")
61 | else:
62 | print("\\addplot+[]")
63 | print("coordinates {")
64 | for l in lam:
65 | y = f(l, a / 100, p / 100)
66 | print(f"({100*l:.2f}, {100*y:.2f})", end="")
67 | print("};")
68 | print("\\addlegendentry{%s}" % lbl)
69 | print()
70 |
--------------------------------------------------------------------------------
/visualize_quantitative/load_and_plot_confusionmatrix.py:
--------------------------------------------------------------------------------
1 | from utils.ConfusionMatrix import ConfusionMatrix
2 | import pickle
3 | import matplotlib.pyplot as plt
4 |
5 | with open(
6 | "runs/kth/x3d_m__16x5____pretrained__True____train_backbone__True/0000_2023-09-06_17-38-15/confusionmatrix/data_labels.pkl",
7 | "rb",
8 | ) as f:
9 | labels = pickle.load(f)
10 | with open(
11 | "runs/kth/x3d_m__16x5____pretrained__True____train_backbone__True/0000_2023-09-06_17-38-15/confusionmatrix/data_test_confusion_00079.pkl",
12 | "rb",
13 | ) as f:
14 | cm = pickle.load(f)
15 |
16 |
17 | M = ConfusionMatrix(len(labels), labels)
18 | M.mat = cm
19 |
20 | a = M.as_img(fontsize=10, label_angle=90, display_values=False)
21 | plt.imshow(a.permute(1, 2, 0))
22 | plt.show()
23 |
--------------------------------------------------------------------------------
/yolo/create_masks_ipn_or_kth.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import glob
3 | import os
4 | from os.path import join
5 |
6 | import torch
7 | import torch.nn.functional as F
8 | from torchvision.datasets.utils import list_dir
9 | from torchvision.io import write_jpeg
10 | from tqdm import tqdm
11 | from ultralytics import YOLO
12 |
13 | parser = argparse.ArgumentParser(description="Create masks for IPN dataset")
14 | parser.add_argument("--src", type=str, default="data/ipn")
15 | parser.add_argument("--dst", type=str, default="data/ipn_masks")
16 | args = parser.parse_args()
17 |
18 | src = args.src
19 | dst = args.dst
20 | classes = list(sorted(list_dir(src)))
21 |
22 |
23 | def check(path1, path2, clz):
24 |     if len(glob.glob(path1)) == len(glob.glob(path2)):
25 |         print("[ OK ]")
26 |         return
27 |     else:
28 |         print("[ FAIL ]")
29 |         return clz
30 |
31 |
32 | def check_integrity(src_dir, dst_dir):
33 | src_images = sorted(glob.glob(f"{src_dir}/*.png")) + sorted(
34 | glob.glob(f"{src_dir}/*.jpg")
35 | )
36 | dst_images = sorted(glob.glob(f"{dst_dir}/*.png")) + sorted(
37 | glob.glob(f"{dst_dir}/*.jpg")
38 | )
39 |
40 | assert len(src_images) == len(dst_images)
41 |
42 |
43 | def process_class(clz):
44 | model = YOLO("yolo/yolov8n-seg.pt")
45 | print(f"Processing {clz}")
46 | os.makedirs(f"{dst}/{clz}", exist_ok=True)
47 |
48 | image_list = sorted(glob.glob(f"{src}/{clz}/*png")) + sorted(
49 | glob.glob(f"{src}/{clz}/*jpg")
50 | )
51 | for image in image_list:
52 | dst_image = f"{dst}/{clz}/{os.path.basename(image)}"
53 |         results = model(source=image, classes=0, verbose=False, stream=True)  # class 0 = person
54 |
55 | all_masks = extract_masks_from_results(results)
56 | write_jpeg(
57 | (torch.stack(all_masks).repeat(3, 1, 1) * 255).to(torch.uint8),
58 | dst_image,
59 | )
60 | check_integrity(join(src, clz), join(dst, clz))
61 |
62 |
63 | def extract_masks_from_results(results):
64 | all_masks = []
65 | for r in results:
66 | if r.masks is not None:
67 | mask = torch.from_numpy(r.masks.cpu().numpy().data).mean(0) > 0
68 | mask = F.interpolate(
69 | mask.unsqueeze(0).unsqueeze(0).float(),
70 | size=r.orig_shape,
71 | mode="nearest",
72 | ).squeeze()
73 |
74 | else:
75 | mask = torch.zeros(r.orig_shape) > 0 # empty mask
76 | all_masks.append(mask)
77 | return all_masks
78 |
79 |
80 | for clz in tqdm(sorted(classes)):
81 | process_class(clz)
82 |
--------------------------------------------------------------------------------
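An invocation sketch using the argparse defaults above:

    python yolo/create_masks_ipn_or_kth.py --src data/ipn --dst data/ipn_masks
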
/yolo/create_masks_sbu.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import glob
3 | import os
4 |
5 | import torch
6 | import torch.nn.functional as F
7 | from torchvision.io import write_jpeg
8 | from tqdm import tqdm
9 | from ultralytics import YOLO
10 |
11 | parser = argparse.ArgumentParser(description="Create masks for SBU dataset")
12 | parser.add_argument("--src", type=str, default="data/sbu")
13 | parser.add_argument("--dst", type=str, default="data/sbu_masks")
14 | args = parser.parse_args()
15 |
16 | src = args.src
17 | dst = args.dst
18 | classes = list(sorted(glob.glob(f"{src}/**/**/**/")))
19 |
20 |
21 | def check(path1, path2, clz):
22 |     if len(glob.glob(path1)) == len(glob.glob(path2)):
23 |         print("[ OK ]")
24 |         return
25 |     else:
26 |         print("[ FAIL ]")
27 |         return clz
28 |
29 |
30 | def check_integrity(src_dir, dst_dir):
31 | src_images = sorted(glob.glob(f"{src_dir}/rgb_*.png"))
32 | dst_images = sorted(glob.glob(f"{dst_dir}/*.jpg"))
33 |
34 | assert len(src_images) == len(dst_images)
35 |
36 |
37 | def process_class(clz):
38 |
39 | model = YOLO("yolov8n-seg.pt")
40 | dstdir = clz.replace(src, dst)
41 | os.makedirs(f"{dstdir}", exist_ok=True)
42 |
43 | image_list = sorted(glob.glob(f"{clz}/rgb_*png"))
44 | for image in image_list:
45 | dst_image = f"{dstdir}/{os.path.basename(image).replace('png', 'jpg')}"
46 | results = model(source=image, classes=0, verbose=False, stream=True)
47 |
48 | all_masks = extract_masks_from_results(results)
49 | write_jpeg(
50 | (torch.stack(all_masks).repeat(3, 1, 1) * 255).to(torch.uint8),
51 | dst_image,
52 | )
53 |
54 | check_integrity(clz, dstdir)
55 |
56 |
57 | def extract_masks_from_results(results):
58 | all_masks = []
59 | for r in results:
60 | if r.masks is not None:
61 | mask = torch.from_numpy(r.masks.cpu().numpy().data).mean(0) > 0
62 | mask = F.interpolate(
63 | mask.unsqueeze(0).unsqueeze(0).float(),
64 | size=r.orig_shape,
65 | mode="nearest",
66 | ).squeeze()
67 |
68 | else:
69 | mask = torch.zeros(r.orig_shape) > 0 # empty mask
70 | all_masks.append(mask)
71 | return all_masks
72 |
73 |
74 | for clz in tqdm(sorted(classes)):
75 | process_class(clz)
76 |
--------------------------------------------------------------------------------
/yolo/yolov8n-seg.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/yolo/yolov8n-seg.pt
--------------------------------------------------------------------------------
/yolo/yolov8n.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/f-ilic/SelectivePrivacyPreservation/7e8b98d184b124234f7ae4a0d3c2b1412facdb73/yolo/yolov8n.pt
--------------------------------------------------------------------------------