├── _config.yml
├── README.md
├── custom_datasets.py
├── metrics_on_folder.py
├── metrics_on_video.py
├── vocToCoco_unity_synthetic.py
├── vocToCoco_simulacro.py
├── index.md
├── batch_experiments.py
└── LICENSE.md
/_config.yml:
--------------------------------------------------------------------------------
1 | theme: jekyll-theme-cayman
title: "Real-time gun detection in CCTV: An open problem"
3 | description: " "
4 | google_analytics: UA-178393192-1
5 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Real-time gun detection in CCTV: An open problem
[![License: CC BY-NC 4.0](https://i.creativecommons.org/l/by-nc/4.0/88x31.png)](https://creativecommons.org/licenses/by-nc/4.0/)
3 |
4 | All the information can be found in the following link: [https://deepknowledge-us.github.io/US-Real-time-gun-detection-in-CCTV-An-open-problem-dataset/](https://deepknowledge-us.github.io/US-Real-time-gun-detection-in-CCTV-An-open-problem-dataset/).
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 | ## Terms of use
14 | This dataset can be used for academic research free of charge, citing the paper as we explain below. If you seek to use the data for commercial purposes please [contact us](mailto:jaalvarez@us.es).
15 |
16 |
17 | ## Citation
18 | If you use our dataset, please kindly cite the following paper: Real-time gun detection in CCTV: An open problem. Neural Networks (2020), doi: [https://doi.org/10.1016/j.neunet.2020.09.013](https://doi.org/10.1016/j.neunet.2020.09.013).
19 |
20 | ```
21 | @article{SalazarGonzalez2020,
22 | title = "Real-time gun detection in CCTV: An open problem",
23 | journal = "Neural Networks",
24 | year = "2020",
25 | issn = "0893-6080",
26 | doi = "https://doi.org/10.1016/j.neunet.2020.09.013",
27 | url = "http://www.sciencedirect.com/science/article/pii/S0893608020303361",
28 | author = "Salazar Gonz{\'{a}}lez, Jose L. and Zaccaro, Carlos and {\'{A}}lvarez-Garc{\'{i}}a, Juan A. and Soria-Morillo, Luis M. and Sancho Caparrini, Fernando",
29 | }
30 | ```
31 |
32 | ## License
33 |
34 | This work is licensed under a
35 | [Creative Commons Attribution-NonCommercial 4.0 International License][cc-by-nc]. Contact the authors of this work for commercial use.
36 |
37 | [![CC BY NC 4.0][cc-by-nc-image]][cc-by-nc]
38 |
39 | [cc-by-nc]: http://creativecommons.org/licenses/by-nc/4.0/
40 | [cc-by-nc-image]: https://i.creativecommons.org/l/by-nc/4.0/88x31.png
41 |
--------------------------------------------------------------------------------
/custom_datasets.py:
--------------------------------------------------------------------------------
1 |
2 | from detectron2.data import MetadataCatalog, DatasetCatalog
3 | from detectron2.data.datasets import load_coco_json, register_coco_instances
4 |
def loadDatasets():
    """Register every gun-detection dataset (real CCTV + synthetic) in the
    detectron2 Dataset/Metadata catalogs under the names the experiment
    scripts refer to."""
    classRemapping = {1: 0}
    meta = {'thing_classes': ['gun'], 'thing_dataset_id_to_contiguous_id': classRemapping}

    # Local helper: intentionally shadows the detectron2 import with a
    # variant that forwards the class remapping into load_coco_json.
    def register_coco_instances(name, metadata, classRemapping, json_file, image_root):
        DatasetCatalog.register(
            name,
            lambda: load_coco_json(json_file, image_root, None, classRemapping=classRemapping),
        )
        MetadataCatalog.get(name).set(
            json_file=json_file, image_root=image_root, evaluator_type="coco", **metadata
        )

    # Mock-attack ("simulacro") footage: one dataset per camera plus a
    # combined one.
    simulacroRoot = '/media/datos/shared_datasets/simulacro_guns'
    for cam in ('1', '5', '7'):
        register_coco_instances(f"guns_simulacro_{cam}", meta, classRemapping,
                                json_file=f'{simulacroRoot}/cam{cam}.json',
                                image_root=f'{simulacroRoot}/images')
    register_coco_instances("guns_simulacro_all", meta, classRemapping,
                            json_file=f'{simulacroRoot}/all_cams.json',
                            image_root=f'{simulacroRoot}/images')

    edgecaseRoot = '/media/datos/shared_datasets/guns_edgecase'
    register_coco_instances("guns_edgecase", meta, classRemapping,
                            json_file=f'{edgecaseRoot}/synthetic_train.json',
                            image_root=f'{edgecaseRoot}/images')

    granadaRoot = '/media/datos/shared_datasets/guns_granada'
    register_coco_instances("guns_granada_train", meta, classRemapping,
                            json_file=f'{granadaRoot}/real_train.json',
                            image_root=f'{granadaRoot}/images')
    register_coco_instances("guns_granada_test", meta, classRemapping,
                            json_file=f'{granadaRoot}/real_val.json',
                            image_root=f'{granadaRoot}/images')

    # Unity synthetic splits of increasing size.
    for split in (500, 1000, 2500, 5000):
        splitRoot = f'/media/datos/shared_datasets/unity_syntectic_victory/split-{split}'
        register_coco_instances(f"unity_synthetic_{split}", meta, classRemapping,
                                json_file=f'{splitRoot}/split_coco.json',
                                image_root=f'{splitRoot}')
--------------------------------------------------------------------------------
/metrics_on_folder.py:
--------------------------------------------------------------------------------
1 | import random
2 | import os
3 | import json
4 | import glob
5 |
# Toggle the two phases independently: detection-annotation generation
# (GPU-heavy) and the file-based false-positive aggregation.
CREATE_ANNOS = True
CALCULATE_METRICS = True
# Confidence thresholds at which detections are counted.
THS = [0.99, 0.98, 0.95]

# Where per-video *_annos.json files and metric summaries are written/read.
dstDir = '/mnt/datos/custom_datasets/simulacro/no_guns/Simulacro_guns_annos'
uris = glob.glob('/media/datos/shared_datasets/guns_granada/Test/*')


if CREATE_ANNOS:
    # Pin to one GPU before detectron2/torch initialise CUDA.
    os.environ["CUDA_VISIBLE_DEVICES"] = "2"

    from detectron2.utils.logger import setup_logger

    setup_logger()

    import cv2 as cv
    from detectron2.engine import DefaultPredictor
    from detectron2.config import get_cfg

    cfg = get_cfg()
    # Flip to False to evaluate the heavier X-101 backbone instead.
    if True:
        cfg.merge_from_file("../../build/configs/COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml")
        cfg.MODEL.WEIGHTS = "detectron2://COCO-Detection/faster_rcnn_R_50_FPN_3x/137849458/model_final_280758.pkl"  # initialize from model zoo
    else:
        cfg.merge_from_file("../../build/configs/COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml")
        cfg.MODEL.WEIGHTS = "detectron2://COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x/139173657/model_final_68b088.pkl"  # initialize from model zoo

    cfg.INPUT.MAX_SIZE_TEST = 2500
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1
    cfg.DATALOADER.NUM_WORKERS = 2

    # Fine-tuned gun-detection checkpoint; overrides the zoo weights above.
    cfg.MODEL.WEIGHTS = "/mnt/datos/experiments/guns_detection/faster_rcnn_R_50_FPN_1x[LR=0.002][('guns_granada_train')]FROM[faster_rcnn_R_50_FPN_1x[LR=0.002][('guns_edgecase',)]]/model_final.pth"

    predictor = DefaultPredictor(cfg)

    for uri in uris:
        print(uri)
        cap = cv.VideoCapture(uri)
        frameIdx = 0
        imagesAnnos = []
        while True:
            ret, img = cap.read()
            # BUGFIX: this `break` used to sit inside `for i in range(1)`
            # and only exited that inner loop, so once the stream ended the
            # last decoded frame was re-processed in an infinite loop.
            if not ret or not cap.isOpened():
                break

            preds = predictor(img)["instances"].to("cpu")

            # Each entry becomes [x0, y0, x1, y1, score].
            bboxes = [b.numpy().tolist() for b in preds.pred_boxes] if preds.pred_boxes else []
            scores = preds.scores.numpy().tolist()
            for bbox, score in zip(bboxes, scores):
                bbox.append(score)
            imagesAnnos.append({'frameIdx': frameIdx, 'bboxes': bboxes})

            frameIdx += 1
            if frameIdx % 10 == 0:
                print(frameIdx)

        # Dump the per-frame annotations next to the metric summaries.
        # (Removed the debug print that dumped the full list to stdout.)
        with open(os.path.join(dstDir, os.path.splitext(os.path.basename(uri))[0]) + '_annos.json', 'w') as f:
            json.dump(imagesAnnos, f)

if CALCULATE_METRICS:
    for th in THS:
        metrics = {}
        for uri in uris:
            basename = os.path.splitext(os.path.basename(uri))[0]
            with open(os.path.join(dstDir, basename) + '_annos.json') as f:
                imagesAnnos = json.load(f)
            # Counts every detection at or above the threshold as a false
            # positive — assumes the input footage contains no guns (TODO
            # confirm: the uris glob currently points at guns_granada/Test).
            numDetections = len([bbox for anno in imagesAnnos for bbox in anno['bboxes'] if bbox[4] >= th])
            metrics[basename] = {'FP': numDetections, 'NUM_FRAMES': len(imagesAnnos)}

        with open(os.path.join(dstDir, f'metrics_summary_th-{th}.json'), 'w') as f:
            json.dump(metrics, f)

        detailList = ', '.join(f"{v['FP']}" for v in metrics.values())
        numFrames = sum(v['NUM_FRAMES'] for v in metrics.values())
        print(f"FP{th}: {sum(m['FP'] for m in metrics.values())}/{numFrames} ({detailList})")
--------------------------------------------------------------------------------
/metrics_on_video.py:
--------------------------------------------------------------------------------
1 | import random
2 | import os
3 | import json
4 | import glob
5 | import cv2 as cv
6 |
# Toggle the two phases: run detection over the videos (CREATE_ANNOS) and
# aggregate per-threshold false-positive counts (CALCULATE_METRICS).
CREATE_ANNOS = True
CALCULATE_METRICS = True
# Confidence thresholds at which detections are counted as false positives.
THS = [0.99, 0.98, 0.95]

# Directory holding per-video *_annos.json files and metric summaries.
dstDir = '/mnt/datos/custom_datasets/simulacro/no_guns/Simulacro_guns_annos'
uris = glob.glob('/mnt/datos/custom_datasets/simulacro/no_guns/Simulacro/*.mp4')

# When True, frames with a detection above `confidence` are written to
# ./fp_no_guns_detections/ with boxes drawn, for manual inspection.
VIS_RESULT = True
confidence = 0.99
def drawAnnos(annoGuns, img, fname):
    """Draw a green box and a red confidence label (percent) on ``img``,
    in place, for every detection whose score reaches the module-level
    ``confidence`` threshold.

    ``annoGuns`` holds [x0, y0, x1, y1, score] entries; ``fname`` is
    accepted for interface compatibility but unused.
    """
    for x0, y0, x1, y1, score in annoGuns:
        if score < confidence:
            continue
        topLeft = (int(x0), int(y0))
        bottomRight = (int(x1), int(y1))
        cv.rectangle(img, topLeft, bottomRight, (0, 255, 0), thickness=2)
        cv.putText(img, str(int(score * 100)), (topLeft[0], topLeft[1] - 20),
                   cv.FONT_HERSHEY_TRIPLEX, 0.5, (0, 0, 255), 1)
24 |
# def create_annos(cfg, dstPath, uris):
#
if CREATE_ANNOS:
    # Pin to one GPU before detectron2/torch initialise CUDA.
    os.environ["CUDA_VISIBLE_DEVICES"] = "2"

    from detectron2.utils.logger import setup_logger

    setup_logger()

    import cv2 as cv
    from detectron2.engine import DefaultPredictor
    from detectron2.config import get_cfg

    cfg = get_cfg()
    # Flip to False to evaluate the heavier X-101 backbone instead.
    if True:
        cfg.merge_from_file("../../build/configs/COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml")
        cfg.MODEL.WEIGHTS = "detectron2://COCO-Detection/faster_rcnn_R_50_FPN_3x/137849458/model_final_280758.pkl"  # initialize from model zoo
    else:
        cfg.merge_from_file("../../build/configs/COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml")
        cfg.MODEL.WEIGHTS = "detectron2://COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x/139173657/model_final_68b088.pkl"  # initialize from model zoo

    cfg.INPUT.MAX_SIZE_TEST = 2500
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1
    cfg.DATALOADER.NUM_WORKERS = 2

    # Fine-tuned gun-detection checkpoint; overrides the zoo weights above.
    cfg.MODEL.WEIGHTS = "/mnt/datos/experiments/guns_detection/faster_rcnn_R_50_FPN_1x[LR=0.002][('guns_granada_train', 'guns_simulacro_1', 'guns_simulacro_7')]FROM[faster_rcnn_R_50_FPN_1x[LR=0.002][('unity_synthetic_2500',)]]/model_final.pth"

    predictor = DefaultPredictor(cfg)

    testImgIdx = 0  # running index for saved visualisation frames
    for uri in uris:
        print(uri)
        cap = cv.VideoCapture(uri)
        frameIdx = 0
        imagesAnnos = []
        while True:
            ret, img = cap.read()
            # BUGFIX: this `break` used to sit inside `for i in range(1)`
            # and only exited that inner loop, so once the video ended the
            # last decoded frame was re-processed in an infinite loop.
            if not ret or not cap.isOpened():
                break

            preds = predictor(img)["instances"].to("cpu")

            # Each entry becomes [x0, y0, x1, y1, score].
            bboxes = [b.numpy().tolist() for b in preds.pred_boxes] if preds.pred_boxes else []
            scores = preds.scores.numpy().tolist()
            for bbox, score in zip(bboxes, scores):
                bbox.append(score)

            if VIS_RESULT:
                # Save frames with at least one confident detection for
                # manual inspection of suspected false positives.
                if any(bbox[4] >= confidence for bbox in bboxes):
                    drawAnnos(bboxes, img, '')
                    cv.imwrite(os.path.join('fp_no_guns_detections', f'img{testImgIdx}.jpg'), img)
                    testImgIdx += 1
                    # factor = .5
                    # img = cv.resize(img, (0, 0), fx=factor, fy=factor)
                    # cv.imshow('win', img)
                    # k = cv.waitKey(0)
            imagesAnnos.append({'frameIdx': frameIdx, 'bboxes': bboxes})

            frameIdx += 1
            if frameIdx % 10 == 0:
                print(f"frameIdx: {frameIdx}, boxes: {len(bboxes)}")

        # NOTE(review): the dump below is disabled, so CALCULATE_METRICS
        # reads whatever *_annos.json files a previous run left in dstDir —
        # re-enable it to refresh them.
        # with open(os.path.join(dstDir, os.path.splitext(os.path.basename(uri))[0]) + '_annos.json', 'w') as f:
        #     json.dump(imagesAnnos, f)

if CALCULATE_METRICS:
    for th in THS:
        metrics = {}
        for uri in uris:
            basename = os.path.splitext(os.path.basename(uri))[0]
            with open(os.path.join(dstDir, basename) + '_annos.json') as f:
                imagesAnnos = json.load(f)
            # Gun-free footage: every detection >= th is a false positive.
            numDetections = len([bbox for anno in imagesAnnos for bbox in anno['bboxes'] if bbox[4] >= th])
            metrics[basename] = {'FP': numDetections, 'NUM_FRAMES': len(imagesAnnos)}

        with open(os.path.join(dstDir, f'metrics_summary_th-{th}.json'), 'w') as f:
            json.dump(metrics, f)

        detailList = ', '.join(f"{v['FP']}" for v in metrics.values())
        numFrames = sum(v['NUM_FRAMES'] for v in metrics.values())
        print(f"FP{th}: {sum(m['FP'] for m in metrics.values())}/{numFrames} ({detailList})")
--------------------------------------------------------------------------------
/vocToCoco_unity_synthetic.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 |
3 | # pip install lxml
4 |
5 | import sys
6 | import os
7 | import json
8 | import xml.etree.ElementTree as ET
9 | import glob
10 | import pickle
11 |
12 | START_BOUNDING_BOX_ID = 1
13 | PRE_DEFINE_CATEGORIES = {'Pistol': 1}
14 |
def get(root, name):
    """Return every element matching *name* under *root* (possibly empty)."""
    return root.findall(name)
18 |
19 |
def get_and_check(root, name, length):
    """Find *name* elements under *root* and validate the match count.

    Raises ValueError when nothing matches, or when *length* > 0 and the
    number of matches differs from *length*. Returns the single element
    when *length* == 1, otherwise the list of matches.
    """
    found = root.findall(name)
    if not found:
        raise ValueError("Can not find %s in %s." % (name, root.tag))
    if length > 0 and len(found) != length:
        raise ValueError(
            "The size of %s is supposed to be %d, but is %d."
            % (name, length, len(found))
        )
    return found[0] if length == 1 else found
32 |
33 |
def get_categories(xml_files):
    """Scan VOC XML files and map each object class name (the first child
    of every <object>) to an index based on sorted name order."""
    names = set()
    for xml_file in xml_files:
        root = ET.parse(xml_file).getroot()
        for member in root.findall("object"):
            names.add(member[0].text)
    return {name: i for i, name in enumerate(sorted(names))}
44 |
45 |
def convert(xml_files, json_file):
    """Convert VOC-style XML annotation files into one COCO-format JSON
    file written to *json_file* (parent directories are created).

    Knife objects are dropped; every other class is collapsed to
    'Pistol'. Image ids are the image file names (strings rather than
    COCO's usual ints — assumed acceptable to the downstream loader).

    Raises ValueError on malformed annotations: missing/duplicated tags,
    multiple <path> entries, or degenerate boxes.
    """
    json_dict = {"images": [], 'categories': [], "annotations": [], "type": "instances"}

    # BUGFIX: copy the predefined mapping instead of aliasing the module
    # global, so categories added below cannot leak between successive
    # convert() calls (one call per dataset split in __main__).
    if PRE_DEFINE_CATEGORIES is not None:
        categories = dict(PRE_DEFINE_CATEGORIES)
    else:
        categories = get_categories(xml_files)
    bnd_id = START_BOUNDING_BOX_ID
    for xml_file in sorted(xml_files):
        tree = ET.parse(xml_file)
        root = tree.getroot()
        path = get(root, "path")
        if len(path) == 1:
            filename = os.path.basename(path[0].text)
        elif len(path) == 0:
            filename = get_and_check(root, "filename", 1).text
        else:
            raise ValueError("%d paths found in %s" % (len(path), xml_file))

        # <path> may be a Windows path; keep only the final component.
        fname = filename.split('\\')[-1]

        for obj in get(root, "object"):
            category = get_and_check(obj, "name", 1).text
            # TODO: Change to match edgecase dataset
            if category == 'Knife':
                continue
            else:
                category = 'Pistol'

            if category not in categories:
                new_id = len(categories)
                categories[category] = new_id
            category_id = categories[category]
            bndbox = get_and_check(obj, "bndbox", 1)
            # VOC coords are 1-based: shift mins to 0-based, keep maxes.
            xmin = int(float(get_and_check(bndbox, "xmin", 1).text)) - 1
            ymin = int(float(get_and_check(bndbox, "ymin", 1).text)) - 1
            xmax = int(float(get_and_check(bndbox, "xmax", 1).text))
            ymax = int(float(get_and_check(bndbox, "ymax", 1).text))
            # BUGFIX: raise instead of bare `assert` — asserts disappear
            # under `python -O`, silently passing bad boxes through.
            if xmax <= xmin or ymax <= ymin:
                raise ValueError(
                    "Degenerate box in %s: %r" % (xml_file, (xmin, ymin, xmax, ymax))
                )
            o_width = xmax - xmin
            o_height = ymax - ymin
            ann = {
                "area": int(o_width * o_height),
                "iscrowd": 0,
                "image_id": fname,
                "bbox": [xmin, ymin, o_width, o_height],
                "category_id": category_id,
                "ignore": 0,
                "segmentation": [],
                "id": bnd_id,
            }
            json_dict["annotations"].append(ann)
            bnd_id = bnd_id + 1

        size = get_and_check(root, "size", 1)
        width = int(float(get_and_check(size, "width", 1).text))
        height = int(float(get_and_check(size, "height", 1).text))
        image = {
            "file_name": fname,
            "height": height,
            "width": width,
            "id": fname,
        }
        json_dict["images"].append(image)

    for cate, cid in categories.items():
        cat = {"supercategory": "none", "id": cid, "name": cate}
        json_dict["categories"].append(cat)

    # BUGFIX: makedirs('') raises when json_file has no directory part.
    out_dir = os.path.dirname(json_file)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)
    with open(json_file, "w") as f:
        json.dump(json_dict, f)
122 |
123 |
if __name__ == "__main__":
    from glob import glob

    # One COCO json is produced inside each Unity split folder.
    SPLIT_FOLDERS = [
        '/media/datos/shared_datasets/unity_syntectic_victory/split-500',
        '/media/datos/shared_datasets/unity_syntectic_victory/split-1000',
        '/media/datos/shared_datasets/unity_syntectic_victory/split-2500',
        '/media/datos/shared_datasets/unity_syntectic_victory/split-5000',
    ]

    for folder in SPLIT_FOLDERS:
        xmlFiles = glob(os.path.join(folder, '*.xml'))
        outJson = os.path.join(folder, 'split_coco.json')
        print("Number of xml files: {}".format(len(xmlFiles)))
        convert(xmlFiles, outJson)
        print("Success: {}".format(outJson))
--------------------------------------------------------------------------------
/vocToCoco_simulacro.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 |
3 | # pip install lxml
4 |
5 | import sys
6 | import os
7 | import json
8 | import xml.etree.ElementTree as ET
9 | import glob
10 | import pickle
11 |
12 | START_BOUNDING_BOX_ID = 1
13 | PRE_DEFINE_CATEGORIES = {'Pistol': 1}
14 |
def get(root, name):
    """Return every element matching *name* under *root* (possibly empty)."""
    vars = root.findall(name)
    return vars
18 |
19 |
def get_and_check(root, name, length):
    """Find *name* elements under *root* and validate the match count.

    Raises ValueError when nothing matches, or when *length* > 0 and the
    number of matches differs from *length*. Returns the single element
    when *length* == 1, otherwise the list of matches.
    """
    vars = root.findall(name)
    if len(vars) == 0:
        raise ValueError("Can not find %s in %s." % (name, root.tag))
    if length > 0 and len(vars) != length:
        raise ValueError(
            "The size of %s is supposed to be %d, but is %d."
            % (name, length, len(vars))
        )
    if length == 1:
        vars = vars[0]
    return vars
32 |
33 |
def get_categories(xml_files):
    """Scan VOC XML files and map each object class name (the first child
    of every <object>) to an index based on sorted name order."""
    classes_names = []
    for xml_file in xml_files:
        tree = ET.parse(xml_file)
        root = tree.getroot()
        for member in root.findall("object"):
            classes_names.append(member[0].text)
    # Deduplicate, then sort for a deterministic name -> id mapping.
    classes_names = list(set(classes_names))
    classes_names.sort()
    return {name: i for i, name in enumerate(classes_names)}
44 |
45 |
def convert(xml_files, json_file):
    """Convert VOC-style XML annotation files into one COCO-format JSON
    file written to *json_file*.

    Knife objects are dropped; every other class is collapsed to
    'Pistol'. Image ids are the image file names (strings rather than
    COCO's usual ints — presumably accepted by the downstream loader).
    """
    json_dict = {"images": [], 'categories': [], "annotations": [], "type": "instances"}

    # NOTE(review): this aliases the module-level dict, so any category
    # added below would leak into later convert() calls — harmless today
    # because every kept object is remapped to the predefined 'Pistol'.
    if PRE_DEFINE_CATEGORIES is not None:
        categories = PRE_DEFINE_CATEGORIES
    else:
        categories = get_categories(xml_files)
    bnd_id = START_BOUNDING_BOX_ID
    for xml_file in sorted(xml_files):
        tree = ET.parse(xml_file)
        root = tree.getroot()
        path = get(root, "path")
        if len(path) == 1:
            filename = os.path.basename(path[0].text)
        elif len(path) == 0:
            filename = get_and_check(root, "filename", 1).text
        else:
            raise ValueError("%d paths found in %s" % (len(path), xml_file))

        # <path> may be a Windows path; keep only the final component.
        fname = filename.split('\\')[-1]

        objects = []
        for obj in get(root, "object"):
            category = get_and_check(obj, "name", 1).text
            # TODO: Change to match edgecase dataset
            if category == 'Knife':
                continue
            else:
                category = 'Pistol'

            if category not in categories:
                new_id = len(categories)
                categories[category] = new_id
            category_id = categories[category]
            bndbox = get_and_check(obj, "bndbox", 1)
            # VOC coordinates are 1-based: shift mins to 0-based, keep maxes.
            xmin = int(float(get_and_check(bndbox, "xmin", 1).text)) - 1
            ymin = int(float(get_and_check(bndbox, "ymin", 1).text)) - 1
            xmax = int(float(get_and_check(bndbox, "xmax", 1).text))
            ymax = int(float(get_and_check(bndbox, "ymax", 1).text))
            # NOTE(review): validation via assert vanishes under `python -O`.
            assert xmax > xmin
            assert ymax > ymin
            o_width = abs(xmax - xmin)
            o_height = abs(ymax - ymin)
            ann = {
                "area": int(o_width * o_height),
                "iscrowd": 0,
                "image_id": fname,
                "bbox": [xmin, ymin, o_width, o_height],
                "category_id": category_id,
                "ignore": 0,
                "segmentation": [],
                "id": bnd_id,
            }
            json_dict["annotations"].append(ann)
            objects.append(ann)
            bnd_id = bnd_id + 1

        size = get_and_check(root, "size", 1)
        width = int(float(get_and_check(size, "width", 1).text))
        height = int(float(get_and_check(size, "height", 1).text))
        image = {
            "file_name": fname,
            "height": height,
            "width": width,
            #"objects": objects,
            "id": fname,
        }
        json_dict["images"].append(image)

    for cate, cid in categories.items():
        cat = {"supercategory": "none", "id": cid, "name": cate}
        json_dict["categories"].append(cat)

    os.makedirs(os.path.dirname(json_file), exist_ok=True)
    with open(json_file, "w") as f:
        json.dump(json_dict, f)
122 |
123 |
if __name__ == "__main__":
    from glob import glob
    import shutil

    # Kept for reference; RAW_DATASET below is the directory actually used.
    dirs = [
        '/home/datos/Downloads/Images',
    ]

    RAW_DATASET = '/home/datos/Downloads/Images'
    ROOT_DATASET_FOLDER = '/media/datos/shared_datasets/simulacro_guns'

    # Copy the raw jpgs into the dataset folder, skipping ones already there.
    allFiles = glob(os.path.join(RAW_DATASET, '*.jpg'))
    jpgDir = os.path.join(ROOT_DATASET_FOLDER, 'allCams')
    os.makedirs(jpgDir, exist_ok=True)
    for f in allFiles:
        # BUGFIX: `f` is an absolute path, so os.path.join(jpgDir, f)
        # returned `f` itself — which always exists because it was just
        # globbed — and nothing was ever copied. Join the basename instead.
        if not os.path.exists(os.path.join(jpgDir, os.path.basename(f))):
            shutil.copy(f, jpgDir)

    # (glob pattern, output name): one COCO json per camera plus a combined one.
    matchTuples = [('*.xml', 'all_cams') , ('Cam1*.xml', 'cam1'), ('Cam5*.xml', 'cam5'), ('Cam7*.xml', 'cam7')]

    for matchStr, datasetName in matchTuples:
        allFiles = glob(os.path.join(RAW_DATASET, matchStr))
        fname = os.path.join(ROOT_DATASET_FOLDER, f'{datasetName}.json')
        print("Number of xml files: {}".format(len(allFiles)))
        convert(allFiles, fname)
        print("Success: {}".format(fname))
--------------------------------------------------------------------------------
/index.md:
--------------------------------------------------------------------------------
1 | ## Abstract
Object detectors have improved in recent years, obtaining better results and faster inference time. However, small object detection is still a problem that does not yet have a definitive solution. Autonomous weapon detection on closed-circuit television (CCTV) has been studied recently, being extremely useful in the field of security, counter-terrorism, and risk mitigation. This article presents a new dataset obtained from a real CCTV installed in a university and the generation of synthetic images, to which Faster R-CNN was applied using Feature Pyramid Network with ResNet-50, resulting in a weapon detection model able to be used in quasi real-time CCTV (90 ms of inference time with an NVIDIA GeForce GTX-1080Ti card) and improving the state of the art on weapon detection in a two-stage training. In this work, an exhaustive experimental study of the detector with these datasets was performed, showing the impact of synthetic datasets on the training of weapon detection systems, as well as the main limitations that these systems present nowadays. The generated synthetic dataset and the real CCTV dataset are available to the whole research community.
3 |
4 | ## Datasets
5 | This study presents two new datasets, being these: "Mock attack dataset" and "Unity synthetic dataset".
6 |
7 | ### Mock attack dataset
This dataset has been manually annotated and collected during a mock attack, after obtaining all the required permissions from our University and the security personnel. Details are presented below, indicating each of the cameras used during the mock attack and the scenarios they present.
9 |
10 | Infrastructure for data acquisition is composed of three surveillance cameras located at different places in the same area covering two different corridors and one entrance, forming different scenarios. The description of each camera is as follows:
11 |
12 | - Cam1: located in one of the two corridors, it presents some conflicting objects, such as doors or bins, and the lighting is uniform. The time of the video sequence for this camera is 40 min and 25 s. Selecting the five segments with movement, the total duration is 5 min and 4 s. These segments were manually annotated at 2 frames per second (FPS), resulting in a total of 607 frames.
13 |
14 | - Cam7: located in the other corridor, this camera presents similarities with Cam1, both in scenery and lighting. However, Cam7 presents more conflicting objects, such as a fire extinguisher or objects on the walls. The duration of the sequence for this camera is 1 h, 3 min and 34 s. Selecting the segments with movement, the total duration is 29 min and 16 s. These segments were annotated at 2 FPS, resulting in a total of 3511 frames.
15 |
16 | - Cam5: located at the entrance of a university module, presents some conflicting objects, such as a black carpet on the floor, and also irregular lighting with rays covering part of the scene. The duration of the video sequence for this camera is 39 min and 7 s. Choosing segments with movement, the time decreases to 8 min and 36 s. This sequence was annotated at 2 FPS, resulting in a total of 1031 frames.
17 |
18 |
19 |
20 |
21 |
22 | ### Unity synthetic dataset
23 | This dataset was generated by modeling in Unity Game Engine a scenario that emulates a part of a city and an educational center within it. Several cameras capture the movements of multiple characters, made up of 11 different models and 7 animations. These images enhance the generated datasets with 11 different objects: 4 types of handguns, 5 types of rifles, a knife, and a smartphone. This dataset consists of three splits with 500 (U0.5), 1000 (U1) and 2500 (U2.5) images.
24 |
25 |
26 |
27 |
28 |
29 | ## Download Datasets
30 | - Full dataset: [Hugging Face link](https://huggingface.co/datasets/jsalazar/US-Real-time-gun-detection-in-CCTV-An-open-problem-dataset){:target="_blank" rel="noopener"}.
31 | - Mock attack dataset: [US - Mock Attack Cam 1, 5, 7](https://huggingface.co/datasets/jsalazar/US-Real-time-gun-detection-in-CCTV-An-open-problem-dataset/resolve/main/weapons_images_2fps.zip){:target="_blank" rel="noopener"}.
32 | - Unity synthetic dataset: US - Unity Synthetic Dataset [U0.5](https://huggingface.co/datasets/jsalazar/US-Real-time-gun-detection-in-CCTV-An-open-problem-dataset/resolve/main/Unity/split-500.zip){:target="_blank" rel="noopener"}, [U1](https://huggingface.co/datasets/jsalazar/US-Real-time-gun-detection-in-CCTV-An-open-problem-dataset/resolve/main/Unity/split-1000.zip){:target="_blank" rel="noopener"} and [U2.5](https://huggingface.co/datasets/jsalazar/US-Real-time-gun-detection-in-CCTV-An-open-problem-dataset/resolve/main/Unity/split-2500.zip){:target="_blank" rel="noopener"}.
33 |
34 | ## Terms of use
35 | This dataset can be used for academic research free of charge, citing the paper as we explain below. If you seek to use the data for commercial purposes please [contact us](mailto:jaalvarez@us.es).
36 |
37 |
38 | ## Citation
39 | If you use our dataset, please kindly cite the following paper: Real-time gun detection in CCTV: An open problem. Neural Networks (2020), doi: [https://doi.org/10.1016/j.neunet.2020.09.013](https://doi.org/10.1016/j.neunet.2020.09.013){:target="_blank" rel="noopener"}.
40 |
41 | ```
42 | @article{SalazarGonzalez2020,
43 | title = "Real-time gun detection in CCTV: An open problem",
44 | journal = "Neural Networks",
45 | year = "2020",
46 | issn = "0893-6080",
47 | doi = "https://doi.org/10.1016/j.neunet.2020.09.013",
48 | url = "http://www.sciencedirect.com/science/article/pii/S0893608020303361",
49 | author = "Salazar Gonz{\'{a}}lez, Jose L. and Zaccaro, Carlos and {\'{A}}lvarez-Garc{\'{i}}a, Juan A. and Soria-Morillo, Luis M. and Sancho Caparrini, Fernando",
50 | }
51 | ```
52 |
53 |
54 | ## License
55 |
56 | This work is licensed under a
57 | [Creative Commons Attribution-NonCommercial 4.0 International License][cc-by-nc]. Contact the authors of this work for commercial use.
58 |
59 | [![CC BY NC 4.0][cc-by-nc-image]][cc-by-nc]
60 |
61 | [cc-by-nc]: http://creativecommons.org/licenses/by-nc/4.0/
62 | [cc-by-nc-image]: https://i.creativecommons.org/l/by-nc/4.0/88x31.png
63 |
--------------------------------------------------------------------------------
/batch_experiments.py:
--------------------------------------------------------------------------------
1 | #import torch; from torch.utils.cpp_extension import CUDA_HOME; print(torch.cuda.is_available(), CUDA_HOME)
2 | import random
3 | import os
4 | import json
5 | import argparse
6 | import shutil
7 | import re
8 | import openpyxl
9 | import glob
10 |
# The single positional argument selects the GPU this run is pinned to.
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('id', type=int, action="store")

args = parser.parse_args()
import os
# Must be set before detectron2/torch (imported below) initialise CUDA.
os.environ["CUDA_VISIBLE_DEVICES"]=f"{args.id}"
17 |
18 | from detectron2.engine import launch
19 | import detectron2
20 | from detectron2.utils.logger import setup_logger
21 | setup_logger()
22 |
23 | # import some common libraries
24 | import matplotlib.pyplot as plt
25 | import numpy as np
26 | import cv2 as cv
27 | import importlib.util
28 |
29 | from detectron2.checkpoint import DetectionCheckpointer, PeriodicCheckpointer
30 | from detectron2.engine import DefaultTrainer
31 | from detectron2.config import get_cfg
32 |
33 | # import some common detectron2 utilities
34 | from detectron2.engine import DefaultPredictor
35 | from detectron2.config import get_cfg
36 | from detectron2.utils.visualizer import Visualizer
37 | from detectron2.data import MetadataCatalog, DatasetCatalog
38 | from detectron2.data.datasets import load_coco_json, register_coco_instances
39 | from detectron2.evaluation import COCOEvaluator, inference_on_dataset
40 | from detectron2.evaluation.coco_evaluation import _evaluate_predictions_on_coco
41 |
42 | from detectron2.data import build_detection_test_loader
43 | from detectron2.modeling import build_model
44 |
# Load custom_datasets.py from the script's own directory without requiring
# it to be importable from sys.path.
spec = importlib.util.spec_from_file_location("custom_datasets", os.path.join(os.path.dirname(__file__), 'custom_datasets.py'))
custom_datasets = importlib.util.module_from_spec(spec)
spec.loader.exec_module(custom_datasets)

# Dataset name groups; the names are registered by custom_datasets.loadDatasets().
DATASETS_REAL = ['guns_granada_train', 'guns_simulacro_1', 'guns_simulacro_7']
DATASETS_SYTH = ['guns_edgecase', 'unity_synthetic_500', 'unity_synthetic_1000', 'unity_synthetic_2500', 'unity_synthetic_5000']


# Pipeline stage switches — enable exactly the stages to run in this session.
SHOW_DATASET = False
TRAIN = False
SAVE_DATASET_PREDICTIONS = False
SHOW_PREVIEW = False
PREVIEW_ON_DIR = False
MAKE_ANNOS = False
SHOW_EVALUATION = True
60 |
class JsonCOCOEvaluator(COCOEvaluator):
    """COCOEvaluator variant that reloads predictions previously dumped to
    ``coco_instances_results.json`` (instead of re-running inference) and
    augments the standard COCO metrics with raw TP/FP/FN counts at a
    score threshold.
    """

    def __init__(self, *arg, **kwargs):
        # Standard evaluator setup, then reload cached detections from the
        # output directory of an earlier inference run.
        super().__init__(*arg, **kwargs)
        with open(os.path.join(self._output_dir, 'coco_instances_results.json')) as f:
            self._coco_results = json.load(f)

    def evaluate(self, theshold):  # NOTE(review): 'theshold' typo kept — callers may pass it by name
        """Run bbox COCO evaluation on the cached results; return the
        derived metrics plus 'TP'/'FP'/'FN' counts at score >= *theshold*
        and the per-detection (label, score) pairs under 'ProbPreds'.
        """
        task ='bbox'
        coco_eval = (
            _evaluate_predictions_on_coco(
                self._coco_api, self._coco_results, task, kpt_oks_sigmas=self._kpt_oks_sigmas
            )
            if len(self._coco_results) > 0
            else None # cocoapi does not handle empty results very well
        )
        res = self._derive_coco_results(
            coco_eval, task, class_names=self._metadata.get("thing_classes")
        )

        FP = FN = TP = 0.
        prob_preds = []
        # COCOeval.evalImgs is ordered by category, then area range, then
        # image; taking the first quarter presumably keeps the 'all' area
        # range of the single gun category — TODO confirm against the
        # pycocotools ordering for this config.
        for cocoRes in coco_eval.evalImgs[:int(len(coco_eval.evalImgs)/4)]:
            if cocoRes is None:
                continue
            # dtMatches[0]/dtScores: matches at the first IoU threshold.
            for det, score in zip(cocoRes['dtMatches'][0], cocoRes['dtScores']):
                # No gt match
                if det == 0.:
                    prob_preds.append([0, score])
                    if score >= theshold:
                        FP += 1
                else:
                    prob_preds.append([1, score])
                    if score >= theshold:
                        TP += 1
                    else:
                        FN += 1

            # Ground-truth boxes never matched by any detection are misses.
            for gt in cocoRes['gtMatches'][0]:
                if gt == 0.:
                    prob_preds.append([1, 0.])
                    FN += 1

        res.update({'TP': TP, 'FP': FP, 'FN': FN, 'ProbPreds': prob_preds})
        return {task: res}
105 |
# Register all custom COCO-format datasets with detectron2's catalog.
custom_datasets.loadDatasets()

# Root directory where every experiment writes checkpoints and metrics.
EXPERIMENTS_OUTPUT_PATH = '/mnt/datos/experiments/guns_detection'
109 |
# Backbone configurations: detectron2 model-zoo config file + pretrained
# COCO weights, keyed by the short name used in the experiment dicts below.
networks = {
    'faster_rcnn_R_50_FPN_1x': {
        'cfg': '../../build/configs/COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml',
        'weights': 'detectron2://COCO-Detection/faster_rcnn_R_50_FPN_1x/137257794/model_final_b275ba.pkl'
    },
    # Larger ResNeXt-101 backbone.
    'faster101': {
        'cfg': '../../build/configs/COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml',
        'weights': 'detectron2://COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x/139173657/model_final_68b088.pkl'
    },
}
120 |
# Experiment grid: each entry describes one training run (solver steps,
# learning rate, backbone, train/test dataset tuples, and optionally 'from',
# a checkpoint to fine-tune from).
experiments = [
    {'steps': 40000, 'lr': 0.002, 'net': 'faster_rcnn_R_50_FPN_1x', 'train': ('guns_edgecase',), 'test': ("guns_granada_test",)},
    {'steps': 40000, 'lr': 0.002, 'net': 'faster_rcnn_R_50_FPN_1x', 'train': ('unity_synthetic_500',), 'test': ("guns_granada_test",),},
    {'steps': 40000, 'lr': 0.002, 'net': 'faster_rcnn_R_50_FPN_1x', 'train': ('unity_synthetic_1000',), 'test': ("guns_granada_test",),},
    {'steps': 40000, 'lr': 0.002, 'net': 'faster_rcnn_R_50_FPN_1x', 'train': ('unity_synthetic_2500',), 'test': ("guns_granada_test",),},

    # Fine-tuning runs: start from the synthetic-pretrained checkpoints above.
    {'steps': 80000, 'lr': 0.002, 'net': 'faster_rcnn_R_50_FPN_1x', 'train': ('guns_granada_train',), 'test': ('guns_granada_test',), 'from': f"{EXPERIMENTS_OUTPUT_PATH}/faster_rcnn_R_50_FPN_1x[LR=0.002][('guns_edgecase',)]/model_0039999.pth"},
    {'steps': 80000, 'lr': 0.002, 'net': 'faster_rcnn_R_50_FPN_1x', 'train': ('guns_granada_train',), 'test': ('guns_granada_test',), 'from': f"{EXPERIMENTS_OUTPUT_PATH}/faster_rcnn_R_50_FPN_1x[LR=0.002][('unity_synthetic_500',)]/model_0039999.pth"},
    {'steps': 80000, 'lr': 0.002, 'net': 'faster_rcnn_R_50_FPN_1x', 'train': ('guns_granada_train',), 'test': ('guns_granada_test',), 'from': f"{EXPERIMENTS_OUTPUT_PATH}/faster_rcnn_R_50_FPN_1x[LR=0.002][('unity_synthetic_1000',)]/model_0039999.pth"},
    {'steps': 80000, 'lr': 0.002, 'net': 'faster_rcnn_R_50_FPN_1x', 'train': ('guns_granada_train',), 'test': ('guns_granada_test',), 'from': f"{EXPERIMENTS_OUTPUT_PATH}/faster_rcnn_R_50_FPN_1x[LR=0.002][('unity_synthetic_2500',)]/model_0039999.pth"},
    {'steps': 80000, 'lr': 0.002, 'net': 'faster_rcnn_R_50_FPN_1x', 'train': ('guns_granada_train', 'guns_simulacro_1', 'guns_simulacro_7',), 'test': ("guns_granada_test",), 'from': f"{EXPERIMENTS_OUTPUT_PATH}/faster_rcnn_R_50_FPN_1x[LR=0.002][('guns_edgecase',)]/model_0039999.pth"},
    {'steps': 80000, 'lr': 0.002, 'net': 'faster_rcnn_R_50_FPN_1x', 'train': ('guns_granada_train', 'guns_simulacro_1', 'guns_simulacro_7',), 'test': ("guns_granada_test",), 'from': f"{EXPERIMENTS_OUTPUT_PATH}/faster_rcnn_R_50_FPN_1x[LR=0.002][('unity_synthetic_500',)]/model_0039999.pth"},
    {'steps': 80000, 'lr': 0.002, 'net': 'faster_rcnn_R_50_FPN_1x', 'train': ('guns_granada_train', 'guns_simulacro_1', 'guns_simulacro_7',), 'test': ("guns_granada_test",), 'from': f"{EXPERIMENTS_OUTPUT_PATH}/faster_rcnn_R_50_FPN_1x[LR=0.002][('unity_synthetic_2500',)]/model_0039999.pth"},
    {'steps': 40000, 'lr': 0.002, 'net': 'faster_rcnn_R_50_FPN_1x', 'train': ('guns_granada_train',), 'test': ("guns_granada_test",)},
    {'steps': 40000, 'lr': 0.002, 'net': 'faster_rcnn_R_50_FPN_1x', 'train': ('guns_granada_train', 'guns_simulacro_1', 'guns_simulacro_7'), 'test': ("guns_granada_test",)},

    {'steps': 40000, 'lr': 0.002, 'net': 'faster_rcnn_R_50_FPN_1x', 'train': ('guns_simulacro_1', 'guns_simulacro_7'), 'test': ("guns_granada_test",)},
    {'steps': 80000, 'lr': 0.002, 'net': 'faster_rcnn_R_50_FPN_1x', 'train': ('guns_granada_train',), 'test': ("guns_granada_test",)},

    {'steps': 40000, 'lr': 0.002, 'net': 'faster101', 'train': ('guns_edgecase',), 'test': ('guns_granada_test',),},
    {'steps': 40000, 'lr': 0.002, 'net': 'faster101', 'train': ('unity_synthetic_500',), 'test': ('guns_granada_test',),},
    {'steps': 40000, 'lr': 0.002, 'net': 'faster101', 'train': ('unity_synthetic_1000',), 'test': ('guns_granada_test',),},
    {'steps': 40000, 'lr': 0.002, 'net': 'faster101', 'train': ('unity_synthetic_2500',), 'test': ('guns_granada_test',),},

    {'steps': 80000, 'lr': 0.002, 'net': 'faster101', 'train': ('guns_granada_train',), 'test': ('guns_granada_test',), 'from': f"{EXPERIMENTS_OUTPUT_PATH}/faster101[LR=0.002][('guns_edgecase',)]/model_0039999.pth"},
    {'steps': 80000, 'lr': 0.002, 'net': 'faster101', 'train': ('guns_granada_train',), 'test': ('guns_granada_test',), 'from': f"{EXPERIMENTS_OUTPUT_PATH}/faster101[LR=0.002][('unity_synthetic_500',)]/model_0039999.pth"},
    {'steps': 80000, 'lr': 0.002, 'net': 'faster101', 'train': ('guns_granada_train',), 'test': ('guns_granada_test',), 'from': f"{EXPERIMENTS_OUTPUT_PATH}/faster101[LR=0.002][('unity_synthetic_1000',)]/model_0039999.pth"},
    {'steps': 80000, 'lr': 0.002, 'net': 'faster101', 'train': ('guns_granada_train',), 'test': ('guns_granada_test',), 'from': f"{EXPERIMENTS_OUTPUT_PATH}/faster101[LR=0.002][('unity_synthetic_2500',)]/model_0039999.pth"},

    # NOTE(review): this entry has no 'test' key, so train() will raise
    # KeyError on exp['test'] if it is ever selected — confirm intent.
    {'steps': 20000, 'lr': 0.002, 'net': 'faster_rcnn_R_50_FPN_1x', 'train': ('guns_edgecase',),},
    {'steps': 20000, 'lr': 0.002, 'net': 'faster_rcnn_R_50_FPN_1x', 'train': ('guns_edgecase', 'guns_granada_train', 'guns_simulacro_1', 'guns_simulacro_7'), 'test': ("guns_granada_test",),},
    # BUGFIX: removed a duplicated 'test' key from this dict literal (the
    # second occurrence silently overwrote the first with the same value).
    {'steps': 20000, 'lr': 0.002, 'net': 'faster_rcnn_R_50_FPN_1x', 'train': ('guns_edgecase', 'guns_granada_train', 'guns_simulacro_1', 'guns_simulacro_7', 'alamy'), 'test': ("guns_granada_test",),},
    {'steps': 20000, 'lr': 0.002, 'net': 'faster_rcnn_R_50_FPN_1x', 'train': ('unity_synthetic_1000',), 'test': ("guns_granada_test",),},
    {'steps': 20000, 'lr': 0.002, 'net': 'faster_rcnn_R_50_FPN_1x', 'train': ('unity_synthetic_2500',), 'test': ("guns_granada_test",),},
    {'steps': 20000, 'lr': 0.002, 'net': 'faster_rcnn_R_50_FPN_1x', 'train': ('unity_synthetic_1000', 'guns_granada_train', 'guns_simulacro_1', 'guns_simulacro_7'), 'test': ("guns_granada_test",),},
    {'steps': 20000, 'lr': 0.002, 'net': 'faster_rcnn_R_50_FPN_1x', 'train': ('unity_synthetic_2500', 'guns_granada_train', 'guns_simulacro_1', 'guns_simulacro_7'), 'test': ("guns_granada_test",),},


]
160 |
def train(exp):
    """Train a detector as described by the experiment dict `exp`.

    Expects keys 'net', 'lr', 'steps', 'train', 'test', and optionally
    'from' (checkpoint path to fine-tune from). Writes all output under
    EXPERIMENTS_OUTPUT_PATH in a directory named after the experiment.
    """
    print(f'Executing experiment: \n{exp}\n')

    net = networks[exp['net']]
    cfg = get_cfg()
    cfg.merge_from_file(net['cfg'])
    # Fine-tune from a previous checkpoint when 'from' is given, otherwise
    # start from the model-zoo COCO weights.
    cfg.MODEL.WEIGHTS = exp.get('from', net['weights'])

    # Single 'gun' class; allow large test images.
    cfg.INPUT.MAX_SIZE_TEST = 2500
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512
    cfg.DATALOADER.NUM_WORKERS = 2
    cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS = True

    cfg.DATASETS.TRAIN = exp['train']
    cfg.DATASETS.TEST = exp['test']
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.BASE_LR = exp['lr']
    cfg.SOLVER.MAX_ITER = exp['steps']
    cfg.SOLVER.CHECKPOINT_PERIOD = 5000
    cfg.TEST.EVAL_PERIOD = 0  # no periodic evaluation while training

    # Output directory encodes network, LR, training sets, and (if any)
    # the run this one was fine-tuned from.
    if 'from' in exp:
        resume_tag = exp['from'].split('/')[-2]
        cfg.OUTPUT_DIR = f"{EXPERIMENTS_OUTPUT_PATH}/{exp['net']}[LR={exp['lr']}][{str(exp['train'])}]FROM[{resume_tag}]"
    else:
        cfg.OUTPUT_DIR = f"{EXPERIMENTS_OUTPUT_PATH}/{exp['net']}[LR={exp['lr']}][{str(exp['train'])}]"

    cfg.freeze()
    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)

    class Trainer(DefaultTrainer):
        @classmethod
        def build_evaluator(cls, cfg_, dataset_name):
            # Non-distributed COCO evaluation for the test set.
            return COCOEvaluator(dataset_name, cfg_, distributed=False)

    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=True)
    trainer.train()
206 |
def test(exp, selected_dataset, theshold):
    """Run COCO evaluation of a trained experiment on `selected_dataset`.

    NOTE(review): `theshold` (sic) is accepted for interface compatibility
    but is never used inside this function — confirm whether it should be
    forwarded to the evaluator.
    """
    print(f'Testing experiment: \n{exp}\n')

    cfg = get_cfg()
    cfg.merge_from_file(networks[exp['net']]['cfg'])
    cfg.DATASETS.TEST = exp['test']
    cfg.INPUT.MAX_SIZE_TEST = 2500
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1
    cfg.DATALOADER.NUM_WORKERS = 2

    # Recreate the same output-directory name train() used for this run.
    if 'from' in exp:
        resume_tag = exp['from'].split('/')[-2]
        cfg.OUTPUT_DIR = f"{EXPERIMENTS_OUTPUT_PATH}/{exp['net']}[LR={exp['lr']}][{str(exp['train'])}]FROM[{resume_tag}]"
    else:
        cfg.OUTPUT_DIR = f"{EXPERIMENTS_OUTPUT_PATH}/{exp['net']}[LR={exp['lr']}][{str(exp['train'])}]"

    cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, 'model_final.pth')

    # Only evaluate runs trained purely on the known real/synthetic datasets.
    if all(ds in DATASETS_REAL or ds in DATASETS_SYTH for ds in exp['train']):
        out_dir = f"{cfg.OUTPUT_DIR}/{selected_dataset}"
        model = build_model(cfg)
        DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=False
        )
        evaluator = COCOEvaluator(selected_dataset, cfg, False, output_dir=out_dir)
        loader = build_detection_test_loader(cfg, selected_dataset)
        inference_on_dataset(model, loader, evaluator)
234 |
if __name__ == '__main__':
    # Shard the experiment list across workers: this worker (1-based id from
    # `args`, parsed earlier in the file) runs every `size`-th experiment.
    module = args.id - 1
    size = 2

    # Only used by the disabled distributed launch below.
    num_gpus = 2
    port = 2 ** 15 + 2 ** 14 + hash(os.getuid()) % 2 ** 14

    for idx, experiment in enumerate(experiments):
        if idx % size == module:
            train(experiment)
            # Distributed alternative:
            # launch(train, num_gpus,
            #        dist_url="tcp://127.0.0.1:{}".format(port),
            #        args=(experiment,))
252 |
--------------------------------------------------------------------------------
/LICENSE.md:
--------------------------------------------------------------------------------
1 | Creative Commons Attribution-NonCommercial 4.0 International
2 |
3 | Creative Commons Corporation ("Creative Commons") is not a law firm and
4 | does not provide legal services or legal advice. Distribution of
5 | Creative Commons public licenses does not create a lawyer-client or
6 | other relationship. Creative Commons makes its licenses and related
7 | information available on an "as-is" basis. Creative Commons gives no
8 | warranties regarding its licenses, any material licensed under their
9 | terms and conditions, or any related information. Creative Commons
10 | disclaims all liability for damages resulting from their use to the
11 | fullest extent possible.
12 |
13 | Using Creative Commons Public Licenses
14 |
15 | Creative Commons public licenses provide a standard set of terms and
16 | conditions that creators and other rights holders may use to share
17 | original works of authorship and other material subject to copyright and
18 | certain other rights specified in the public license below. The
19 | following considerations are for informational purposes only, are not
20 | exhaustive, and do not form part of our licenses.
21 |
22 | - Considerations for licensors: Our public licenses are intended for
23 | use by those authorized to give the public permission to use
24 | material in ways otherwise restricted by copyright and certain other
25 | rights. Our licenses are irrevocable. Licensors should read and
26 | understand the terms and conditions of the license they choose
27 | before applying it. Licensors should also secure all rights
28 | necessary before applying our licenses so that the public can reuse
29 | the material as expected. Licensors should clearly mark any material
30 | not subject to the license. This includes other CC-licensed
31 | material, or material used under an exception or limitation to
32 | copyright. More considerations for licensors :
33 | wiki.creativecommons.org/Considerations\_for\_licensors
34 |
35 | - Considerations for the public: By using one of our public licenses,
36 | a licensor grants the public permission to use the licensed material
37 | under specified terms and conditions. If the licensor's permission
38 | is not necessary for any reason–for example, because of any
39 | applicable exception or limitation to copyright–then that use is not
40 | regulated by the license. Our licenses grant only permissions under
41 | copyright and certain other rights that a licensor has authority to
42 | grant. Use of the licensed material may still be restricted for
43 | other reasons, including because others have copyright or other
44 | rights in the material. A licensor may make special requests, such
45 | as asking that all changes be marked or described. Although not
46 | required by our licenses, you are encouraged to respect those
47 | requests where reasonable. More considerations for the public :
48 | wiki.creativecommons.org/Considerations\_for\_licensees
49 |
50 | Creative Commons Attribution-NonCommercial 4.0 International Public
51 | License
52 |
53 | By exercising the Licensed Rights (defined below), You accept and agree
54 | to be bound by the terms and conditions of this Creative Commons
55 | Attribution-NonCommercial 4.0 International Public License ("Public
56 | License"). To the extent this Public License may be interpreted as a
57 | contract, You are granted the Licensed Rights in consideration of Your
58 | acceptance of these terms and conditions, and the Licensor grants You
59 | such rights in consideration of benefits the Licensor receives from
60 | making the Licensed Material available under these terms and conditions.
61 |
62 | - Section 1 – Definitions.
63 |
64 | - a. Adapted Material means material subject to Copyright and
65 | Similar Rights that is derived from or based upon the Licensed
66 | Material and in which the Licensed Material is translated,
67 | altered, arranged, transformed, or otherwise modified in a
68 | manner requiring permission under the Copyright and Similar
69 | Rights held by the Licensor. For purposes of this Public
70 | License, where the Licensed Material is a musical work,
71 | performance, or sound recording, Adapted Material is always
72 | produced where the Licensed Material is synched in timed
73 | relation with a moving image.
74 | - b. Adapter's License means the license You apply to Your
75 | Copyright and Similar Rights in Your contributions to Adapted
76 | Material in accordance with the terms and conditions of this
77 | Public License.
78 | - c. Copyright and Similar Rights means copyright and/or similar
79 | rights closely related to copyright including, without
80 | limitation, performance, broadcast, sound recording, and Sui
81 | Generis Database Rights, without regard to how the rights are
82 | labeled or categorized. For purposes of this Public License, the
83 | rights specified in Section 2(b)(1)-(2) are not Copyright and
84 | Similar Rights.
85 | - d. Effective Technological Measures means those measures that,
86 | in the absence of proper authority, may not be circumvented
87 | under laws fulfilling obligations under Article 11 of the WIPO
88 | Copyright Treaty adopted on December 20, 1996, and/or similar
89 | international agreements.
90 | - e. Exceptions and Limitations means fair use, fair dealing,
91 | and/or any other exception or limitation to Copyright and
92 | Similar Rights that applies to Your use of the Licensed
93 | Material.
94 | - f. Licensed Material means the artistic or literary work,
95 | database, or other material to which the Licensor applied this
96 | Public License.
97 | - g. Licensed Rights means the rights granted to You subject to
98 | the terms and conditions of this Public License, which are
99 | limited to all Copyright and Similar Rights that apply to Your
100 | use of the Licensed Material and that the Licensor has authority
101 | to license.
102 | - h. Licensor means the individual(s) or entity(ies) granting
103 | rights under this Public License.
104 | - i. NonCommercial means not primarily intended for or directed
105 | towards commercial advantage or monetary compensation. For
106 | purposes of this Public License, the exchange of the Licensed
107 | Material for other material subject to Copyright and Similar
108 | Rights by digital file-sharing or similar means is NonCommercial
109 | provided there is no payment of monetary compensation in
110 | connection with the exchange.
111 | - j. Share means to provide material to the public by any means or
112 | process that requires permission under the Licensed Rights, such
113 | as reproduction, public display, public performance,
114 | distribution, dissemination, communication, or importation, and
115 | to make material available to the public including in ways that
116 | members of the public may access the material from a place and
117 | at a time individually chosen by them.
118 | - k. Sui Generis Database Rights means rights other than copyright
119 | resulting from Directive 96/9/EC of the European Parliament and
120 | of the Council of 11 March 1996 on the legal protection of
121 | databases, as amended and/or succeeded, as well as other
122 | essentially equivalent rights anywhere in the world.
123 | - l. You means the individual or entity exercising the Licensed
124 | Rights under this Public License. Your has a corresponding
125 | meaning.
126 |
127 | - Section 2 – Scope.
128 |
129 | - a. License grant.
130 | - 1. Subject to the terms and conditions of this Public
131 | License, the Licensor hereby grants You a worldwide,
132 | royalty-free, non-sublicensable, non-exclusive, irrevocable
133 | license to exercise the Licensed Rights in the Licensed
134 | Material to:
135 | - A. reproduce and Share the Licensed Material, in whole
136 | or in part, for NonCommercial purposes only; and
137 | - B. produce, reproduce, and Share Adapted Material for
138 | NonCommercial purposes only.
139 | - 2. Exceptions and Limitations. For the avoidance of doubt,
140 | where Exceptions and Limitations apply to Your use, this
141 | Public License does not apply, and You do not need to comply
142 | with its terms and conditions.
143 | - 3. Term. The term of this Public License is specified in
144 | Section 6(a).
145 | - 4. Media and formats; technical modifications allowed. The
146 | Licensor authorizes You to exercise the Licensed Rights in
147 | all media and formats whether now known or hereafter
148 | created, and to make technical modifications necessary to do
149 | so. The Licensor waives and/or agrees not to assert any
150 | right or authority to forbid You from making technical
151 | modifications necessary to exercise the Licensed Rights,
152 | including technical modifications necessary to circumvent
153 | Effective Technological Measures. For purposes of this
154 | Public License, simply making modifications authorized by
155 | this Section 2(a)(4) never produces Adapted Material.
156 | - 5. Downstream recipients.
157 | - A. Offer from the Licensor – Licensed Material. Every
158 | recipient of the Licensed Material automatically
159 | receives an offer from the Licensor to exercise the
160 | Licensed Rights under the terms and conditions of this
161 | Public License.
162 | - B. No downstream restrictions. You may not offer or
163 | impose any additional or different terms or conditions
164 | on, or apply any Effective Technological Measures to,
165 | the Licensed Material if doing so restricts exercise of
166 | the Licensed Rights by any recipient of the Licensed
167 | Material.
168 | - 6. No endorsement. Nothing in this Public License
169 | constitutes or may be construed as permission to assert or
170 | imply that You are, or that Your use of the Licensed
171 | Material is, connected with, or sponsored, endorsed, or
172 | granted official status by, the Licensor or others
173 | designated to receive attribution as provided in Section
174 | 3(a)(1)(A)(i).
175 | - b. Other rights.
176 | - 1. Moral rights, such as the right of integrity, are not
177 | licensed under this Public License, nor are publicity,
178 | privacy, and/or other similar personality rights; however,
179 | to the extent possible, the Licensor waives and/or agrees
180 | not to assert any such rights held by the Licensor to the
181 | limited extent necessary to allow You to exercise the
182 | Licensed Rights, but not otherwise.
183 | - 2. Patent and trademark rights are not licensed under this
184 | Public License.
185 | - 3. To the extent possible, the Licensor waives any right to
186 | collect royalties from You for the exercise of the Licensed
187 | Rights, whether directly or through a collecting society
188 | under any voluntary or waivable statutory or compulsory
189 | licensing scheme. In all other cases the Licensor expressly
190 | reserves any right to collect such royalties, including when
191 | the Licensed Material is used other than for NonCommercial
192 | purposes.
193 |
194 | - Section 3 – License Conditions.
195 |
196 | Your exercise of the Licensed Rights is expressly made subject to
197 | the following conditions.
198 |
199 | - a. Attribution.
200 | - 1. If You Share the Licensed Material (including in modified
201 | form), You must:
202 | - A. retain the following if it is supplied by the
203 | Licensor with the Licensed Material:
204 | - i. identification of the creator(s) of the Licensed
205 | Material and any others designated to receive
206 | attribution, in any reasonable manner requested by
207 | the Licensor (including by pseudonym if designated);
208 | - ii. a copyright notice;
209 | - iii. a notice that refers to this Public License;
210 | - iv. a notice that refers to the disclaimer of
211 | warranties;
212 | - v. a URI or hyperlink to the Licensed Material to
213 | the extent reasonably practicable;
214 | - B. indicate if You modified the Licensed Material and
215 | retain an indication of any previous modifications; and
216 | - C. indicate the Licensed Material is licensed under this
217 | Public License, and include the text of, or the URI or
218 | hyperlink to, this Public License.
219 | - 2. You may satisfy the conditions in Section 3(a)(1) in any
220 | reasonable manner based on the medium, means, and context in
221 | which You Share the Licensed Material. For example, it may
222 | be reasonable to satisfy the conditions by providing a URI
223 | or hyperlink to a resource that includes the required
224 | information.
225 | - 3. If requested by the Licensor, You must remove any of the
226 | information required by Section 3(a)(1)(A) to the extent
227 | reasonably practicable.
228 | - 4. If You Share Adapted Material You produce, the Adapter's
229 | License You apply must not prevent recipients of the Adapted
230 | Material from complying with this Public License.
231 |
232 | - Section 4 – Sui Generis Database Rights.
233 |
234 | Where the Licensed Rights include Sui Generis Database Rights that
235 | apply to Your use of the Licensed Material:
236 |
237 | - a. for the avoidance of doubt, Section 2(a)(1) grants You the
238 | right to extract, reuse, reproduce, and Share all or a
239 | substantial portion of the contents of the database for
240 | NonCommercial purposes only;
241 | - b. if You include all or a substantial portion of the database
242 | contents in a database in which You have Sui Generis Database
243 | Rights, then the database in which You have Sui Generis Database
244 | Rights (but not its individual contents) is Adapted Material;
245 | and
246 | - c. You must comply with the conditions in Section 3(a) if You
247 | Share all or a substantial portion of the contents of the
248 | database.
249 |
250 | For the avoidance of doubt, this Section 4 supplements and does not
251 | replace Your obligations under this Public License where the
252 | Licensed Rights include other Copyright and Similar Rights.
253 |
254 | - Section 5 – Disclaimer of Warranties and Limitation of Liability.
255 |
256 | - a. Unless otherwise separately undertaken by the Licensor, to
257 | the extent possible, the Licensor offers the Licensed Material
258 | as-is and as-available, and makes no representations or
259 | warranties of any kind concerning the Licensed Material, whether
260 | express, implied, statutory, or other. This includes, without
261 | limitation, warranties of title, merchantability, fitness for a
262 | particular purpose, non-infringement, absence of latent or other
263 | defects, accuracy, or the presence or absence of errors, whether
264 | or not known or discoverable. Where disclaimers of warranties
265 | are not allowed in full or in part, this disclaimer may not
266 | apply to You.
267 | - b. To the extent possible, in no event will the Licensor be
268 | liable to You on any legal theory (including, without
269 | limitation, negligence) or otherwise for any direct, special,
270 | indirect, incidental, consequential, punitive, exemplary, or
271 | other losses, costs, expenses, or damages arising out of this
272 | Public License or use of the Licensed Material, even if the
273 | Licensor has been advised of the possibility of such losses,
274 | costs, expenses, or damages. Where a limitation of liability is
275 | not allowed in full or in part, this limitation may not apply to
276 | You.
277 | - c. The disclaimer of warranties and limitation of liability
278 | provided above shall be interpreted in a manner that, to the
279 | extent possible, most closely approximates an absolute
280 | disclaimer and waiver of all liability.
281 |
282 | - Section 6 – Term and Termination.
283 |
284 | - a. This Public License applies for the term of the Copyright and
285 | Similar Rights licensed here. However, if You fail to comply
286 | with this Public License, then Your rights under this Public
287 | License terminate automatically.
288 | - b. Where Your right to use the Licensed Material has terminated
289 | under Section 6(a), it reinstates:
290 |
291 | - 1. automatically as of the date the violation is cured,
292 | provided it is cured within 30 days of Your discovery of the
293 | violation; or
294 | - 2. upon express reinstatement by the Licensor.
295 |
296 | For the avoidance of doubt, this Section 6(b) does not affect
297 | any right the Licensor may have to seek remedies for Your
298 | violations of this Public License.
299 |
300 | - c. For the avoidance of doubt, the Licensor may also offer the
301 | Licensed Material under separate terms or conditions or stop
302 | distributing the Licensed Material at any time; however, doing
303 | so will not terminate this Public License.
304 | - d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
305 | License.
306 |
307 | - Section 7 – Other Terms and Conditions.
308 |
309 | - a. The Licensor shall not be bound by any additional or
310 | different terms or conditions communicated by You unless
311 | expressly agreed.
312 | - b. Any arrangements, understandings, or agreements regarding the
313 | Licensed Material not stated herein are separate from and
314 | independent of the terms and conditions of this Public License.
315 |
316 | - Section 8 – Interpretation.
317 |
318 | - a. For the avoidance of doubt, this Public License does not, and
319 | shall not be interpreted to, reduce, limit, restrict, or impose
320 | conditions on any use of the Licensed Material that could
321 | lawfully be made without permission under this Public License.
322 | - b. To the extent possible, if any provision of this Public
323 | License is deemed unenforceable, it shall be automatically
324 | reformed to the minimum extent necessary to make it enforceable.
325 | If the provision cannot be reformed, it shall be severed from
326 | this Public License without affecting the enforceability of the
327 | remaining terms and conditions.
328 | - c. No term or condition of this Public License will be waived
329 | and no failure to comply consented to unless expressly agreed to
330 | by the Licensor.
331 | - d. Nothing in this Public License constitutes or may be
332 | interpreted as a limitation upon, or waiver of, any privileges
333 | and immunities that apply to the Licensor or You, including from
334 | the legal processes of any jurisdiction or authority.
335 |
336 | Creative Commons is not a party to its public licenses. Notwithstanding,
337 | Creative Commons may elect to apply one of its public licenses to
338 | material it publishes and in those instances will be considered the
339 | "Licensor." The text of the Creative Commons public licenses is
340 | dedicated to the public domain under the CC0 Public Domain Dedication.
341 | Except for the limited purpose of indicating that material is shared
342 | under a Creative Commons public license or as otherwise permitted by the
343 | Creative Commons policies published at creativecommons.org/policies,
344 | Creative Commons does not authorize the use of the trademark "Creative
345 | Commons" or any other trademark or logo of Creative Commons without its
346 | prior written consent including, without limitation, in connection with
347 | any unauthorized modifications to any of its public licenses or any
348 | other arrangements, understandings, or agreements concerning use of
349 | licensed material. For the avoidance of doubt, this paragraph does not
350 | form part of the public licenses.
351 |
352 | Creative Commons may be contacted at creativecommons.org.
353 |
--------------------------------------------------------------------------------