├── test_yolov5.jpg
├── requirements.txt
├── README.md
├── Dockerfile
├── hubconf.py
├── detect_one.py
├── detect.py
├── loss.py
├── yolo.py
├── test.py
├── general.py
├── train.py
├── LICENSE
└── face_datasets.py
/test_yolov5.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xialuxi/yolov5_face_landmark/HEAD/test_yolov5.jpg
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | # pip install -r requirements.txt
2 |
3 | # base ----------------------------------------
4 | Cython
5 | matplotlib>=3.2.2
6 | numpy>=1.18.5
7 | opencv-python>=4.1.2
8 | Pillow
9 | PyYAML>=5.3
10 | scipy>=1.4.1
11 | tensorboard>=2.2
12 | torch>=1.7.0
13 | torchvision>=0.8.1
14 | tqdm>=4.41.0
15 |
16 | # logging -------------------------------------
17 | # wandb
18 |
19 | # plotting ------------------------------------
20 | seaborn>=0.11.0
21 | pandas
22 |
23 | # export --------------------------------------
24 | # coremltools==4.0
25 | # onnx>=1.8.0
26 | # scikit-learn==0.19.2 # for coreml quantization
27 |
28 | # extras --------------------------------------
29 | thop # FLOPS computation
30 | pycocotools>=2.0 # COCO mAP
31 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # yolov5_face_landmark
2 | ## YOLOv5-based face detection with facial landmark detection
3 | > Code notes:
4 | + 1. A landmark-regression branch is added on top of YOLOv5 detection. Please download the YOLOv5 project first: https://github.com/ultralytics/yolov5
5 | + 2. detect_one.py is the test script for a single image; a model trained on part of WIDER FACE will be released on Baidu Cloud later.
6 | >> Main code changes:
7 | + (1) hyp.scratch.yaml gains a hyperparameter for the landmark loss (landmark: 0.5)
8 | + (2) yolo.py adds the landmark-regression computation
9 | + (3) face_datasets.py reads the face data; prepare labels in the YOLOv5 format with the (normalized) landmark coordinates appended at the end (see the example line after this list)
10 | + (4) loss.py adds the loss computation for landmark regression
11 | + (5) Link: https://pan.baidu.com/s/1zjPIF2NZ9CGtB2iUCox6hw password: j83n
12 | + (6) Result image: 
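An illustrative label line for change (3). The numbers below are made up; the format is the standard YOLOv5 `class x_center y_center width height`, followed by five landmark `x y` pairs (conventionally left eye, right eye, nose tip, left and right mouth corners), all normalized to [0, 1]:

```
0 0.4531 0.3320 0.1250 0.2104 0.4102 0.2900 0.4883 0.2871 0.4492 0.3311 0.4199 0.3682 0.4902 0.3652
```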
13 | > On masked faces:
14 | + 1. To support a masked-face category, it is best not to add it directly as a class in the detection branch.
15 | + 2. Instead, add an attribute branch alongside the landmark branch with a binary classifier that predicts whether a mask is worn (a sketch follows this list).
16 | + 3. This reduces false detections on masked faces.
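A minimal sketch of the attribute-branch idea in point 2, assuming one extra per-anchor logit appended after the class outputs; the names and wiring here are illustrative assumptions, not code from this repository:

```python
import torch
import torch.nn as nn

# Per-anchor outputs: box(4) + obj(1) + landmarks(10) + classes(nc) + mask attr(1)
nc, na = 1, 3
no = nc + 5 + 10 + 1
head = nn.ModuleList(nn.Conv2d(c, no * na, 1) for c in (128, 256, 512))

# Training side: BCE on the last channel, supervised only on matched face targets
bce = nn.BCEWithLogitsLoss()
ps = torch.randn(8, no)                          # stand-in for the matched prediction subset
mask_target = torch.randint(0, 2, (8,)).float()  # 1 = wearing a mask
lattr = bce(ps[:, -1], mask_target)              # add to the total loss with its own gain
```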
17 | > On the landmarks:
18 | + 1. Consider switching to wing loss for training, which improves landmark accuracy (a sketch follows this list).
19 | + 2. It also mitigates landmark outliers.
20 | + 3. WIDER FACE contains many very small faces; leaving them unhandled costs some accuracy.
21 | + For a complete implementation, see: https://github.com/deepcam-cn/yolov5-face
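A minimal wing-loss sketch (Feng et al., CVPR 2018) for point 1 above, written as a drop-in alternative to the SmoothL1-based LandmarksLoss in loss.py; the constants `w` and `eps` and the masked reduction are assumptions, not this repository's released code:

```python
import math
import torch
import torch.nn as nn

class WingLoss(nn.Module):
    def __init__(self, w=10.0, eps=2.0):
        super(WingLoss, self).__init__()
        self.w = w      # half-width of the non-linear (log) region
        self.eps = eps  # controls curvature of the log region
        # C makes the two pieces meet continuously at |x| == w
        self.C = self.w - self.w * math.log(1 + self.w / self.eps)

    def forward(self, pred, truel, mask):
        x = (pred - truel) * mask
        abs_x = x.abs()
        loss = torch.where(abs_x < self.w,
                           self.w * torch.log(1 + abs_x / self.eps),
                           abs_x - self.C)
        return loss.sum() / (mask.sum() + 1e-14)
```

Usage mirrors LandmarksLoss: `lmark += WingLoss()(plandmarks, tlandmarks[i], lmks_mask[i])`.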
22 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | # Start FROM Nvidia PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch
2 | FROM nvcr.io/nvidia/pytorch:20.12-py3
3 |
4 | # Install linux packages
5 | RUN apt update && apt install -y screen libgl1-mesa-glx
6 |
7 | # Install python dependencies
8 | RUN pip install --upgrade pip
9 | COPY requirements.txt .
10 | RUN pip install -r requirements.txt
11 | RUN pip install gsutil
12 |
13 | # Create working directory
14 | RUN mkdir -p /usr/src/app
15 | WORKDIR /usr/src/app
16 |
17 | # Copy contents
18 | COPY . /usr/src/app
19 |
20 | # Copy weights
21 | #RUN python3 -c "from models import *; \
22 | #attempt_download('weights/yolov5s.pt'); \
23 | #attempt_download('weights/yolov5m.pt'); \
24 | #attempt_download('weights/yolov5l.pt')"
25 |
26 |
27 | # --------------------------------------------------- Extras Below ---------------------------------------------------
28 |
29 | # Build and Push
30 | # t=ultralytics/yolov5:latest && sudo docker build -t $t . && sudo docker push $t
31 | # for v in {300..303}; do t=ultralytics/coco:v$v && sudo docker build -t $t . && sudo docker push $t; done
32 |
33 | # Pull and Run
34 | # t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all $t
35 |
36 | # Pull and Run with local directory access
37 | # t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all -v "$(pwd)"/coco:/usr/src/coco $t
38 |
39 | # Kill all
40 | # sudo docker kill $(sudo docker ps -q)
41 |
42 | # Kill all image-based
43 | # sudo docker kill $(sudo docker ps -a -q --filter ancestor=ultralytics/yolov5:latest)
44 |
45 | # Bash into running container
46 | # sudo docker container exec -it ba65811811ab bash
47 |
48 | # Bash into stopped container
49 | # sudo docker commit 092b16b25c5b usr/resume && sudo docker run -it --gpus all --ipc=host -v "$(pwd)"/coco:/usr/src/coco --entrypoint=sh usr/resume
50 |
51 | # Send weights to GCP
52 | # python -c "from utils.general import *; strip_optimizer('runs/train/exp0_*/weights/best.pt', 'tmp.pt')" && gsutil cp tmp.pt gs://*.pt
53 |
54 | # Clean up
55 | # docker system prune -a --volumes
56 |
--------------------------------------------------------------------------------
/hubconf.py:
--------------------------------------------------------------------------------
1 | """File for accessing YOLOv5 via PyTorch Hub https://pytorch.org/hub/
2 |
3 | Usage:
4 | import torch
5 | model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True, channels=3, classes=80)
6 | """
7 |
8 | from pathlib import Path
9 |
10 | import torch
11 |
12 | from models.yolo import Model
13 | from utils.general import set_logging
14 | from utils.google_utils import attempt_download
15 |
16 | dependencies = ['torch', 'yaml']
17 | set_logging()
18 |
19 |
20 | def create(name, pretrained, channels, classes, autoshape):
21 | """Creates a specified YOLOv5 model
22 |
23 | Arguments:
24 | name (str): name of model, i.e. 'yolov5s'
25 | pretrained (bool): load pretrained weights into the model
26 | channels (int): number of input channels
27 | classes (int): number of model classes
28 |
29 | Returns:
30 | pytorch model
31 | """
32 | config = Path(__file__).parent / 'models' / f'{name}.yaml' # model.yaml path
33 | try:
34 | model = Model(config, channels, classes)
35 | if pretrained:
36 | fname = f'{name}.pt' # checkpoint filename
37 | attempt_download(fname) # download if not found locally
38 | ckpt = torch.load(fname, map_location=torch.device('cpu')) # load
39 | state_dict = ckpt['model'].float().state_dict() # to FP32
40 | state_dict = {k: v for k, v in state_dict.items() if model.state_dict()[k].shape == v.shape} # filter
41 | model.load_state_dict(state_dict, strict=False) # load
42 | if len(ckpt['model'].names) == classes:
43 | model.names = ckpt['model'].names # set class names attribute
44 | if autoshape:
45 | model = model.autoshape() # for file/URI/PIL/cv2/np inputs and NMS
46 | return model
47 |
48 | except Exception as e:
49 | help_url = 'https://github.com/ultralytics/yolov5/issues/36'
50 | s = 'Cache may be out of date, try force_reload=True. See %s for help.' % help_url
51 | raise Exception(s) from e
52 |
53 |
54 | def yolov5s(pretrained=False, channels=3, classes=80, autoshape=True):
55 | """YOLOv5-small model from https://github.com/ultralytics/yolov5
56 |
57 | Arguments:
58 | pretrained (bool): load pretrained weights into the model, default=False
59 | channels (int): number of input channels, default=3
60 | classes (int): number of model classes, default=80
61 |
62 | Returns:
63 | pytorch model
64 | """
65 | return create('yolov5s', pretrained, channels, classes, autoshape)
66 |
67 |
68 | def yolov5m(pretrained=False, channels=3, classes=80, autoshape=True):
69 | """YOLOv5-medium model from https://github.com/ultralytics/yolov5
70 |
71 | Arguments:
72 | pretrained (bool): load pretrained weights into the model, default=False
73 | channels (int): number of input channels, default=3
74 | classes (int): number of model classes, default=80
75 |
76 | Returns:
77 | pytorch model
78 | """
79 | return create('yolov5m', pretrained, channels, classes, autoshape)
80 |
81 |
82 | def yolov5l(pretrained=False, channels=3, classes=80, autoshape=True):
83 | """YOLOv5-large model from https://github.com/ultralytics/yolov5
84 |
85 | Arguments:
86 | pretrained (bool): load pretrained weights into the model, default=False
87 | channels (int): number of input channels, default=3
88 | classes (int): number of model classes, default=80
89 |
90 | Returns:
91 | pytorch model
92 | """
93 | return create('yolov5l', pretrained, channels, classes, autoshape)
94 |
95 |
96 | def yolov5x(pretrained=False, channels=3, classes=80, autoshape=True):
97 | """YOLOv5-xlarge model from https://github.com/ultralytics/yolov5
98 |
99 | Arguments:
100 | pretrained (bool): load pretrained weights into the model, default=False
101 | channels (int): number of input channels, default=3
102 | classes (int): number of model classes, default=80
103 |
104 | Returns:
105 | pytorch model
106 | """
107 | return create('yolov5x', pretrained, channels, classes, autoshape)
108 |
109 |
110 | def custom(path_or_model='path/to/model.pt', autoshape=True):
111 | """YOLOv5-custom model from https://github.com/ultralytics/yolov5
112 |
113 | Arguments (3 options):
114 | path_or_model (str): 'path/to/model.pt'
115 | path_or_model (dict): torch.load('path/to/model.pt')
116 | path_or_model (nn.Module): torch.load('path/to/model.pt')['model']
117 |
118 | Returns:
119 | pytorch model
120 | """
121 | model = torch.load(path_or_model) if isinstance(path_or_model, str) else path_or_model # load checkpoint
122 | if isinstance(model, dict):
123 | model = model['model'] # load model
124 |
125 | hub_model = Model(model.yaml).to(next(model.parameters()).device) # create
126 | hub_model.load_state_dict(model.float().state_dict()) # load state_dict
127 | hub_model.names = model.names # class names
128 | return hub_model.autoshape() if autoshape else hub_model
129 |
130 |
131 | if __name__ == '__main__':
132 | model = create(name='yolov5s', pretrained=True, channels=3, classes=80, autoshape=True) # pretrained example
133 | # model = custom(path_or_model='path/to/model.pt') # custom example
134 |
135 | # Verify inference
136 | from PIL import Image
137 |
138 | imgs = [Image.open(x) for x in Path('data/images').glob('*.jpg')]
139 | results = model(imgs)
140 | results.show()
141 | results.print()
142 |
--------------------------------------------------------------------------------
/detect_one.py:
--------------------------------------------------------------------------------
1 | # -*- coding: UTF-8 -*-
2 | import argparse
3 | import time
4 | from pathlib import Path
5 |
6 | import cv2
7 | import torch
8 | import torch.backends.cudnn as cudnn
9 | from numpy import random
10 | import copy
11 |
12 | from models.experimental import attempt_load
13 | from utils.datasets import LoadStreams, LoadImages, letterbox
14 | from utils.general import check_img_size, non_max_suppression_face, apply_classifier, scale_coords, xyxy2xywh, \
15 | strip_optimizer, set_logging, increment_path
16 | from utils.plots import plot_one_box
17 | from utils.torch_utils import select_device, load_classifier, time_synchronized
18 |
19 |
20 | def load_model(weights, device):
21 | model = attempt_load(weights, map_location=device) # load FP32 model
22 | return model
23 |
24 |
25 | def scale_coords_landmarks(img1_shape, coords, img0_shape, ratio_pad=None):
26 | # Rescale coords (xyxy) from img1_shape to img0_shape
27 | if ratio_pad is None: # calculate from img0_shape
28 | gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new
29 | pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding
30 | else:
31 | gain = ratio_pad[0][0]
32 | pad = ratio_pad[1]
33 |
34 | coords[:, [0, 2, 4, 6, 8]] -= pad[0] # x padding
35 | coords[:, [1, 3, 5, 7, 9]] -= pad[1] # y padding
36 | coords[:, :10] /= gain
37 | #clip_coords(coords, img0_shape)
38 | coords[:, 0].clamp_(0, img0_shape[1]) # x1
39 | coords[:, 1].clamp_(0, img0_shape[0]) # y1
40 | coords[:, 2].clamp_(0, img0_shape[1]) # x2
41 | coords[:, 3].clamp_(0, img0_shape[0]) # y2
42 | coords[:, 4].clamp_(0, img0_shape[1]) # x3
43 | coords[:, 5].clamp_(0, img0_shape[0]) # y3
44 | coords[:, 6].clamp_(0, img0_shape[1]) # x4
45 | coords[:, 7].clamp_(0, img0_shape[0]) # y4
46 | coords[:, 8].clamp_(0, img0_shape[1]) # x5
47 | coords[:, 9].clamp_(0, img0_shape[0]) # y5
48 | return coords
49 |
50 |
51 |
52 | def show_results(img, xywh, conf, landmarks, class_num):
53 | h,w,c = img.shape
54 | tl = 1 or round(0.002 * (h + w) / 2) + 1 # line/font thickness (the leading 1 short-circuits, so thickness is always 1)
55 | x1 = int(xywh[0] * w - 0.5 * xywh[2] * w)
56 | y1 = int(xywh[1] * h - 0.5 * xywh[3] * h)
57 | x2 = int(xywh[0] * w + 0.5 * xywh[2] * w)
58 | y2 = int(xywh[1] * h + 0.5 * xywh[3] * h)
59 | cv2.rectangle(img, (x1,y1), (x2, y2), (0,255,0), thickness=tl, lineType=cv2.LINE_AA)
60 |
61 | colors = [(255,0,0),(0,255,0),(0,0,255),(255,255,0),(0,255,255)]
62 |
63 | for i in range(5):
64 | point_x = int(landmarks[2 * i] * w)
65 | point_y = int(landmarks[2 * i + 1] * h)
66 | cv2.circle(img, (point_x, point_y), tl+1, colors[i], -1)
67 |
68 | tf = max(tl - 1, 1) # font thickness
69 | label = str(int(class_num)) + ': ' + str(conf)[:5]
70 | cv2.putText(img, label, (x1, y1 - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
71 | return img
72 |
73 |
74 |
75 | def detect_one(model, image_path, device):
76 | # Load model
77 | img_size = 640
78 | conf_thres = 0.3
79 | iou_thres = 0.5
80 |
81 | orgimg = cv2.imread(image_path) # BGR
82 | assert orgimg is not None, 'Image Not Found ' + image_path
83 | img0 = copy.deepcopy(orgimg)
84 | h0, w0 = orgimg.shape[:2] # orig hw
85 | r = img_size / max(h0, w0) # resize image to img_size
86 | if r != 1: # always resize down, only resize up if training with augmentation
87 | interp = cv2.INTER_AREA if r < 1 else cv2.INTER_LINEAR
88 | img0 = cv2.resize(img0, (int(w0 * r), int(h0 * r)), interpolation=interp)
89 |
90 | imgsz = check_img_size(img_size, s=model.stride.max()) # check img_size
91 |
92 | img = letterbox(img0, new_shape=imgsz)[0]
93 | # Convert
94 | img = img[:, :, ::-1].transpose(2, 0, 1).copy() # BGR to RGB, HWC to CHW
95 |
96 | # Run inference
97 | t0 = time.time()
98 |
99 | img = torch.from_numpy(img).to(device)
100 | img = img.float() # uint8 to fp16/32
101 | img /= 255.0 # 0 - 255 to 0.0 - 1.0
102 | if img.ndimension() == 3:
103 | img = img.unsqueeze(0)
104 |
105 | # Inference
106 | t1 = time_synchronized()
107 | pred = model(img)[0]
108 |
109 | # Apply NMS
110 | pred = non_max_suppression_face(pred, conf_thres, iou_thres)
111 | print('pred: ', pred)
112 | t2 = time_synchronized()
113 |
114 |
115 |
116 | print('img.shape: ', img.shape)
117 | print('orgimg.shape: ', orgimg.shape)
118 |
119 | # Process detections
120 | for i, det in enumerate(pred): # detections per image
121 | gn = torch.tensor(orgimg.shape)[[1, 0, 1, 0]].to(device) # normalization gain whwh
122 | gn_lks = torch.tensor(orgimg.shape)[[1, 0, 1, 0, 1, 0, 1, 0, 1, 0]].to(device) # normalization gain landmarks
123 | if len(det):
124 | # Rescale boxes from img_size to im0 size
125 | det[:, :4] = scale_coords(img.shape[2:], det[:, :4], orgimg.shape).round()
126 |
127 | # Print results
128 | for c in det[:, -1].unique():
129 | n = (det[:, -1] == c).sum() # detections per class
130 |
131 | det[:, 5:15] = scale_coords_landmarks(img.shape[2:], det[:, 5:15], orgimg.shape).round()
132 |
133 |
134 | for j in range(det.size()[0]):
135 | xywh = (xyxy2xywh(torch.tensor(det[j, :4]).view(1, 4)) / gn).view(-1).tolist()
136 | conf = det[j, 4].cpu().numpy()
137 | landmarks = (det[j, 5:15].view(1, 10) / gn_lks).view(-1).tolist()
138 | class_num = det[j, 15].cpu().numpy()
139 |
140 |
141 | orgimg = show_results(orgimg, xywh, conf, landmarks, class_num)
142 |
143 |
144 |
145 | # Stream results
146 | print(f'Done. ({time.time() - t0:.3f}s)')
147 |
148 | cv2.imshow('orgimg', orgimg)
149 | if cv2.waitKey(0) == ord('q'): # q to quit
150 | raise StopIteration
151 |
152 |
153 |
154 |
155 | if __name__ == '__main__':
156 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
157 | weights = './runs/train/exp5/weights/last.pt'
158 | model = load_model(weights, device)
159 | image_path = '/home/xialuxi/work/dukto/vi/13_23/5302012120180413230735_64_Camera_1_20180413_230736_0_0_0_0_0_0_0_0_882.jpeg'
160 | detect_one(model, image_path, device)
161 | print('over')
--------------------------------------------------------------------------------
/detect.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import time
3 | from pathlib import Path
4 |
5 | import cv2
6 | import torch
7 | import torch.backends.cudnn as cudnn
8 | from numpy import random
9 |
10 | from models.experimental import attempt_load
11 | from utils.datasets import LoadStreams, LoadImages
12 | from utils.general import check_img_size, non_max_suppression, apply_classifier, scale_coords, xyxy2xywh, \
13 | strip_optimizer, set_logging, increment_path
14 | from utils.plots import plot_one_box
15 | from utils.torch_utils import select_device, load_classifier, time_synchronized
16 |
17 |
18 | def detect(save_img=False):
19 | source, weights, view_img, save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size
20 | print('weights: ', weights)
21 | webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(
22 | ('rtsp://', 'rtmp://', 'http://'))
23 |
24 | # Directories
25 | save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run
26 | (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
27 |
28 | # Initialize
29 | set_logging()
30 | device = select_device(opt.device)
31 | half = device.type != 'cpu' # half precision only supported on CUDA
32 |
33 | # Load model
34 | model = attempt_load(weights, map_location=device) # load FP32 model
35 | imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size
36 | if half:
37 | model.half() # to FP16
38 |
39 | # Second-stage classifier
40 | classify = False
41 | if classify:
42 | modelc = load_classifier(name='resnet101', n=2) # initialize
43 | modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model']); modelc.to(device).eval() # load_state_dict() does not return the module, so chain on modelc
44 |
45 | # Set Dataloader
46 | vid_path, vid_writer = None, None
47 | if webcam:
48 | view_img = True
49 | cudnn.benchmark = True # set True to speed up constant image size inference
50 | dataset = LoadStreams(source, img_size=imgsz)
51 | else:
52 | save_img = True
53 | dataset = LoadImages(source, img_size=imgsz)
54 |
55 | # Get names and colors
56 | names = model.module.names if hasattr(model, 'module') else model.names
57 | colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]
58 |
59 | # Run inference
60 | t0 = time.time()
61 | img = torch.zeros((1, 3, imgsz, imgsz), device=device) # init img
62 | _ = model(img.half() if half else img) if device.type != 'cpu' else None # run once
63 | for path, img, im0s, vid_cap in dataset:
64 | img = torch.from_numpy(img).to(device)
65 | img = img.half() if half else img.float() # uint8 to fp16/32
66 | img /= 255.0 # 0 - 255 to 0.0 - 1.0
67 | if img.ndimension() == 3:
68 | img = img.unsqueeze(0)
69 |
70 | # Inference
71 | t1 = time_synchronized()
72 | pred = model(img, augment=opt.augment)[0]
73 |
74 | # Apply NMS
75 | pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)
76 | t2 = time_synchronized()
77 |
78 | # Apply Classifier
79 | if classify:
80 | pred = apply_classifier(pred, modelc, img, im0s)
81 |
82 | # Process detections
83 | for i, det in enumerate(pred): # detections per image
84 | if webcam: # batch_size >= 1
85 | p, s, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), dataset.count
86 | else:
87 | p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0)
88 |
89 | p = Path(p) # to Path
90 | save_path = str(save_dir / p.name) # img.jpg
91 | txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt
92 | s += '%gx%g ' % img.shape[2:] # print string
93 | gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
94 | if len(det):
95 | # Rescale boxes from img_size to im0 size
96 | det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
97 |
98 | # Print results
99 | for c in det[:, -1].unique():
100 | n = (det[:, -1] == c).sum() # detections per class
101 | s += f'{n} {names[int(c)]}s, ' # add to string
102 |
103 | # Write results
104 | for *xyxy, conf, cls in reversed(det):
105 | if save_txt: # Write to file
106 | xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
107 | line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh) # label format
108 | with open(txt_path + '.txt', 'a') as f:
109 | f.write(('%g ' * len(line)).rstrip() % line + '\n')
110 |
111 | if save_img or view_img: # Add bbox to image
112 | label = f'{names[int(cls)]} {conf:.2f}'
113 | plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3)
114 |
115 | # Print time (inference + NMS)
116 | print(f'{s}Done. ({t2 - t1:.3f}s)')
117 |
118 | # Stream results
119 | if view_img:
120 | cv2.imshow(str(p), im0)
121 | if cv2.waitKey(1) == ord('q'): # q to quit
122 | raise StopIteration
123 |
124 | # Save results (image with detections)
125 | if save_img:
126 | if dataset.mode == 'image':
127 | cv2.imwrite(save_path, im0)
128 | else: # 'video'
129 | if vid_path != save_path: # new video
130 | vid_path = save_path
131 | if isinstance(vid_writer, cv2.VideoWriter):
132 | vid_writer.release() # release previous video writer
133 |
134 | fourcc = 'mp4v' # output video codec
135 | fps = vid_cap.get(cv2.CAP_PROP_FPS)
136 | w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
137 | h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
138 | vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, h))
139 | vid_writer.write(im0)
140 |
141 | if save_txt or save_img:
142 | s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
143 | print(f"Results saved to {save_dir}{s}")
144 |
145 | print(f'Done. ({time.time() - t0:.3f}s)')
146 |
147 |
148 | if __name__ == '__main__':
149 | parser = argparse.ArgumentParser()
150 | parser.add_argument('--weights', nargs='+', type=str, default='./weights/yolov5s.pt', help='model.pt path(s)')
151 | parser.add_argument('--source', type=str, default='data/images', help='source') # file/folder, 0 for webcam
152 | parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
153 | parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
154 | parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
155 | parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
156 | parser.add_argument('--view-img', action='store_true', help='display results')
157 | parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
158 | parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
159 | parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
160 | parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
161 | parser.add_argument('--augment', action='store_true', help='augmented inference')
162 | parser.add_argument('--update', action='store_true', help='update all models')
163 | parser.add_argument('--project', default='runs/detect', help='save results to project/name')
164 | parser.add_argument('--name', default='exp', help='save results to project/name')
165 | parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
166 | opt = parser.parse_args()
167 | print(opt)
168 |
169 | with torch.no_grad():
170 | if opt.update: # update all models (to fix SourceChangeWarning)
171 | for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:
172 | detect()
173 | strip_optimizer(opt.weights)
174 | else:
175 | detect()
176 |
--------------------------------------------------------------------------------
/loss.py:
--------------------------------------------------------------------------------
1 | # Loss functions
2 |
3 | import torch
4 | import torch.nn as nn
5 |
6 | from utils.general import bbox_iou
7 | from utils.torch_utils import is_parallel
8 |
9 |
10 | def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441
11 | # return positive, negative label smoothing BCE targets
12 | return 1.0 - 0.5 * eps, 0.5 * eps
13 |
14 |
15 | class BCEBlurWithLogitsLoss(nn.Module):
16 | # BCEwithLogitLoss() with reduced missing label effects.
17 | def __init__(self, alpha=0.05):
18 | super(BCEBlurWithLogitsLoss, self).__init__()
19 | self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss()
20 | self.alpha = alpha
21 |
22 | def forward(self, pred, true):
23 | loss = self.loss_fcn(pred, true)
24 | pred = torch.sigmoid(pred) # prob from logits
25 | dx = pred - true # reduce only missing label effects
26 | # dx = (pred - true).abs() # reduce missing label and false label effects
27 | alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4))
28 | loss *= alpha_factor
29 | return loss.mean()
30 |
31 |
32 | class FocalLoss(nn.Module):
33 | # Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
34 | def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
35 | super(FocalLoss, self).__init__()
36 | self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss()
37 | self.gamma = gamma
38 | self.alpha = alpha
39 | self.reduction = loss_fcn.reduction
40 | self.loss_fcn.reduction = 'none' # required to apply FL to each element
41 |
42 | def forward(self, pred, true):
43 | loss = self.loss_fcn(pred, true)
44 | # p_t = torch.exp(-loss)
45 | # loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability
46 |
47 | # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py
48 | pred_prob = torch.sigmoid(pred) # prob from logits
49 | p_t = true * pred_prob + (1 - true) * (1 - pred_prob)
50 | alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
51 | modulating_factor = (1.0 - p_t) ** self.gamma
52 | loss *= alpha_factor * modulating_factor
53 |
54 | if self.reduction == 'mean':
55 | return loss.mean()
56 | elif self.reduction == 'sum':
57 | return loss.sum()
58 | else: # 'none'
59 | return loss
60 |
61 |
62 | class QFocalLoss(nn.Module):
63 | # Wraps Quality focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
64 | def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
65 | super(QFocalLoss, self).__init__()
66 | self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss()
67 | self.gamma = gamma
68 | self.alpha = alpha
69 | self.reduction = loss_fcn.reduction
70 | self.loss_fcn.reduction = 'none' # required to apply FL to each element
71 |
72 | def forward(self, pred, true):
73 | loss = self.loss_fcn(pred, true)
74 |
75 | pred_prob = torch.sigmoid(pred) # prob from logits
76 | alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
77 | modulating_factor = torch.abs(true - pred_prob) ** self.gamma
78 | loss *= alpha_factor * modulating_factor
79 |
80 | if self.reduction == 'mean':
81 | return loss.mean()
82 | elif self.reduction == 'sum':
83 | return loss.sum()
84 | else: # 'none'
85 | return loss
86 |
87 |
88 | class LandmarksLoss(nn.Module):
89 | # BCEwithLogitLoss() with reduced missing label effects.
90 | def __init__(self, alpha=1.0):
91 | super(LandmarksLoss, self).__init__()
92 | self.loss_fcn = nn.SmoothL1Loss(reduction='sum')
93 | self.alpha = alpha
94 |
95 | def forward(self, pred, truel, mask):
96 | loss = self.loss_fcn(pred*mask, truel*mask)
97 | #loss = torch.abs(pred*mask - truel*mask)
98 | #loss = loss.sum(dim = 1)
99 | return loss / (torch.sum(mask) + 10e-14)
100 |
101 |
102 | def compute_loss(p, targets, model): # predictions, targets, model
103 | device = targets.device
104 | lcls, lbox, lobj, lmark = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)
105 | tcls, tbox, indices, anchors, tlandmarks, lmks_mask = build_targets(p, targets, model) # targets
106 | h = model.hyp # hyperparameters
107 |
108 | # Define criteria
109 | BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device)) # weight=model.class_weights)
110 | BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))
111 |
112 | landmarks_loss = LandmarksLoss(1.0)
113 |
114 | # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
115 | cp, cn = smooth_BCE(eps=0.0)
116 |
117 | # Focal loss
118 | g = h['fl_gamma'] # focal loss gamma
119 | if g > 0:
120 | BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)
121 |
122 | # Losses
123 | nt = 0 # number of targets
124 | no = len(p) # number of outputs
125 | balance = [4.0, 1.0, 0.4] if no == 3 else [4.0, 1.0, 0.4, 0.1] # P3-5 or P3-6
126 | for i, pi in enumerate(p): # layer index, layer predictions
127 | b, a, gj, gi = indices[i] # image, anchor, gridy, gridx
128 | tobj = torch.zeros_like(pi[..., 0], device=device) # target obj
129 |
130 | n = b.shape[0] # number of targets
131 | if n:
132 | nt += n # cumulative targets
133 | ps = pi[b, a, gj, gi] # prediction subset corresponding to targets
134 |
135 | # Regression
136 | pxy = ps[:, :2].sigmoid() * 2. - 0.5
137 | pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
138 | pbox = torch.cat((pxy, pwh), 1) # predicted box
139 | iou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True) # iou(prediction, target)
140 | lbox += (1.0 - iou).mean() # iou loss
141 |
142 | # Objectness
143 | tobj[b, a, gj, gi] = (1.0 - model.gr) + model.gr * iou.detach().clamp(0).type(tobj.dtype) # iou ratio
144 |
145 | # Classification
146 | if model.nc > 1: # cls loss (only if multiple classes)
147 | t = torch.full_like(ps[:, 15:], cn, device=device) # targets
148 | t[range(n), tcls[i]] = cp
149 | lcls += BCEcls(ps[:, 15:], t) # BCE
150 |
151 | # Append targets to text file
152 | # with open('targets.txt', 'a') as file:
153 | # [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]
154 |
155 | # landmarks loss
156 | plandmarks = ps[:, 5:15].sigmoid() * 8. - 4. # landmark offsets in (-4, 4), scaled by the anchor wh below
157 |
158 | plandmarks[:, 0:2] = plandmarks[:, 0:2] * anchors[i]
159 | plandmarks[:, 2:4] = plandmarks[:, 2:4] * anchors[i]
160 | plandmarks[:, 4:6] = plandmarks[:, 4:6] * anchors[i]
161 | plandmarks[:, 6:8] = plandmarks[:, 6:8] * anchors[i]
162 | plandmarks[:, 8:10] = plandmarks[:,8:10] * anchors[i]
163 |
164 | lmark += landmarks_loss(plandmarks, tlandmarks[i], lmks_mask[i])
165 |
166 |
167 | lobj += BCEobj(pi[..., 4], tobj) * balance[i] # obj loss
168 |
169 | s = 3 / no # output count scaling
170 | lbox *= h['box'] * s
171 | lobj *= h['obj'] * s * (1.4 if no == 4 else 1.)
172 | lcls *= h['cls'] * s
173 | lmark *= h['landmark'] * s
174 |
175 | bs = tobj.shape[0] # batch size
176 |
177 | loss = lbox + lobj + lcls + lmark
178 | return loss * bs, torch.cat((lbox, lobj, lcls, lmark, loss)).detach()
179 |
180 |
181 | def build_targets(p, targets, model):
182 | # Build targets for compute_loss(), input targets(image,class,x,y,w,h)
183 | det = model.module.model[-1] if is_parallel(model) else model.model[-1] # Detect() module
184 | na, nt = det.na, targets.shape[0] # number of anchors, targets
185 | tcls, tbox, indices, anch, landmarks, lmks_mask = [], [], [], [], [], []
186 | #gain = torch.ones(7, device=targets.device) # normalized to gridspace gain
187 | gain = torch.ones(17, device=targets.device)
188 | ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt)
189 | targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices
190 |
191 | g = 0.5 # bias
192 | off = torch.tensor([[0, 0],
193 | [1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m
194 | # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm
195 | ], device=targets.device).float() * g # offsets
196 |
197 | for i in range(det.nl):
198 | anchors = det.anchors[i]
199 | gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain
200 | #landmarks 10
201 | gain[6:16] = torch.tensor(p[i].shape)[[3, 2, 3, 2, 3, 2, 3, 2, 3, 2]] # xyxy gain
202 |
203 | # Match targets to anchors
204 | t = targets * gain
205 | if nt:
206 | # Matches
207 | r = t[:, :, 4:6] / anchors[:, None] # wh ratio
208 | j = torch.max(r, 1. / r).max(2)[0] < model.hyp['anchor_t'] # compare
209 | # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
210 | t = t[j] # filter
211 |
212 | # Offsets
213 | gxy = t[:, 2:4] # grid xy
214 | gxi = gain[[2, 3]] - gxy # inverse
215 | j, k = ((gxy % 1. < g) & (gxy > 1.)).T
216 | l, m = ((gxi % 1. < g) & (gxi > 1.)).T
217 | j = torch.stack((torch.ones_like(j), j, k, l, m))
218 | t = t.repeat((5, 1, 1))[j]
219 | offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]
220 | else:
221 | t = targets[0]
222 | offsets = 0
223 |
224 | # Define
225 | b, c = t[:, :2].long().T # image, class
226 | gxy = t[:, 2:4] # grid xy
227 | gwh = t[:, 4:6] # grid wh
228 | gij = (gxy - offsets).long()
229 | gi, gj = gij.T # grid xy indices
230 |
231 | # Append
232 | a = t[:, 16].long() # anchor indices
233 | indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices
234 | tbox.append(torch.cat((gxy - gij, gwh), 1)) # box
235 | anch.append(anchors[a]) # anchors
236 | tcls.append(c) # class
237 |
238 | #landmarks
239 | lks = t[:,6:16]
240 | #lks_mask = lks > 0
241 | #lks_mask = lks_mask.float()
242 | lks_mask = torch.where(lks < 0, torch.full_like(lks, 0.), torch.full_like(lks, 1.0))
243 |
244 | # The landmark offsets should be divided by the anchor width/height, which makes them easier for the model to learn; using gwh would give each target a different encoding with no common reference scale.
245 |
246 | lks[:, [0, 1]] = (lks[:, [0, 1]] - gij)
247 | lks[:, [2, 3]] = (lks[:, [2, 3]] - gij)
248 | lks[:, [4, 5]] = (lks[:, [4, 5]] - gij)
249 | lks[:, [6, 7]] = (lks[:, [6, 7]] - gij)
250 | lks[:, [8, 9]] = (lks[:, [8, 9]] - gij)
251 |
252 | '''
253 | #anch_w = torch.ones(5, device=targets.device).fill_(anchors[0][0])
254 | #anch_wh = torch.ones(5, device=targets.device)
255 | anch_f_0 = (a == 0).unsqueeze(1).repeat(1, 5)
256 | anch_f_1 = (a == 1).unsqueeze(1).repeat(1, 5)
257 | anch_f_2 = (a == 2).unsqueeze(1).repeat(1, 5)
258 | lks[:, [0, 2, 4, 6, 8]] = torch.where(anch_f_0, lks[:, [0, 2, 4, 6, 8]] / anchors[0][0], lks[:, [0, 2, 4, 6, 8]])
259 | lks[:, [0, 2, 4, 6, 8]] = torch.where(anch_f_1, lks[:, [0, 2, 4, 6, 8]] / anchors[1][0], lks[:, [0, 2, 4, 6, 8]])
260 | lks[:, [0, 2, 4, 6, 8]] = torch.where(anch_f_2, lks[:, [0, 2, 4, 6, 8]] / anchors[2][0], lks[:, [0, 2, 4, 6, 8]])
261 |
262 | lks[:, [1, 3, 5, 7, 9]] = torch.where(anch_f_0, lks[:, [1, 3, 5, 7, 9]] / anchors[0][1], lks[:, [1, 3, 5, 7, 9]])
263 | lks[:, [1, 3, 5, 7, 9]] = torch.where(anch_f_1, lks[:, [1, 3, 5, 7, 9]] / anchors[1][1], lks[:, [1, 3, 5, 7, 9]])
264 | lks[:, [1, 3, 5, 7, 9]] = torch.where(anch_f_2, lks[:, [1, 3, 5, 7, 9]] / anchors[2][1], lks[:, [1, 3, 5, 7, 9]])
265 |
266 | #new_lks = lks[lks_mask>0]
267 | #print('new_lks: min --- ', torch.min(new_lks), ' max --- ', torch.max(new_lks))
268 |
269 | lks_mask_1 = torch.where(lks < -3, torch.full_like(lks, 0.), torch.full_like(lks, 1.0))
270 | lks_mask_2 = torch.where(lks > 3, torch.full_like(lks, 0.), torch.full_like(lks, 1.0))
271 |
272 | lks_mask_new = lks_mask * lks_mask_1 * lks_mask_2
273 | lks_mask_new[:, 0] = lks_mask_new[:, 0] * lks_mask_new[:, 1]
274 | lks_mask_new[:, 1] = lks_mask_new[:, 0] * lks_mask_new[:, 1]
275 | lks_mask_new[:, 2] = lks_mask_new[:, 2] * lks_mask_new[:, 3]
276 | lks_mask_new[:, 3] = lks_mask_new[:, 2] * lks_mask_new[:, 3]
277 | lks_mask_new[:, 4] = lks_mask_new[:, 4] * lks_mask_new[:, 5]
278 | lks_mask_new[:, 5] = lks_mask_new[:, 4] * lks_mask_new[:, 5]
279 | lks_mask_new[:, 6] = lks_mask_new[:, 6] * lks_mask_new[:, 7]
280 | lks_mask_new[:, 7] = lks_mask_new[:, 6] * lks_mask_new[:, 7]
281 | lks_mask_new[:, 8] = lks_mask_new[:, 8] * lks_mask_new[:, 9]
282 | lks_mask_new[:, 9] = lks_mask_new[:, 8] * lks_mask_new[:, 9]
283 | '''
284 | lks_mask_new = lks_mask
285 | lmks_mask.append(lks_mask_new)
286 | landmarks.append(lks)
287 | #print('lks: ', lks.size())
288 |
289 | return tcls, tbox, indices, anch, landmarks, lmks_mask
290 |
--------------------------------------------------------------------------------
/yolo.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import logging
3 | import math
4 | import sys
5 | from copy import deepcopy
6 | from pathlib import Path
7 |
8 | import torch
9 | import torch.nn as nn
10 |
11 | sys.path.append('./') # to run '$ python *.py' files in subdirectories
12 | logger = logging.getLogger(__name__)
13 |
14 | from models.common import Conv, Bottleneck, SPP, DWConv, Focus, BottleneckCSP, C3, Concat, NMS, autoShape
15 | from models.experimental import MixConv2d, CrossConv
16 | from utils.autoanchor import check_anchor_order
17 | from utils.general import make_divisible, check_file, set_logging
18 | from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \
19 | select_device, copy_attr
20 |
21 | try:
22 | import thop # for FLOPS computation
23 | except ImportError:
24 | thop = None
25 |
26 |
27 | class Detect(nn.Module):
28 | stride = None # strides computed during build
29 | export = False # onnx export
30 |
31 | def __init__(self, nc=80, anchors=(), ch=()): # detection layer
32 | super(Detect, self).__init__()
33 | self.nc = nc # number of classes
34 | #self.no = nc + 5 # number of outputs per anchor
35 | self.no = nc + 5 + 10 # number of outputs per anchor (+10 for five landmark xy pairs)
36 |
37 | self.nl = len(anchors) # number of detection layers
38 | self.na = len(anchors[0]) // 2 # number of anchors
39 | self.grid = [torch.zeros(1)] * self.nl # init grid
40 | a = torch.tensor(anchors).float().view(self.nl, -1, 2)
41 | self.register_buffer('anchors', a) # shape(nl,na,2)
42 | self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2)
43 | self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv
44 |
45 | def forward(self, x):
46 | # x = x.copy() # for profiling
47 | z = [] # inference output
48 | self.training |= self.export
49 | for i in range(self.nl):
50 | x[i] = self.m[i](x[i]) # conv
51 | bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85)
52 | x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
53 |
54 | if not self.training: # inference
55 | if self.grid[i].shape[2:4] != x[i].shape[2:4]:
56 | self.grid[i] = self._make_grid(nx, ny).to(x[i].device)
57 |
58 | #y = torch.full_like(x[i], 0)
59 | #y[..., [0,1,2,3,4,15]] = x[i][..., [0,1,2,3,4,15]].sigmoid()
60 | #y[..., 5:15] = x[i][..., 5:15]
61 | y = x[i].sigmoid()
62 |
63 | y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i].to(x[i].device)) * self.stride[i] # xy
64 | y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh
65 |
66 | y[..., 5:15] = y[..., 5:15] * 8 - 4 # landmark offsets in (-4, 4), matching the training encoding in loss.py
67 | y[..., 5:7] = y[..., 5:7] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i] # landmark x1 y1
68 | y[..., 7:9] = y[..., 7:9] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i] # landmark x2 y2
69 | y[..., 9:11] = y[..., 9:11] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i] # landmark x3 y3
70 | y[..., 11:13] = y[..., 11:13] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i] # landmark x4 y4
71 | y[..., 13:15] = y[..., 13:15] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i] # landmark x5 y5
72 |
73 | #y[..., 5:7] = (y[..., 5:7] * 2 -1) * self.anchor_grid[i] # landmark x1 y1
74 | #y[..., 7:9] = (y[..., 7:9] * 2 -1) * self.anchor_grid[i] # landmark x2 y2
75 | #y[..., 9:11] = (y[..., 9:11] * 2 -1) * self.anchor_grid[i] # landmark x3 y3
76 | #y[..., 11:13] = (y[..., 11:13] * 2 -1) * self.anchor_grid[i] # landmark x4 y4
77 | #y[..., 13:15] = (y[..., 13:15] * 2 -1) * self.anchor_grid[i] # landmark x5 y5
78 |
79 | z.append(y.view(bs, -1, self.no))
80 |
81 | return x if self.training else (torch.cat(z, 1), x)
82 |
83 | @staticmethod
84 | def _make_grid(nx=20, ny=20):
85 | yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
86 | return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()
87 |
88 |
89 | class Model(nn.Module):
90 | def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None): # model, input channels, number of classes
91 | super(Model, self).__init__()
92 | if isinstance(cfg, dict):
93 | self.yaml = cfg # model dict
94 | else: # is *.yaml
95 | import yaml # for torch hub
96 | self.yaml_file = Path(cfg).name
97 | with open(cfg) as f:
98 | self.yaml = yaml.load(f, Loader=yaml.FullLoader) # model dict
99 |
100 | # Define model
101 | ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels
102 | if nc and nc != self.yaml['nc']:
103 | logger.info('Overriding model.yaml nc=%g with nc=%g' % (self.yaml['nc'], nc))
104 | self.yaml['nc'] = nc # override yaml value
105 | self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist
106 | self.names = [str(i) for i in range(self.yaml['nc'])] # default names
107 | # print([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))])
108 |
109 | # Build strides, anchors
110 | m = self.model[-1] # Detect()
111 | if isinstance(m, Detect):
112 | s = 128 # 2x min stride
113 | m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward
114 | m.anchors /= m.stride.view(-1, 1, 1)
115 | check_anchor_order(m)
116 | self.stride = m.stride
117 | self._initialize_biases() # only run once
118 | # print('Strides: %s' % m.stride.tolist())
119 |
120 | # Init weights, biases
121 | initialize_weights(self)
122 | self.info()
123 | logger.info('')
124 |
125 | def forward(self, x, augment=False, profile=False):
126 | if augment:
127 | img_size = x.shape[-2:] # height, width
128 | s = [1, 0.83, 0.67] # scales
129 | f = [None, 3, None] # flips (2-ud, 3-lr)
130 | y = [] # outputs
131 | for si, fi in zip(s, f):
132 | xi = scale_img(x.flip(fi) if fi else x, si)
133 | yi = self.forward_once(xi)[0] # forward
134 | # cv2.imwrite('img%g.jpg' % s, 255 * xi[0].numpy().transpose((1, 2, 0))[:, :, ::-1]) # save
135 | yi[..., :4] /= si # de-scale
136 | if fi == 2:
137 | yi[..., 1] = img_size[0] - yi[..., 1] # de-flip ud
138 | elif fi == 3:
139 | yi[..., 0] = img_size[1] - yi[..., 0] # de-flip lr
140 | y.append(yi)
141 | return torch.cat(y, 1), None # augmented inference, train
142 | else:
143 | return self.forward_once(x, profile) # single-scale inference, train
144 |
145 | def forward_once(self, x, profile=False):
146 | y, dt = [], [] # outputs
147 | for m in self.model:
148 | if m.f != -1: # if not from previous layer
149 | x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers
150 |
151 | if profile:
152 | o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPS
153 | t = time_synchronized()
154 | for _ in range(10):
155 | _ = m(x)
156 | dt.append((time_synchronized() - t) * 100)
157 | print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type))
158 |
159 | x = m(x) # run
160 | y.append(x if m.i in self.save else None) # save output
161 |
162 | if profile:
163 | print('%.1fms total' % sum(dt))
164 | return x
165 |
166 | def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency
167 | # https://arxiv.org/abs/1708.02002 section 3.3
168 | # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.
169 | m = self.model[-1] # Detect() module
170 | for mi, s in zip(m.m, m.stride): # from
171 | b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85)
172 | b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image)
173 | b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls
174 | mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
175 |
176 | def _print_biases(self):
177 | m = self.model[-1] # Detect() module
178 | for mi in m.m: # from
179 | b = mi.bias.detach().view(m.na, -1).T # conv.bias(255) to (3,85)
180 | print(('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean()))
181 |
182 | # def _print_weights(self):
183 | # for m in self.model.modules():
184 | # if type(m) is Bottleneck:
185 | # print('%10.3g' % (m.w.detach().sigmoid() * 2)) # shortcut weights
186 |
187 | def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers
188 | print('Fusing layers... ')
189 | for m in self.model.modules():
190 | if type(m) is Conv and hasattr(m, 'bn'):
191 | m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv
192 | delattr(m, 'bn') # remove batchnorm
193 | m.forward = m.fuseforward # update forward
194 | self.info()
195 | return self
196 |
197 | def nms(self, mode=True): # add or remove NMS module
198 | present = type(self.model[-1]) is NMS # last layer is NMS
199 | if mode and not present:
200 | print('Adding NMS... ')
201 | m = NMS() # module
202 | m.f = -1 # from
203 | m.i = self.model[-1].i + 1 # index
204 | self.model.add_module(name='%s' % m.i, module=m) # add
205 | self.eval()
206 | elif not mode and present:
207 | print('Removing NMS... ')
208 | self.model = self.model[:-1] # remove
209 | return self
210 |
211 | def autoshape(self): # add autoShape module
212 | print('Adding autoShape... ')
213 | m = autoShape(self) # wrap model
214 | copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=()) # copy attributes
215 | return m
216 |
217 | def info(self, verbose=False, img_size=640): # print model information
218 | model_info(self, verbose, img_size)
219 |
220 |
221 | def parse_model(d, ch): # model_dict, input_channels(3)
222 | logger.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments'))
223 | anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']
224 | na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors
225 | no = na * (nc + 5) # number of outputs = anchors * (classes + 5)
226 |
227 | layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out
228 | for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args
229 | m = eval(m) if isinstance(m, str) else m # eval strings
230 | for j, a in enumerate(args):
231 | try:
232 | args[j] = eval(a) if isinstance(a, str) else a # eval strings
233 | except:
234 | pass
235 |
236 | n = max(round(n * gd), 1) if n > 1 else n # depth gain
237 | if m in [Conv, Bottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP, C3]:
238 | c1, c2 = ch[f], args[0]
239 |
240 | # Normal
241 | # if i > 0 and args[0] != no: # channel expansion factor
242 | # ex = 1.75 # exponential (default 2.0)
243 | # e = math.log(c2 / ch[1]) / math.log(2)
244 | # c2 = int(ch[1] * ex ** e)
245 | # if m != Focus:
246 |
247 | c2 = make_divisible(c2 * gw, 8) if c2 != no else c2
248 |
249 | # Experimental
250 | # if i > 0 and args[0] != no: # channel expansion factor
251 | # ex = 1 + gw # exponential (default 2.0)
252 | # ch1 = 32 # ch[1]
253 | # e = math.log(c2 / ch1) / math.log(2) # level 1-n
254 | # c2 = int(ch1 * ex ** e)
255 | # if m != Focus:
256 | # c2 = make_divisible(c2, 8) if c2 != no else c2
257 |
258 | args = [c1, c2, *args[1:]]
259 | if m in [BottleneckCSP, C3]:
260 | args.insert(2, n)
261 | n = 1
262 | elif m is nn.BatchNorm2d:
263 | args = [ch[f]]
264 | elif m is Concat:
265 | c2 = sum([ch[-1 if x == -1 else x + 1] for x in f])
266 | elif m is Detect:
267 | args.append([ch[x + 1] for x in f])
268 | if isinstance(args[1], int): # number of anchors
269 | args[1] = [list(range(args[1] * 2))] * len(f)
270 | else:
271 | c2 = ch[f]
272 |
273 | m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args) # module
274 | t = str(m)[8:-2].replace('__main__.', '') # module type
275 | np = sum([x.numel() for x in m_.parameters()]) # number params
276 | m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params
277 | logger.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args)) # print
278 | save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist
279 | layers.append(m_)
280 | ch.append(c2)
281 | return nn.Sequential(*layers), sorted(save)
282 |
283 |
284 | if __name__ == '__main__':
285 | parser = argparse.ArgumentParser()
286 | parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml')
287 | parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
288 | opt = parser.parse_args()
289 | opt.cfg = check_file(opt.cfg) # check file
290 | set_logging()
291 | device = select_device(opt.device)
292 |
293 | # Create model
294 | model = Model(opt.cfg).to(device)
295 | model.train()
296 |
297 | # Profile
298 | # img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 640, 640).to(device)
299 | # y = model(img, profile=True)
300 |
301 | # Tensorboard
302 | # from torch.utils.tensorboard import SummaryWriter
303 | # tb_writer = SummaryWriter()
304 | # print("Run 'tensorboard --logdir=models/runs' to view tensorboard at http://localhost:6006/")
305 | # tb_writer.add_graph(model.model, img) # add model to tensorboard
306 | # tb_writer.add_image('test', img[0], dataformats='CWH') # add model to tensorboard
307 |
--------------------------------------------------------------------------------
/test.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import json
3 | import os
4 | from pathlib import Path
5 | from threading import Thread
6 |
7 | import numpy as np
8 | import torch
9 | import yaml
10 | from tqdm import tqdm
11 |
12 | from models.experimental import attempt_load
13 | from utils.datasets import create_dataloader
14 | from utils.general import coco80_to_coco91_class, check_dataset, check_file, check_img_size, box_iou, \
15 | non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, increment_path
16 | from utils.loss import compute_loss
17 | from utils.metrics import ap_per_class, ConfusionMatrix
18 | from utils.plots import plot_images, output_to_target, plot_study_txt
19 | from utils.torch_utils import select_device, time_synchronized
20 |
21 |
22 | def test(data,
23 | weights=None,
24 | batch_size=32,
25 | imgsz=640,
26 | conf_thres=0.001,
27 | iou_thres=0.6, # for NMS
28 | save_json=False,
29 | single_cls=False,
30 | augment=False,
31 | verbose=False,
32 | model=None,
33 | dataloader=None,
34 | save_dir=Path(''), # for saving images
35 | save_txt=False, # for auto-labelling
36 | save_hybrid=False, # for hybrid auto-labelling
37 | save_conf=False, # save auto-label confidences
38 | plots=True,
39 | log_imgs=0): # number of logged images
40 |
41 | # Initialize/load model and set device
42 | training = model is not None
43 | if training: # called by train.py
44 | device = next(model.parameters()).device # get model device
45 |
46 | else: # called directly
47 | set_logging()
48 | device = select_device(opt.device, batch_size=batch_size)
49 |
50 | # Directories
51 | save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run
52 | (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
53 |
54 | # Load model
55 | model = attempt_load(weights, map_location=device) # load FP32 model
56 | imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size
57 |
58 | # Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99
59 | # if device.type != 'cpu' and torch.cuda.device_count() > 1:
60 | # model = nn.DataParallel(model)
61 |
62 | # Half
63 | half = device.type != 'cpu' # half precision only supported on CUDA
64 | if half:
65 | model.half()
66 |
67 | # Configure
68 | model.eval()
69 | is_coco = data.endswith('coco.yaml') # is COCO dataset
70 | with open(data) as f:
71 | data = yaml.load(f, Loader=yaml.FullLoader) # model dict
72 | check_dataset(data) # check
73 | nc = 1 if single_cls else int(data['nc']) # number of classes
74 | iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95
75 | niou = iouv.numel()
76 |
77 | # Logging
78 | log_imgs, wandb = min(log_imgs, 100), None # ceil
79 | try:
80 | import wandb # Weights & Biases
81 | except ImportError:
82 | log_imgs = 0
83 |
84 | # Dataloader
85 | if not training:
86 | img = torch.zeros((1, 3, imgsz, imgsz), device=device) # init img
87 | _ = model(img.half() if half else img) if device.type != 'cpu' else None # run once
88 | path = data['test'] if opt.task == 'test' else data['val'] # path to val/test images
89 | dataloader = create_dataloader(path, imgsz, batch_size, model.stride.max(), opt, pad=0.5, rect=True)[0]
90 |
91 | seen = 0
92 | confusion_matrix = ConfusionMatrix(nc=nc)
93 | names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)}
94 | coco91class = coco80_to_coco91_class()
95 | s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
96 | p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.
97 | loss = torch.zeros(3, device=device)
98 | jdict, stats, ap, ap_class, wandb_images = [], [], [], [], []
99 | for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)):
100 | img = img.to(device, non_blocking=True)
101 | img = img.half() if half else img.float() # uint8 to fp16/32
102 | img /= 255.0 # 0 - 255 to 0.0 - 1.0
103 | targets = targets.to(device)
104 | nb, _, height, width = img.shape # batch size, channels, height, width
105 |
106 | with torch.no_grad():
107 | # Run model
108 | t = time_synchronized()
109 | inf_out, train_out = model(img, augment=augment) # inference and training outputs
110 | t0 += time_synchronized() - t
111 |
112 | # Compute loss
113 | if training:
114 | loss += compute_loss([x.float() for x in train_out], targets, model)[1][:3] # box, obj, cls
115 |
116 | # Run NMS
117 | targets[:, 2:6] *= torch.Tensor([width, height, width, height]).to(device) # to pixels
118 | lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling
119 | t = time_synchronized()
120 | output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres, labels=lb)
121 | t1 += time_synchronized() - t
122 |
123 | # Statistics per image
124 | for si, pred in enumerate(output):
125 | labels = targets[targets[:, 0] == si, 1:]
126 | nl = len(labels)
127 | tcls = labels[:, 0].tolist() if nl else [] # target class
128 | path = Path(paths[si])
129 | seen += 1
130 |
131 | if len(pred) == 0:
132 | if nl:
133 | stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))
134 | continue
135 |
136 | # Predictions
137 | predn = pred.clone()
138 | scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1]) # native-space pred
139 |
140 | # Append to text file
141 | if save_txt:
142 | gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]] # normalization gain whwh
143 | for *xyxy, conf, cls in predn.tolist():
144 | xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
145 | line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
146 | with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f:
147 | f.write(('%g ' * len(line)).rstrip() % line + '\n')
148 |
149 | # W&B logging
150 | if plots and len(wandb_images) < log_imgs:
151 | box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
152 | "class_id": int(cls),
153 | "box_caption": "%s %.3f" % (names[cls], conf),
154 | "scores": {"class_score": conf},
155 | "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()]
156 | boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space
157 | wandb_images.append(wandb.Image(img[si], boxes=boxes, caption=path.name))
158 |
159 | # Append to pycocotools JSON dictionary
160 | if save_json:
161 | # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
162 | image_id = int(path.stem) if path.stem.isnumeric() else path.stem
163 | box = xyxy2xywh(predn[:, :4]) # xywh
164 | box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
165 | for p, b in zip(pred.tolist(), box.tolist()):
166 | jdict.append({'image_id': image_id,
167 | 'category_id': coco91class[int(p[15])] if is_coco else int(p[15]),
168 | 'bbox': [round(x, 3) for x in b],
169 | 'score': round(p[4], 5)})
170 |
171 | # Assign all predictions as incorrect
172 | correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device)
173 | if nl:
174 | detected = [] # target indices
175 | tcls_tensor = labels[:, 0]
176 |
177 | # target boxes
178 | tbox = xywh2xyxy(labels[:, 1:5])
179 | scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1]) # native-space labels
180 | if plots:
181 | confusion_matrix.process_batch(pred, torch.cat((labels[:, 0:1], tbox), 1))
182 |
183 | # Per target class
184 | for cls in torch.unique(tcls_tensor):
185 |                     ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1)  # target indices
186 |                     pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1)  # prediction indices
187 |
188 | # Search for detections
189 | if pi.shape[0]:
190 | # Prediction to target ious
191 | ious, i = box_iou(predn[pi, :4], tbox[ti]).max(1) # best ious, indices
192 |
193 | # Append detections
194 | detected_set = set()
195 | for j in (ious > iouv[0]).nonzero(as_tuple=False):
196 | d = ti[i[j]] # detected target
197 | if d.item() not in detected_set:
198 | detected_set.add(d.item())
199 | detected.append(d)
200 | correct[pi[j]] = ious[j] > iouv # iou_thres is 1xn
201 | if len(detected) == nl: # all targets already located in image
202 | break
203 |
204 | # Append statistics (correct, conf, pcls, tcls)
205 | stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))
206 |
207 | # Plot images
208 | if plots and batch_i < 3:
209 | f = save_dir / f'test_batch{batch_i}_labels.jpg' # labels
210 | Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start()
211 | f = save_dir / f'test_batch{batch_i}_pred.jpg' # predictions
212 | Thread(target=plot_images, args=(img, output_to_target(output), paths, f, names), daemon=True).start()
213 |
214 | # Compute statistics
215 | stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy
216 | if len(stats) and stats[0].any():
217 | p, r, ap, f1, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names)
218 | p, r, ap50, ap = p[:, 0], r[:, 0], ap[:, 0], ap.mean(1) # [P, R, AP@0.5, AP@0.5:0.95]
219 | mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
220 | nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per class
221 | else:
222 | nt = torch.zeros(1)
223 |
224 | # Print results
225 | pf = '%20s' + '%12.3g' * 6 # print format
226 | print(pf % ('all', seen, nt.sum(), mp, mr, map50, map))
227 |
228 | # Print results per class
229 | if verbose and nc > 1 and len(stats):
230 | for i, c in enumerate(ap_class):
231 | print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))
232 |
233 | # Print speeds
234 | t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + (imgsz, imgsz, batch_size) # tuple
235 | if not training:
236 | print('Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g' % t)
237 |
238 | # Plots
239 | if plots:
240 | confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
241 | if wandb and wandb.run:
242 | wandb.log({"Images": wandb_images})
243 | wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob('test*.jpg'))]})
244 |
245 | # Save JSON
246 | if save_json and len(jdict):
247 | w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights
248 | anno_json = '../coco/annotations/instances_val2017.json' # annotations json
249 | pred_json = str(save_dir / f"{w}_predictions.json") # predictions json
250 | print('\nEvaluating pycocotools mAP... saving %s...' % pred_json)
251 | with open(pred_json, 'w') as f:
252 | json.dump(jdict, f)
253 |
254 | try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
255 | from pycocotools.coco import COCO
256 | from pycocotools.cocoeval import COCOeval
257 |
258 | anno = COCO(anno_json) # init annotations api
259 | pred = anno.loadRes(pred_json) # init predictions api
260 | eval = COCOeval(anno, pred, 'bbox')
261 | if is_coco:
262 | eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files] # image IDs to evaluate
263 | eval.evaluate()
264 | eval.accumulate()
265 | eval.summarize()
266 | map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5)
267 | except Exception as e:
268 | print(f'pycocotools unable to run: {e}')
269 |
270 | # Return results
271 | if not training:
272 | s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
273 | print(f"Results saved to {save_dir}{s}")
274 | model.float() # for training
275 | maps = np.zeros(nc) + map
276 | for i, c in enumerate(ap_class):
277 | maps[c] = ap[i]
278 | return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t
279 |
280 |
281 | if __name__ == '__main__':
282 | parser = argparse.ArgumentParser(prog='test.py')
283 | parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)')
284 | parser.add_argument('--data', type=str, default='data/coco128.yaml', help='*.data path')
285 | parser.add_argument('--batch-size', type=int, default=32, help='size of each image batch')
286 | parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
287 | parser.add_argument('--conf-thres', type=float, default=0.001, help='object confidence threshold')
288 | parser.add_argument('--iou-thres', type=float, default=0.6, help='IOU threshold for NMS')
289 | parser.add_argument('--task', default='val', help="'val', 'test', 'study'")
290 | parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
291 | parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
292 | parser.add_argument('--augment', action='store_true', help='augmented inference')
293 | parser.add_argument('--verbose', action='store_true', help='report mAP by class')
294 | parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
295 | parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt')
296 | parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
297 | parser.add_argument('--save-json', action='store_true', help='save a cocoapi-compatible JSON results file')
298 | parser.add_argument('--project', default='runs/test', help='save to project/name')
299 | parser.add_argument('--name', default='exp', help='save to project/name')
300 | parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
301 | opt = parser.parse_args()
302 | opt.save_json |= opt.data.endswith('coco.yaml')
303 | opt.data = check_file(opt.data) # check file
304 | print(opt)
305 |
306 | if opt.task in ['val', 'test']: # run normally
307 | test(opt.data,
308 | opt.weights,
309 | opt.batch_size,
310 | opt.img_size,
311 | opt.conf_thres,
312 | opt.iou_thres,
313 | opt.save_json,
314 | opt.single_cls,
315 | opt.augment,
316 | opt.verbose,
317 | save_txt=opt.save_txt | opt.save_hybrid,
318 | save_hybrid=opt.save_hybrid,
319 | save_conf=opt.save_conf,
320 | )
321 |
322 | elif opt.task == 'study': # run over a range of settings and save/plot
323 | for weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:
324 | f = 'study_%s_%s.txt' % (Path(opt.data).stem, Path(weights).stem) # filename to save to
325 | x = list(range(320, 800, 64)) # x axis
326 | y = [] # y axis
327 | for i in x: # img-size
328 | print('\nRunning %s point %s...' % (f, i))
329 | r, _, t = test(opt.data, weights, opt.batch_size, i, opt.conf_thres, opt.iou_thres, opt.save_json,
330 | plots=False)
331 | y.append(r + t) # results and times
332 | np.savetxt(f, y, fmt='%10.4g') # save
333 | os.system('zip -r study.zip study_*.txt')
334 | plot_study_txt(f, x) # plot
335 |
--------------------------------------------------------------------------------
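
The per-class matching loop in test.py above (file lines 183-202) marks a prediction correct at every IoU threshold in iouv (0.50:0.95) if it is the best still-unclaimed match for a same-class target. Below is a minimal, self-contained sketch of that matching rule, ignoring the per-class grouping for brevity; toy_box_iou, preds and targets are illustrative names, not part of the repository:

import torch

def toy_box_iou(box1, box2):
    # pairwise IoU for xyxy boxes: box1 (N,4), box2 (M,4) -> (N,M)
    area1 = (box1[:, 2] - box1[:, 0]) * (box1[:, 3] - box1[:, 1])
    area2 = (box2[:, 2] - box2[:, 0]) * (box2[:, 3] - box2[:, 1])
    lt = torch.max(box1[:, None, :2], box2[:, :2])
    rb = torch.min(box1[:, None, 2:], box2[:, 2:])
    inter = (rb - lt).clamp(0).prod(2)
    return inter / (area1[:, None] + area2 - inter)

iouv = torch.linspace(0.5, 0.95, 10)            # IoU thresholds, as in test.py
preds = torch.tensor([[0., 0., 10., 10.],       # two predicted boxes (xyxy)
                      [20., 20., 30., 31.]])
targets = torch.tensor([[0., 0., 10., 11.],     # two ground-truth boxes (xyxy)
                        [50., 50., 60., 60.]])

correct = torch.zeros(len(preds), len(iouv), dtype=torch.bool)
detected = set()                                # targets already claimed
ious, idx = toy_box_iou(preds, targets).max(1)  # best target per prediction
for pi in (ious > iouv[0]).nonzero(as_tuple=False).flatten():
    ti = idx[pi].item()
    if ti not in detected:                      # one prediction per target
        detected.add(ti)
        correct[pi] = ious[pi] > iouv           # true up to the achieved IoU
print(correct)

Because each target can be claimed only once, duplicate detections of the same face count as false positives at every threshold, which is what drives precision down when NMS leaves redundant boxes.
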
/general.py:
--------------------------------------------------------------------------------
1 | # General utils
2 |
3 | import glob
4 | import logging
5 | import math
6 | import os
7 | import platform
8 | import random
9 | import re
10 | import subprocess
11 | import time
12 | from pathlib import Path
13 |
14 | import cv2
15 | import numpy as np
16 | import torch
17 | import torchvision
18 | import yaml
19 |
20 | from utils.google_utils import gsutil_getsize
21 | from utils.metrics import fitness
22 | from utils.torch_utils import init_torch_seeds
23 |
24 | # Settings
25 | torch.set_printoptions(linewidth=320, precision=5, profile='long')
26 | np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5
27 | cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)
28 |
29 |
30 | def set_logging(rank=-1):
31 | logging.basicConfig(
32 | format="%(message)s",
33 | level=logging.INFO if rank in [-1, 0] else logging.WARN)
34 |
35 |
36 | def init_seeds(seed=0):
37 | random.seed(seed)
38 | np.random.seed(seed)
39 | init_torch_seeds(seed)
40 |
41 |
42 | def get_latest_run(search_dir='.'):
43 | # Return path to most recent 'last.pt' in /runs (i.e. to --resume from)
44 | last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)
45 | return max(last_list, key=os.path.getctime) if last_list else ''
46 |
47 |
48 | def check_git_status():
49 | # Suggest 'git pull' if repo is out of date
50 | if platform.system() in ['Linux', 'Darwin'] and not os.path.isfile('/.dockerenv'):
51 | s = subprocess.check_output('if [ -d .git ]; then git fetch && git status -uno; fi', shell=True).decode('utf-8')
52 | if 'Your branch is behind' in s:
53 | print(s[s.find('Your branch is behind'):s.find('\n\n')] + '\n')
54 |
55 |
56 | def check_img_size(img_size, s=32):
57 | # Verify img_size is a multiple of stride s
58 | new_size = make_divisible(img_size, int(s)) # ceil gs-multiple
59 | if new_size != img_size:
60 | print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, new_size))
61 | return new_size
62 |
63 |
64 | def check_file(file):
65 | # Search for file if not found
66 | if os.path.isfile(file) or file == '':
67 | return file
68 | else:
69 | files = glob.glob('./**/' + file, recursive=True) # find file
70 | assert len(files), 'File Not Found: %s' % file # assert file was found
71 | assert len(files) == 1, "Multiple files match '%s', specify exact path: %s" % (file, files) # assert unique
72 | return files[0] # return file
73 |
74 |
75 | def check_dataset(dict):
76 | # Download dataset if not found locally
77 | val, s = dict.get('val'), dict.get('download')
78 | if val and len(val):
79 | val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path
80 | if not all(x.exists() for x in val):
81 | print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()])
82 | if s and len(s): # download script
83 | print('Downloading %s ...' % s)
84 | if s.startswith('http') and s.endswith('.zip'): # URL
85 | f = Path(s).name # filename
86 | torch.hub.download_url_to_file(s, f)
87 | r = os.system('unzip -q %s -d ../ && rm %s' % (f, f)) # unzip
88 | else: # bash script
89 | r = os.system(s)
90 | print('Dataset autodownload %s\n' % ('success' if r == 0 else 'failure')) # analyze return value
91 | else:
92 | raise Exception('Dataset not found.')
93 |
94 |
95 | def make_divisible(x, divisor):
96 | # Returns x evenly divisible by divisor
97 | return math.ceil(x / divisor) * divisor
98 |
99 |
100 | def clean_str(s):
101 | # Cleans a string by replacing special characters with underscore _
102 | return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s)
103 |
104 |
105 | def labels_to_class_weights(labels, nc=80):
106 | # Get class weights (inverse frequency) from training labels
107 | if labels[0] is None: # no labels loaded
108 | return torch.Tensor()
109 |
110 | labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO
111 |     classes = labels[:, 0].astype(int)  # labels = [class xywh]
112 | weights = np.bincount(classes, minlength=nc) # occurrences per class
113 |
114 | # Prepend gridpoint count (for uCE training)
115 | # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum() # gridpoints per image
116 | # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5 # prepend gridpoints to start
117 |
118 | weights[weights == 0] = 1 # replace empty bins with 1
119 | weights = 1 / weights # number of targets per class
120 | weights /= weights.sum() # normalize
121 | return torch.from_numpy(weights)
122 |
123 |
124 | def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
125 | # Produces image weights based on class_weights and image contents
126 |     class_counts = np.array([np.bincount(x[:, 0].astype(int), minlength=nc) for x in labels])
127 | image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
128 | # index = random.choices(range(n), weights=image_weights, k=1) # weight image sample
129 | return image_weights
130 |
131 |
132 | def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)
133 | # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
134 | # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
135 | # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
136 | # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco
137 | # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet
138 | x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,
139 | 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
140 | 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
141 | return x
142 |
143 |
144 | def xyxy2xywh(x):
145 | # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
146 | y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
147 | y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center
148 | y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center
149 | y[:, 2] = x[:, 2] - x[:, 0] # width
150 | y[:, 3] = x[:, 3] - x[:, 1] # height
151 | return y
152 |
153 |
154 | def xywh2xyxy(x):
155 | # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
156 | y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
157 | y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x
158 | y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y
159 | y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x
160 | y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y
161 | return y
162 |
163 |
164 | def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
165 | # Rescale coords (xyxy) from img1_shape to img0_shape
166 | if ratio_pad is None: # calculate from img0_shape
167 | gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new
168 | pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding
169 | else:
170 | gain = ratio_pad[0][0]
171 | pad = ratio_pad[1]
172 |
173 | coords[:, [0, 2]] -= pad[0] # x padding
174 | coords[:, [1, 3]] -= pad[1] # y padding
175 | coords[:, :4] /= gain
176 | clip_coords(coords, img0_shape)
177 | return coords
178 |
179 |
180 | def clip_coords(boxes, img_shape):
181 |     # Clip xyxy bounding boxes to image shape (height, width)
182 | boxes[:, 0].clamp_(0, img_shape[1]) # x1
183 | boxes[:, 1].clamp_(0, img_shape[0]) # y1
184 | boxes[:, 2].clamp_(0, img_shape[1]) # x2
185 | boxes[:, 3].clamp_(0, img_shape[0]) # y2
186 |
187 |
188 | def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-9):
189 | # Returns the IoU of box1 to box2. box1 is 4, box2 is nx4
190 | box2 = box2.T
191 |
192 | # Get the coordinates of bounding boxes
193 | if x1y1x2y2: # x1, y1, x2, y2 = box1
194 | b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
195 | b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
196 | else: # transform from xywh to xyxy
197 | b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
198 | b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
199 | b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
200 | b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2
201 |
202 | # Intersection area
203 | inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
204 | (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)
205 |
206 | # Union Area
207 | w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
208 | w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
209 | union = w1 * h1 + w2 * h2 - inter + eps
210 |
211 | iou = inter / union
212 | if GIoU or DIoU or CIoU:
213 | cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width
214 | ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height
215 | if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
216 | c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared
217 | rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 +
218 | (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center distance squared
219 | if DIoU:
220 | return iou - rho2 / c2 # DIoU
221 | elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
222 | v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
223 | with torch.no_grad():
224 | alpha = v / ((1 + eps) - iou + v)
225 | return iou - (rho2 / c2 + v * alpha) # CIoU
226 | else: # GIoU https://arxiv.org/pdf/1902.09630.pdf
227 | c_area = cw * ch + eps # convex area
228 | return iou - (c_area - union) / c_area # GIoU
229 | else:
230 | return iou # IoU
231 |
232 |
233 | def box_iou(box1, box2):
234 | # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
235 | """
236 | Return intersection-over-union (Jaccard index) of boxes.
237 | Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
238 | Arguments:
239 | box1 (Tensor[N, 4])
240 | box2 (Tensor[M, 4])
241 | Returns:
242 | iou (Tensor[N, M]): the NxM matrix containing the pairwise
243 | IoU values for every element in boxes1 and boxes2
244 | """
245 |
246 | def box_area(box):
247 | # box = 4xn
248 | return (box[2] - box[0]) * (box[3] - box[1])
249 |
250 | area1 = box_area(box1.T)
251 | area2 = box_area(box2.T)
252 |
253 | # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
254 | inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
255 | return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter)
256 |
257 |
258 | def wh_iou(wh1, wh2):
259 | # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2
260 | wh1 = wh1[:, None] # [N,1,2]
261 | wh2 = wh2[None] # [1,M,2]
262 | inter = torch.min(wh1, wh2).prod(2) # [N,M]
263 | return inter / (wh1.prod(2) + wh2.prod(2) - inter) # iou = inter / (area1 + area2 - inter)
264 |
265 |
266 | def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, labels=()):
267 | """Performs Non-Maximum Suppression (NMS) on inference results
268 |
269 | Returns:
270 | detections with shape: nx6 (x1, y1, x2, y2, conf, cls)
271 | """
272 |
273 | nc = prediction.shape[2] - 5 # number of classes
274 | xc = prediction[..., 4] > conf_thres # candidates
275 |
276 | # Settings
277 | min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height
278 | max_det = 300 # maximum number of detections per image
279 | time_limit = 10.0 # seconds to quit after
280 | redundant = True # require redundant detections
281 | multi_label = nc > 1 # multiple labels per box (adds 0.5ms/img)
282 | merge = False # use merge-NMS
283 |
284 | t = time.time()
285 | output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]
286 | for xi, x in enumerate(prediction): # image index, image inference
287 | # Apply constraints
288 | # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height
289 | x = x[xc[xi]] # confidence
290 |
291 | # Cat apriori labels if autolabelling
292 | if labels and len(labels[xi]):
293 | l = labels[xi]
294 | v = torch.zeros((len(l), nc + 5), device=x.device)
295 | v[:, :4] = l[:, 1:5] # box
296 | v[:, 4] = 1.0 # conf
297 | v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls
298 | x = torch.cat((x, v), 0)
299 |
300 | # If none remain process next image
301 | if not x.shape[0]:
302 | continue
303 |
304 | # Compute conf
305 | x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf
306 |
307 | # Box (center x, center y, width, height) to (x1, y1, x2, y2)
308 | box = xywh2xyxy(x[:, :4])
309 |
310 | # Detections matrix nx6 (xyxy, conf, cls)
311 | if multi_label:
312 | i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
313 | x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
314 | else: # best class only
315 | conf, j = x[:, 5:].max(1, keepdim=True)
316 | x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]
317 |
318 | # Filter by class
319 | if classes is not None:
320 | x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]
321 |
322 | # Apply finite constraint
323 | # if not torch.isfinite(x).all():
324 | # x = x[torch.isfinite(x).all(1)]
325 |
326 | # If none remain process next image
327 | n = x.shape[0] # number of boxes
328 | if not n:
329 | continue
330 |
331 | # Sort by confidence
332 | # x = x[x[:, 4].argsort(descending=True)]
333 |
334 | # Batched NMS
335 | c = x[:, 5:6] * (0 if agnostic else max_wh) # classes
336 | boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores
337 | i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS
338 | if i.shape[0] > max_det: # limit detections
339 | i = i[:max_det]
340 | if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean)
341 | # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
342 | iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix
343 | weights = iou * scores[None] # box weights
344 | x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes
345 | if redundant:
346 | i = i[iou.sum(1) > 1] # require redundancy
347 |
348 | output[xi] = x[i]
349 | if (time.time() - t) > time_limit:
350 | break # time limit exceeded
351 |
352 | return output
353 |
354 |
355 | def non_max_suppression_face(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, labels=()):
356 | """Performs Non-Maximum Suppression (NMS) on inference results
357 |
358 | Returns:
359 |          detections with shape: nx16 (x1, y1, x2, y2, conf, 10 landmark coords, cls)
360 | """
361 |
362 | nc = prediction.shape[2] - 15 # number of classes
363 | xc = prediction[..., 4] > conf_thres # candidates
364 |
365 | # Settings
366 | min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height
367 | max_det = 300 # maximum number of detections per image
368 | time_limit = 10.0 # seconds to quit after
369 | redundant = True # require redundant detections
370 | multi_label = nc > 1 # multiple labels per box (adds 0.5ms/img)
371 | merge = False # use merge-NMS
372 |
373 | t = time.time()
374 | output = [torch.zeros((0, 16), device=prediction.device)] * prediction.shape[0]
375 | for xi, x in enumerate(prediction): # image index, image inference
376 | # Apply constraints
377 | # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height
378 | x = x[xc[xi]] # confidence
379 |
380 | # Cat apriori labels if autolabelling
381 | if labels and len(labels[xi]):
382 | l = labels[xi]
383 | v = torch.zeros((len(l), nc + 15), device=x.device)
384 | v[:, :4] = l[:, 1:5] # box
385 | v[:, 4] = 1.0 # conf
386 | v[range(len(l)), l[:, 0].long() + 15] = 1.0 # cls
387 | x = torch.cat((x, v), 0)
388 |
389 | # If none remain process next image
390 | if not x.shape[0]:
391 | continue
392 |
393 | # Compute conf
394 | x[:, 15:] *= x[:, 4:5] # conf = obj_conf * cls_conf
395 |
396 | # Box (center x, center y, width, height) to (x1, y1, x2, y2)
397 | box = xywh2xyxy(x[:, :4])
398 |
399 | #landmarks = x[:, 5:15]
400 |
401 |         # Detections matrix nx16 (xyxy, conf, landmarks, cls)
402 |         if multi_label:
403 |             i, j = (x[:, 15:] > conf_thres).nonzero(as_tuple=False).T
404 |             x = torch.cat((box[i], x[i, j + 15, None], x[i, 5:15], j[:, None].float()), 1)
405 | else: # best class only
406 | conf, j = x[:, 15:].max(1, keepdim=True)
407 | x = torch.cat((box, conf, x[:, 5:15], j.float()), 1)[conf.view(-1) > conf_thres]
408 |
409 | # Filter by class
410 | if classes is not None:
411 |             x = x[(x[:, 15:16] == torch.tensor(classes, device=x.device)).any(1)]
412 |
413 | # Apply finite constraint
414 | # if not torch.isfinite(x).all():
415 | # x = x[torch.isfinite(x).all(1)]
416 |
417 | # If none remain process next image
418 | n = x.shape[0] # number of boxes
419 | if not n:
420 | continue
421 |
422 | # Sort by confidence
423 | # x = x[x[:, 4].argsort(descending=True)]
424 |
425 | # Batched NMS
426 | c = x[:, 15:16] * (0 if agnostic else max_wh) # classes
427 | boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores
428 | i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS
429 | if i.shape[0] > max_det: # limit detections
430 | i = i[:max_det]
431 | if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean)
432 | # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
433 | iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix
434 | weights = iou * scores[None] # box weights
435 | x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes
436 | if redundant:
437 | i = i[iou.sum(1) > 1] # require redundancy
438 |
439 | output[xi] = x[i]
440 | if (time.time() - t) > time_limit:
441 | break # time limit exceeded
442 |
443 | return output
444 |
445 |
446 |
447 | def strip_optimizer(f='weights/best.pt', s=''): # from utils.general import *; strip_optimizer()
448 | # Strip optimizer from 'f' to finalize training, optionally save as 's'
449 | x = torch.load(f, map_location=torch.device('cpu'))
450 | x['optimizer'] = None
451 | x['training_results'] = None
452 | x['epoch'] = -1
453 | x['model'].half() # to FP16
454 | for p in x['model'].parameters():
455 | p.requires_grad = False
456 | torch.save(x, s or f)
457 | mb = os.path.getsize(s or f) / 1E6 # filesize
458 | print('Optimizer stripped from %s,%s %.1fMB' % (f, (' saved as %s,' % s) if s else '', mb))
459 |
460 |
461 | def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''):
462 | # Print mutation results to evolve.txt (for use with train.py --evolve)
463 | a = '%10s' * len(hyp) % tuple(hyp.keys()) # hyperparam keys
464 | b = '%10.3g' * len(hyp) % tuple(hyp.values()) # hyperparam values
465 | c = '%10.4g' * len(results) % results # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3)
466 | print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c))
467 |
468 | if bucket:
469 | url = 'gs://%s/evolve.txt' % bucket
470 | if gsutil_getsize(url) > (os.path.getsize('evolve.txt') if os.path.exists('evolve.txt') else 0):
471 | os.system('gsutil cp %s .' % url) # download evolve.txt if larger than local
472 |
473 | with open('evolve.txt', 'a') as f: # append result
474 | f.write(c + b + '\n')
475 | x = np.unique(np.loadtxt('evolve.txt', ndmin=2), axis=0) # load unique rows
476 | x = x[np.argsort(-fitness(x))] # sort
477 | np.savetxt('evolve.txt', x, '%10.3g') # save sort by fitness
478 |
479 | # Save yaml
480 | for i, k in enumerate(hyp.keys()):
481 | hyp[k] = float(x[0, i + 7])
482 | with open(yaml_file, 'w') as f:
483 | results = tuple(x[0, :7])
484 | c = '%10.4g' * len(results) % results # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3)
485 | f.write('# Hyperparameter Evolution Results\n# Generations: %g\n# Metrics: ' % len(x) + c + '\n\n')
486 | yaml.dump(hyp, f, sort_keys=False)
487 |
488 | if bucket:
489 | os.system('gsutil cp evolve.txt %s gs://%s' % (yaml_file, bucket)) # upload
490 |
491 |
492 | def apply_classifier(x, model, img, im0):
493 | # applies a second stage classifier to yolo outputs
494 | im0 = [im0] if isinstance(im0, np.ndarray) else im0
495 | for i, d in enumerate(x): # per image
496 | if d is not None and len(d):
497 | d = d.clone()
498 |
499 | # Reshape and pad cutouts
500 | b = xyxy2xywh(d[:, :4]) # boxes
501 | b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square
502 | b[:, 2:] = b[:, 2:] * 1.3 + 30 # pad
503 | d[:, :4] = xywh2xyxy(b).long()
504 |
505 | # Rescale boxes from img_size to im0 size
506 | scale_coords(img.shape[2:], d[:, :4], im0[i].shape)
507 |
508 | # Classes
509 | pred_cls1 = d[:, 5].long()
510 | ims = []
511 | for j, a in enumerate(d): # per item
512 | cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
513 | im = cv2.resize(cutout, (224, 224)) # BGR
514 | # cv2.imwrite('test%i.jpg' % j, cutout)
515 |
516 |             im = im[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x224x224
517 | im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to float32
518 | im /= 255.0 # 0 - 255 to 0.0 - 1.0
519 | ims.append(im)
520 |
521 | pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1) # classifier prediction
522 | x[i] = x[i][pred_cls1 == pred_cls2] # retain matching class detections
523 |
524 | return x
525 |
526 |
527 | def increment_path(path, exist_ok=True, sep=''):
528 |     # Increment path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3 etc.
529 | path = Path(path) # os-agnostic
530 | if (path.exists() and exist_ok) or (not path.exists()):
531 | return str(path)
532 | else:
533 | dirs = glob.glob(f"{path}{sep}*") # similar paths
534 | matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs]
535 | i = [int(m.groups()[0]) for m in matches if m] # indices
536 | n = max(i) + 1 if i else 2 # increment number
537 | return f"{path}{sep}{n}" # update path
538 |
--------------------------------------------------------------------------------
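
Each row returned by non_max_suppression_face above is 16 values long: the box (xyxy), obj_conf * cls_conf, five landmark (x, y) pairs, then the class index. A minimal decoding sketch on a random dummy tensor follows; the shapes are assumptions for illustration (a real model produces them at inference), and importing general requires the repository's utils package on the path:

import torch
from general import non_max_suppression_face  # repository root module

torch.manual_seed(0)
pred = torch.rand(1, 100, 16)                 # 1 image, 100 candidates, 4+1+10+1 channels
pred[..., :4] = pred[..., :4] * 48 + 8        # fake xywh boxes in pixels
det = non_max_suppression_face(pred, conf_thres=0.25, iou_thres=0.45)[0]

for row in det.tolist():
    box = row[:4]                             # x1, y1, x2, y2
    conf = row[4]                             # obj_conf * cls_conf
    landmarks = list(zip(row[5:15:2], row[6:15:2]))  # five (x, y) keypoints
    cls = int(row[15])                        # class index (0 = face)
    print(box, conf, landmarks, cls)

Carrying the ten landmark columns through NMS unchanged is what lets detect_one.py draw keypoints for exactly the boxes that survive suppression.
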
/train.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import logging
3 | import math
4 | import os
5 | import random
6 | import time
7 | from pathlib import Path
8 | from threading import Thread
9 | from warnings import warn
10 |
11 | import numpy as np
12 | import torch.distributed as dist
13 | import torch.nn as nn
14 | import torch.nn.functional as F
15 | import torch.optim as optim
16 | import torch.optim.lr_scheduler as lr_scheduler
17 | import torch.utils.data
18 | import yaml
19 | from torch.cuda import amp
20 | from torch.nn.parallel import DistributedDataParallel as DDP
21 | from torch.utils.tensorboard import SummaryWriter
22 | from tqdm import tqdm
23 |
24 | import test # import test.py to get mAP after each epoch
25 | from models.experimental import attempt_load
26 | from models.yolo import Model
27 | from utils.autoanchor import check_anchors
28 | from utils.datasets import create_dataloader
29 | from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \
30 | fitness, strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \
31 | print_mutation, set_logging
32 | from utils.google_utils import attempt_download
33 | from utils.loss import compute_loss
34 | from utils.plots import plot_images, plot_labels, plot_results, plot_evolution
35 | from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first
36 |
37 | logger = logging.getLogger(__name__)
38 |
39 | try:
40 | import wandb
41 | except ImportError:
42 | wandb = None
43 | logger.info("Install Weights & Biases for experiment logging via 'pip install wandb' (recommended)")
44 |
45 |
46 | def train(hyp, opt, device, tb_writer=None, wandb=None):
47 | logger.info(f'Hyperparameters {hyp}')
48 | save_dir, epochs, batch_size, total_batch_size, weights, rank = \
49 | Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank
50 |
51 | # Directories
52 | wdir = save_dir / 'weights'
53 | wdir.mkdir(parents=True, exist_ok=True) # make dir
54 | last = wdir / 'last.pt'
55 | best = wdir / 'best.pt'
56 | results_file = save_dir / 'results.txt'
57 |
58 | # Save run settings
59 | with open(save_dir / 'hyp.yaml', 'w') as f:
60 | yaml.dump(hyp, f, sort_keys=False)
61 | with open(save_dir / 'opt.yaml', 'w') as f:
62 | yaml.dump(vars(opt), f, sort_keys=False)
63 |
64 | # Configure
65 | plots = not opt.evolve # create plots
66 | cuda = device.type != 'cpu'
67 | init_seeds(2 + rank)
68 | with open(opt.data) as f:
69 | data_dict = yaml.load(f, Loader=yaml.FullLoader) # data dict
70 | with torch_distributed_zero_first(rank):
71 | check_dataset(data_dict) # check
72 | train_path = data_dict['train']
73 | test_path = data_dict['val']
74 | nc = 1 if opt.single_cls else int(data_dict['nc']) # number of classes
75 | names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names
76 | assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check
77 |
78 | # Model
79 | pretrained = weights.endswith('.pt')
80 | if pretrained:
81 | with torch_distributed_zero_first(rank):
82 | attempt_download(weights) # download if not found locally
83 | ckpt = torch.load(weights, map_location=device) # load checkpoint
84 | if hyp.get('anchors'):
85 | ckpt['model'].yaml['anchors'] = round(hyp['anchors']) # force autoanchor
86 | model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc).to(device) # create
87 | exclude = ['anchor'] if opt.cfg or hyp.get('anchors') else [] # exclude keys
88 | state_dict = ckpt['model'].float().state_dict() # to FP32
89 | state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude) # intersect
90 | model.load_state_dict(state_dict, strict=False) # load
91 | logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights)) # report
92 | else:
93 | model = Model(opt.cfg, ch=3, nc=nc).to(device) # create
94 |
95 | # Freeze
96 | freeze = [] # parameter names to freeze (full or partial)
97 | for k, v in model.named_parameters():
98 | v.requires_grad = True # train all layers
99 | if any(x in k for x in freeze):
100 | print('freezing %s' % k)
101 | v.requires_grad = False
102 |
103 | # Optimizer
104 | nbs = 64 # nominal batch size
105 | accumulate = max(round(nbs / total_batch_size), 1) # accumulate loss before optimizing
106 | hyp['weight_decay'] *= total_batch_size * accumulate / nbs # scale weight_decay
107 |
108 | pg0, pg1, pg2 = [], [], [] # optimizer parameter groups
109 | for k, v in model.named_modules():
110 | if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):
111 | pg2.append(v.bias) # biases
112 | if isinstance(v, nn.BatchNorm2d):
113 | pg0.append(v.weight) # no decay
114 | elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):
115 | pg1.append(v.weight) # apply decay
116 |
117 | if opt.adam:
118 | optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum
119 | else:
120 | optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)
121 |
122 | optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']}) # add pg1 with weight_decay
123 | optimizer.add_param_group({'params': pg2}) # add pg2 (biases)
124 | logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
125 | del pg0, pg1, pg2
126 |
127 | # Scheduler https://arxiv.org/pdf/1812.01187.pdf
128 | # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
129 | lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - hyp['lrf']) + hyp['lrf'] # cosine
130 | scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
131 | # plot_lr_scheduler(optimizer, scheduler, epochs)
132 |
133 | # Logging
134 | if wandb and wandb.run is None:
135 | opt.hyp = hyp # add hyperparameters
136 | wandb_run = wandb.init(config=opt, resume="allow",
137 | project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem,
138 | name=save_dir.stem,
139 | id=ckpt.get('wandb_id') if 'ckpt' in locals() else None)
140 | loggers = {'wandb': wandb} # loggers dict
141 |
142 | # Resume
143 | start_epoch, best_fitness = 0, 0.0
144 | if pretrained:
145 | # Optimizer
146 | if ckpt['optimizer'] is not None:
147 | optimizer.load_state_dict(ckpt['optimizer'])
148 | best_fitness = ckpt['best_fitness']
149 |
150 | # Results
151 | if ckpt.get('training_results') is not None:
152 | with open(results_file, 'w') as file:
153 | file.write(ckpt['training_results']) # write results.txt
154 |
155 | # Epochs
156 | start_epoch = ckpt['epoch'] + 1
157 | if opt.resume:
158 | assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs)
159 | if epochs < start_epoch:
160 | logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' %
161 | (weights, ckpt['epoch'], epochs))
162 | epochs += ckpt['epoch'] # finetune additional epochs
163 |
164 | del ckpt, state_dict
165 |
166 | # Image sizes
167 | gs = int(max(model.stride)) # grid size (max stride)
168 | imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size] # verify imgsz are gs-multiples
169 |
170 | # DP mode
171 | if cuda and rank == -1 and torch.cuda.device_count() > 1:
172 | model = torch.nn.DataParallel(model)
173 |
174 | # SyncBatchNorm
175 | if opt.sync_bn and cuda and rank != -1:
176 | model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
177 | logger.info('Using SyncBatchNorm()')
178 |
179 | # EMA
180 | ema = ModelEMA(model) if rank in [-1, 0] else None
181 |
182 | # DDP mode
183 | if cuda and rank != -1:
184 | model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank)
185 |
186 | # Trainloader
187 | dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt,
188 | hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank,
189 | world_size=opt.world_size, workers=opt.workers,
190 | image_weights=opt.image_weights)
191 | mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class
192 | nb = len(dataloader) # number of batches
193 | assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1)
194 |
195 | # Process 0
196 | if rank in [-1, 0]:
197 | ema.updates = start_epoch * nb // accumulate # set EMA updates
198 | testloader = create_dataloader(test_path, imgsz_test, total_batch_size, gs, opt, # testloader
199 | hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True,
200 | rank=-1, world_size=opt.world_size, workers=opt.workers, pad=0.5)[0]
201 |
202 | if not opt.resume:
203 | labels = np.concatenate(dataset.labels, 0)
204 | c = torch.tensor(labels[:, 0]) # classes
205 | # cf = torch.bincount(c.long(), minlength=nc) + 1. # frequency
206 | # model._initialize_biases(cf.to(device))
207 | if plots:
208 | plot_labels(labels, save_dir, loggers)
209 | if tb_writer:
210 | tb_writer.add_histogram('classes', c, 0)
211 |
212 | # Anchors
213 | if not opt.noautoanchor:
214 | check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)
215 |
216 | # Model parameters
217 | hyp['cls'] *= nc / 80. # scale coco-tuned hyp['cls'] to current dataset
218 | model.nc = nc # attach number of classes to model
219 | model.hyp = hyp # attach hyperparameters to model
220 | model.gr = 1.0 # iou loss ratio (obj_loss = 1.0 or iou)
221 | model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights
222 | model.names = names
223 |
224 | # Start training
225 | t0 = time.time()
226 | nw = max(round(hyp['warmup_epochs'] * nb), 1000) # number of warmup iterations, max(3 epochs, 1k iterations)
227 | # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training
228 | maps = np.zeros(nc) # mAP per class
229 | results = (0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
230 | scheduler.last_epoch = start_epoch - 1 # do not move
231 | scaler = amp.GradScaler(enabled=cuda)
232 | logger.info('Image sizes %g train, %g test\n'
233 | 'Using %g dataloader workers\nLogging results to %s\n'
234 | 'Starting training for %g epochs...' % (imgsz, imgsz_test, dataloader.num_workers, save_dir, epochs))
235 | for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------
236 | model.train()
237 |
238 | # Update image weights (optional)
239 | if opt.image_weights:
240 | # Generate indices
241 | if rank in [-1, 0]:
242 | cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights
243 | iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights
244 | dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx
245 | # Broadcast if DDP
246 | if rank != -1:
247 | indices = (torch.tensor(dataset.indices) if rank == 0 else torch.zeros(dataset.n)).int()
248 | dist.broadcast(indices, 0)
249 | if rank != 0:
250 | dataset.indices = indices.cpu().numpy()
251 |
252 | # Update mosaic border
253 | # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
254 | # dataset.mosaic_border = [b - imgsz, -b] # height, width borders
255 |
256 | mloss = torch.zeros(5, device=device) # mean losses
257 | if rank != -1:
258 | dataloader.sampler.set_epoch(epoch)
259 | pbar = enumerate(dataloader)
260 | logger.info(('\n' + '%10s' * 9) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'landmark', 'total', 'targets', 'img_size'))
261 | if rank in [-1, 0]:
262 | pbar = tqdm(pbar, total=nb) # progress bar
263 | optimizer.zero_grad()
264 | for i, (imgs, targets, paths, _) in pbar: # batch -------------------------------------------------------------
265 | ni = i + nb * epoch # number integrated batches (since train start)
266 | imgs = imgs.to(device, non_blocking=True).float() / 255.0 # uint8 to float32, 0-255 to 0.0-1.0
267 |
268 | # Warmup
269 | if ni <= nw:
270 | xi = [0, nw] # x interp
271 | # model.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou)
272 | accumulate = max(1, np.interp(ni, xi, [1, nbs / total_batch_size]).round())
273 | for j, x in enumerate(optimizer.param_groups):
274 | # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
275 | x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
276 | if 'momentum' in x:
277 | x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])
278 |
279 | # Multi-scale
280 | if opt.multi_scale:
281 |                 sz = random.randrange(int(imgsz * 0.5), int(imgsz * 1.5 + gs)) // gs * gs  # size
282 | sf = sz / max(imgs.shape[2:]) # scale factor
283 | if sf != 1:
284 | ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple)
285 | imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)
286 |
287 | # Forward
288 | with amp.autocast(enabled=cuda):
289 | pred = model(imgs) # forward
290 | loss, loss_items = compute_loss(pred, targets.to(device), model) # loss scaled by batch_size
291 | if rank != -1:
292 | loss *= opt.world_size # gradient averaged between devices in DDP mode
293 |
294 | # Backward
295 | scaler.scale(loss).backward()
296 |
297 | # Optimize
298 | if ni % accumulate == 0:
299 | scaler.step(optimizer) # optimizer.step
300 | scaler.update()
301 | optimizer.zero_grad()
302 | if ema:
303 | ema.update(model)
304 |
305 | # Print
306 | if rank in [-1, 0]:
307 | mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
308 | mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0) # (GB)
309 | s = ('%10s' * 2 + '%10.4g' * 7) % (
310 | '%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1])
311 | pbar.set_description(s)
312 |
313 | # Plot
314 | if plots and ni < 3:
315 | f = save_dir / f'train_batch{ni}.jpg' # filename
316 | Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start()
317 | # if tb_writer:
318 | # tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
319 | # tb_writer.add_graph(model, imgs) # add model to tensorboard
320 | elif plots and ni == 3 and wandb:
321 | wandb.log({"Mosaics": [wandb.Image(str(x), caption=x.name) for x in save_dir.glob('train*.jpg')]})
322 |
323 | # end batch ------------------------------------------------------------------------------------------------
324 | # end epoch ----------------------------------------------------------------------------------------------------
325 |
326 | # Scheduler
327 | lr = [x['lr'] for x in optimizer.param_groups] # for tensorboard
328 | scheduler.step()
329 |
330 | # DDP process 0 or single-GPU
331 | if rank in [-1, 0]:
332 | # mAP
333 | if ema:
334 | ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights'])
335 | final_epoch = epoch + 1 == epochs
336 | if not opt.notest or final_epoch: # Calculate mAP
337 | results, maps, times = test.test(opt.data,
338 | batch_size=total_batch_size,
339 | imgsz=imgsz_test,
340 | model=ema.ema,
341 | single_cls=opt.single_cls,
342 | dataloader=testloader,
343 | save_dir=save_dir,
344 | plots=plots and final_epoch,
345 | log_imgs=opt.log_imgs if wandb else 0)
346 |
347 | # Write
348 | with open(results_file, 'a') as f:
349 | f.write(s + '%10.4g' * 7 % results + '\n') # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
350 | if len(opt.name) and opt.bucket:
351 | os.system('gsutil cp %s gs://%s/results/results%s.txt' % (results_file, opt.bucket, opt.name))
352 |
353 | # Log
354 |             tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', 'train/landmark_loss',  # train loss
355 | 'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',
356 | 'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss
357 | 'x/lr0', 'x/lr1', 'x/lr2'] # params
358 | for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags):
359 | if tb_writer:
360 | tb_writer.add_scalar(tag, x, epoch) # tensorboard
361 | if wandb:
362 | wandb.log({tag: x}) # W&B
363 |
364 | # Update best mAP
365 | fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
366 | if fi > best_fitness:
367 | best_fitness = fi
368 |
369 | # Save model
370 | save = (not opt.nosave) or (final_epoch and not opt.evolve)
371 | if save:
372 | with open(results_file, 'r') as f: # create checkpoint
373 | ckpt = {'epoch': epoch,
374 | 'best_fitness': best_fitness,
375 | 'training_results': f.read(),
376 | 'model': ema.ema,
377 | 'optimizer': None if final_epoch else optimizer.state_dict(),
378 | 'wandb_id': wandb_run.id if wandb else None}
379 |
380 | # Save last, best and delete
381 | torch.save(ckpt, last)
382 | if best_fitness == fi:
383 | torch.save(ckpt, best)
384 | del ckpt
385 | # end epoch ----------------------------------------------------------------------------------------------------
386 | # end training
387 |
388 | if rank in [-1, 0]:
389 | # Strip optimizers
390 | final = best if best.exists() else last # final model
391 | for f in [last, best]:
392 | if f.exists():
393 | strip_optimizer(f) # strip optimizers
394 | if opt.bucket:
395 | os.system(f'gsutil cp {final} gs://{opt.bucket}/weights') # upload
396 |
397 | # Plots
398 | if plots:
399 | plot_results(save_dir=save_dir) # save as results.png
400 | if wandb:
401 | files = ['results.png', 'precision_recall_curve.png', 'confusion_matrix.png']
402 | wandb.log({"Results": [wandb.Image(str(save_dir / f), caption=f) for f in files
403 | if (save_dir / f).exists()]})
404 | if opt.log_artifacts:
405 | wandb.log_artifact(artifact_or_path=str(final), type='model', name=save_dir.stem)
406 |
407 | # Test best.pt
408 | logger.info('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))
409 | if opt.data.endswith('coco.yaml') and nc == 80: # if COCO
410 | for conf, iou, save_json in ([0.25, 0.45, False], [0.001, 0.65, True]): # speed, mAP tests
411 | results, _, _ = test.test(opt.data,
412 | batch_size=total_batch_size,
413 | imgsz=imgsz_test,
414 | conf_thres=conf,
415 | iou_thres=iou,
416 | model=attempt_load(final, device).half(),
417 | single_cls=opt.single_cls,
418 | dataloader=testloader,
419 | save_dir=save_dir,
420 | save_json=save_json,
421 | plots=False)
422 |
423 | else:
424 | dist.destroy_process_group()
425 |
426 | wandb.run.finish() if wandb and wandb.run else None
427 | torch.cuda.empty_cache()
428 | return results
429 |
430 |
431 | if __name__ == '__main__':
432 | parser = argparse.ArgumentParser()
433 | parser.add_argument('--weights', type=str, default='./weights/yolov5s.pt', help='initial weights path')
434 | parser.add_argument('--cfg', type=str, default='', help='model.yaml path')
435 | parser.add_argument('--data', type=str, default='data/wideface.yaml', help='data.yaml path')
436 | parser.add_argument('--hyp', type=str, default='data/hyp.scratch.yaml', help='hyperparameters path')
437 | parser.add_argument('--epochs', type=int, default=300)
438 | parser.add_argument('--batch-size', type=int, default=6, help='total batch size for all GPUs')
439 | parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='[train, test] image sizes')
440 | parser.add_argument('--rect', action='store_true', help='rectangular training')
441 | parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
442 | parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
443 | parser.add_argument('--notest', action='store_true', help='only test final epoch')
444 | parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check')
445 | parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters')
446 | parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
447 | parser.add_argument('--cache-images', action='store_true', help='cache images for faster training')
448 | parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
449 | parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
450 | parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
451 | parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class')
452 | parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer')
453 | parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
454 | parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')
455 | parser.add_argument('--log-imgs', type=int, default=16, help='number of images for W&B logging, max 100')
456 | parser.add_argument('--log-artifacts', action='store_true', help='log artifacts, i.e. final trained model')
457 | parser.add_argument('--workers', type=int, default=2, help='maximum number of dataloader workers')
458 | parser.add_argument('--project', default='runs/train', help='save to project/name')
459 | parser.add_argument('--name', default='exp', help='save to project/name')
460 | parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
461 | opt = parser.parse_args()
462 |
463 | # Set DDP variables
464 | opt.total_batch_size = opt.batch_size
465 | opt.world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1
466 | opt.global_rank = int(os.environ['RANK']) if 'RANK' in os.environ else -1
467 | set_logging(opt.global_rank)
468 | if opt.global_rank in [-1, 0]:
469 | check_git_status()
470 |
471 | # Resume
472 | if opt.resume: # resume an interrupted run
473 | ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run() # specified or most recent path
474 | assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist'
475 | with open(Path(ckpt).parent.parent / 'opt.yaml') as f:
476 | opt = argparse.Namespace(**yaml.load(f, Loader=yaml.FullLoader)) # replace
477 | opt.cfg, opt.weights, opt.resume = '', ckpt, True
478 | logger.info('Resuming training from %s' % ckpt)
479 | else:
480 | # opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml')
481 | opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp) # check files
482 | assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
483 | opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size))) # extend to 2 sizes (train, test)
484 | opt.name = 'evolve' if opt.evolve else opt.name
485 | opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve) # increment run
486 |
487 | # DDP mode
488 | device = select_device(opt.device, batch_size=opt.batch_size)
489 | if opt.local_rank != -1:
490 | assert torch.cuda.device_count() > opt.local_rank
491 | torch.cuda.set_device(opt.local_rank)
492 | device = torch.device('cuda', opt.local_rank)
493 | dist.init_process_group(backend='nccl', init_method='env://') # distributed backend
494 | assert opt.batch_size % opt.world_size == 0, '--batch-size must be multiple of CUDA device count'
495 | opt.batch_size = opt.total_batch_size // opt.world_size
496 |
497 | # Hyperparameters
498 | with open(opt.hyp) as f:
499 | hyp = yaml.load(f, Loader=yaml.FullLoader) # load hyps
500 | if 'box' not in hyp:
501 | warn('Compatibility: %s missing "box" which was renamed from "giou" in %s' %
502 | (opt.hyp, 'https://github.com/ultralytics/yolov5/pull/1120'))
503 | hyp['box'] = hyp.pop('giou')
504 |
505 | # Train
506 | logger.info(opt)
507 | if not opt.evolve:
508 | tb_writer = None # init loggers
509 | if opt.global_rank in [-1, 0]:
510 | logger.info(f'Start Tensorboard with "tensorboard --logdir {opt.project}", view at http://localhost:6006/')
511 | tb_writer = SummaryWriter(opt.save_dir) # Tensorboard
512 | train(hyp, opt, device, tb_writer, wandb)
513 |
514 | # Evolve hyperparameters (optional)
515 | else:
516 | # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
517 | meta = {'lr0': (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3)
518 | 'lrf': (1, 0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf)
519 | 'momentum': (0.3, 0.6, 0.98), # SGD momentum/Adam beta1
520 | 'weight_decay': (1, 0.0, 0.001), # optimizer weight decay
521 | 'warmup_epochs': (1, 0.0, 5.0), # warmup epochs (fractions ok)
522 | 'warmup_momentum': (1, 0.0, 0.95), # warmup initial momentum
523 | 'warmup_bias_lr': (1, 0.0, 0.2), # warmup initial bias lr
524 | 'box': (1, 0.02, 0.2), # box loss gain
525 | 'cls': (1, 0.2, 4.0), # cls loss gain
526 | 'cls_pw': (1, 0.5, 2.0), # cls BCELoss positive_weight
527 | 'obj': (1, 0.2, 4.0), # obj loss gain (scale with pixels)
528 | 'obj_pw': (1, 0.5, 2.0), # obj BCELoss positive_weight
529 | 'iou_t': (0, 0.1, 0.7), # IoU training threshold
530 | 'anchor_t': (1, 2.0, 8.0), # anchor-multiple threshold
531 | 'anchors': (2, 2.0, 10.0), # anchors per output grid (0 to ignore)
532 | 'fl_gamma': (0, 0.0, 2.0), # focal loss gamma (efficientDet default gamma=1.5)
533 | 'hsv_h': (1, 0.0, 0.1), # image HSV-Hue augmentation (fraction)
534 | 'hsv_s': (1, 0.0, 0.9), # image HSV-Saturation augmentation (fraction)
535 | 'hsv_v': (1, 0.0, 0.9), # image HSV-Value augmentation (fraction)
536 | 'degrees': (1, 0.0, 45.0), # image rotation (+/- deg)
537 | 'translate': (1, 0.0, 0.9), # image translation (+/- fraction)
538 | 'scale': (1, 0.0, 0.9), # image scale (+/- gain)
539 | 'shear': (1, 0.0, 10.0), # image shear (+/- deg)
540 | 'perspective': (0, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001
541 | 'flipud': (1, 0.0, 1.0), # image flip up-down (probability)
542 | 'fliplr': (0, 0.0, 1.0), # image flip left-right (probability)
543 |             'mosaic': (1, 0.0, 1.0), # image mosaic (probability)
544 | 'mixup': (1, 0.0, 1.0)} # image mixup (probability)
545 |
546 | assert opt.local_rank == -1, 'DDP mode not implemented for --evolve'
547 | opt.notest, opt.nosave = True, True # only test/save final epoch
548 | # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices
549 | yaml_file = Path(opt.save_dir) / 'hyp_evolved.yaml' # save best result here
550 | if opt.bucket:
551 | os.system('gsutil cp gs://%s/evolve.txt .' % opt.bucket) # download evolve.txt if exists
552 |
553 | for _ in range(300): # generations to evolve
554 | if Path('evolve.txt').exists(): # if evolve.txt exists: select best hyps and mutate
555 | # Select parent(s)
556 | parent = 'single' # parent selection method: 'single' or 'weighted'
557 | x = np.loadtxt('evolve.txt', ndmin=2)
558 | n = min(5, len(x)) # number of previous results to consider
559 | x = x[np.argsort(-fitness(x))][:n] # top n mutations
560 | w = fitness(x) - fitness(x).min() # weights
561 | if parent == 'single' or len(x) == 1:
562 | # x = x[random.randint(0, n - 1)] # random selection
563 | x = x[random.choices(range(n), weights=w)[0]] # weighted selection
564 | elif parent == 'weighted':
565 | x = (x * w.reshape(n, 1)).sum(0) / w.sum() # weighted combination
566 |
567 | # Mutate
568 | mp, s = 0.8, 0.2 # mutation probability, sigma
569 | npr = np.random
570 | npr.seed(int(time.time()))
571 | g = np.array([x[0] for x in meta.values()]) # gains 0-1
572 | ng = len(meta)
573 | v = np.ones(ng)
574 | while all(v == 1): # mutate until a change occurs (prevent duplicates)
575 | v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
576 | for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300)
577 | hyp[k] = float(x[i + 7] * v[i]) # mutate (first 7 columns of an evolve.txt row are results; hyps follow)
578 |
579 | # Constrain to limits
580 | for k, v in meta.items():
581 | hyp[k] = max(hyp[k], v[1]) # lower limit
582 | hyp[k] = min(hyp[k], v[2]) # upper limit
583 | hyp[k] = round(hyp[k], 5) # significant digits
584 |
585 | # Train mutation
586 | results = train(hyp.copy(), opt, device, wandb=wandb)
587 |
588 | # Write mutation results
589 | print_mutation(hyp.copy(), results, yaml_file, opt.bucket)
590 |
591 | # Plot results
592 | plot_evolution(yaml_file)
593 | print(f'Hyperparameter evolution complete. Best results saved as: {yaml_file}\n'
594 | f'Command to train a new model with these hyperparameters: $ python train.py --hyp {yaml_file}')
595 |
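# A minimal, self-contained sketch of the gain-based mutation step above,
# assuming a small subset of meta and mutating hyp in place; the real loop
# instead mutates a parent row selected from evolve.txt and retrains each
# generation.
import numpy as np

meta = {'lr0': (1, 1e-5, 1e-1), 'momentum': (0.3, 0.6, 0.98), 'fl_gamma': (0, 0.0, 2.0)}
hyp = {'lr0': 0.01, 'momentum': 0.937, 'fl_gamma': 0.0}

mp, s = 0.8, 0.2                             # mutation probability, sigma
npr = np.random.default_rng()
g = np.array([m[0] for m in meta.values()])  # per-hyp gains (0 disables mutation)
ng = len(meta)
v = np.ones(ng)
while all(v == 1):                           # retry until at least one hyp changes
    v = (g * (npr.random(ng) < mp) * npr.standard_normal(ng) * npr.random() * s + 1).clip(0.3, 3.0)
for i, k in enumerate(hyp):
    mutated = float(hyp[k] * v[i])                                 # multiplicative mutation
    hyp[k] = round(min(max(mutated, meta[k][1]), meta[k][2]), 5)   # clamp to [low, high]
print(hyp)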
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | GNU GENERAL PUBLIC LICENSE
2 | Version 3, 29 June 2007
3 |
4 | Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
5 | Everyone is permitted to copy and distribute verbatim copies
6 | of this license document, but changing it is not allowed.
7 |
8 | Preamble
9 |
10 | The GNU General Public License is a free, copyleft license for
11 | software and other kinds of works.
12 |
13 | The licenses for most software and other practical works are designed
14 | to take away your freedom to share and change the works. By contrast,
15 | the GNU General Public License is intended to guarantee your freedom to
16 | share and change all versions of a program--to make sure it remains free
17 | software for all its users. We, the Free Software Foundation, use the
18 | GNU General Public License for most of our software; it applies also to
19 | any other work released this way by its authors. You can apply it to
20 | your programs, too.
21 |
22 | When we speak of free software, we are referring to freedom, not
23 | price. Our General Public Licenses are designed to make sure that you
24 | have the freedom to distribute copies of free software (and charge for
25 | them if you wish), that you receive source code or can get it if you
26 | want it, that you can change the software or use pieces of it in new
27 | free programs, and that you know you can do these things.
28 |
29 | To protect your rights, we need to prevent others from denying you
30 | these rights or asking you to surrender the rights. Therefore, you have
31 | certain responsibilities if you distribute copies of the software, or if
32 | you modify it: responsibilities to respect the freedom of others.
33 |
34 | For example, if you distribute copies of such a program, whether
35 | gratis or for a fee, you must pass on to the recipients the same
36 | freedoms that you received. You must make sure that they, too, receive
37 | or can get the source code. And you must show them these terms so they
38 | know their rights.
39 |
40 | Developers that use the GNU GPL protect your rights with two steps:
41 | (1) assert copyright on the software, and (2) offer you this License
42 | giving you legal permission to copy, distribute and/or modify it.
43 |
44 | For the developers' and authors' protection, the GPL clearly explains
45 | that there is no warranty for this free software. For both users' and
46 | authors' sake, the GPL requires that modified versions be marked as
47 | changed, so that their problems will not be attributed erroneously to
48 | authors of previous versions.
49 |
50 | Some devices are designed to deny users access to install or run
51 | modified versions of the software inside them, although the manufacturer
52 | can do so. This is fundamentally incompatible with the aim of
53 | protecting users' freedom to change the software. The systematic
54 | pattern of such abuse occurs in the area of products for individuals to
55 | use, which is precisely where it is most unacceptable. Therefore, we
56 | have designed this version of the GPL to prohibit the practice for those
57 | products. If such problems arise substantially in other domains, we
58 | stand ready to extend this provision to those domains in future versions
59 | of the GPL, as needed to protect the freedom of users.
60 |
61 | Finally, every program is threatened constantly by software patents.
62 | States should not allow patents to restrict development and use of
63 | software on general-purpose computers, but in those that do, we wish to
64 | avoid the special danger that patents applied to a free program could
65 | make it effectively proprietary. To prevent this, the GPL assures that
66 | patents cannot be used to render the program non-free.
67 |
68 | The precise terms and conditions for copying, distribution and
69 | modification follow.
70 |
71 | TERMS AND CONDITIONS
72 |
73 | 0. Definitions.
74 |
75 | "This License" refers to version 3 of the GNU General Public License.
76 |
77 | "Copyright" also means copyright-like laws that apply to other kinds of
78 | works, such as semiconductor masks.
79 |
80 | "The Program" refers to any copyrightable work licensed under this
81 | License. Each licensee is addressed as "you". "Licensees" and
82 | "recipients" may be individuals or organizations.
83 |
84 | To "modify" a work means to copy from or adapt all or part of the work
85 | in a fashion requiring copyright permission, other than the making of an
86 | exact copy. The resulting work is called a "modified version" of the
87 | earlier work or a work "based on" the earlier work.
88 |
89 | A "covered work" means either the unmodified Program or a work based
90 | on the Program.
91 |
92 | To "propagate" a work means to do anything with it that, without
93 | permission, would make you directly or secondarily liable for
94 | infringement under applicable copyright law, except executing it on a
95 | computer or modifying a private copy. Propagation includes copying,
96 | distribution (with or without modification), making available to the
97 | public, and in some countries other activities as well.
98 |
99 | To "convey" a work means any kind of propagation that enables other
100 | parties to make or receive copies. Mere interaction with a user through
101 | a computer network, with no transfer of a copy, is not conveying.
102 |
103 | An interactive user interface displays "Appropriate Legal Notices"
104 | to the extent that it includes a convenient and prominently visible
105 | feature that (1) displays an appropriate copyright notice, and (2)
106 | tells the user that there is no warranty for the work (except to the
107 | extent that warranties are provided), that licensees may convey the
108 | work under this License, and how to view a copy of this License. If
109 | the interface presents a list of user commands or options, such as a
110 | menu, a prominent item in the list meets this criterion.
111 |
112 | 1. Source Code.
113 |
114 | The "source code" for a work means the preferred form of the work
115 | for making modifications to it. "Object code" means any non-source
116 | form of a work.
117 |
118 | A "Standard Interface" means an interface that either is an official
119 | standard defined by a recognized standards body, or, in the case of
120 | interfaces specified for a particular programming language, one that
121 | is widely used among developers working in that language.
122 |
123 | The "System Libraries" of an executable work include anything, other
124 | than the work as a whole, that (a) is included in the normal form of
125 | packaging a Major Component, but which is not part of that Major
126 | Component, and (b) serves only to enable use of the work with that
127 | Major Component, or to implement a Standard Interface for which an
128 | implementation is available to the public in source code form. A
129 | "Major Component", in this context, means a major essential component
130 | (kernel, window system, and so on) of the specific operating system
131 | (if any) on which the executable work runs, or a compiler used to
132 | produce the work, or an object code interpreter used to run it.
133 |
134 | The "Corresponding Source" for a work in object code form means all
135 | the source code needed to generate, install, and (for an executable
136 | work) run the object code and to modify the work, including scripts to
137 | control those activities. However, it does not include the work's
138 | System Libraries, or general-purpose tools or generally available free
139 | programs which are used unmodified in performing those activities but
140 | which are not part of the work. For example, Corresponding Source
141 | includes interface definition files associated with source files for
142 | the work, and the source code for shared libraries and dynamically
143 | linked subprograms that the work is specifically designed to require,
144 | such as by intimate data communication or control flow between those
145 | subprograms and other parts of the work.
146 |
147 | The Corresponding Source need not include anything that users
148 | can regenerate automatically from other parts of the Corresponding
149 | Source.
150 |
151 | The Corresponding Source for a work in source code form is that
152 | same work.
153 |
154 | 2. Basic Permissions.
155 |
156 | All rights granted under this License are granted for the term of
157 | copyright on the Program, and are irrevocable provided the stated
158 | conditions are met. This License explicitly affirms your unlimited
159 | permission to run the unmodified Program. The output from running a
160 | covered work is covered by this License only if the output, given its
161 | content, constitutes a covered work. This License acknowledges your
162 | rights of fair use or other equivalent, as provided by copyright law.
163 |
164 | You may make, run and propagate covered works that you do not
165 | convey, without conditions so long as your license otherwise remains
166 | in force. You may convey covered works to others for the sole purpose
167 | of having them make modifications exclusively for you, or provide you
168 | with facilities for running those works, provided that you comply with
169 | the terms of this License in conveying all material for which you do
170 | not control copyright. Those thus making or running the covered works
171 | for you must do so exclusively on your behalf, under your direction
172 | and control, on terms that prohibit them from making any copies of
173 | your copyrighted material outside their relationship with you.
174 |
175 | Conveying under any other circumstances is permitted solely under
176 | the conditions stated below. Sublicensing is not allowed; section 10
177 | makes it unnecessary.
178 |
179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
180 |
181 | No covered work shall be deemed part of an effective technological
182 | measure under any applicable law fulfilling obligations under article
183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or
184 | similar laws prohibiting or restricting circumvention of such
185 | measures.
186 |
187 | When you convey a covered work, you waive any legal power to forbid
188 | circumvention of technological measures to the extent such circumvention
189 | is effected by exercising rights under this License with respect to
190 | the covered work, and you disclaim any intention to limit operation or
191 | modification of the work as a means of enforcing, against the work's
192 | users, your or third parties' legal rights to forbid circumvention of
193 | technological measures.
194 |
195 | 4. Conveying Verbatim Copies.
196 |
197 | You may convey verbatim copies of the Program's source code as you
198 | receive it, in any medium, provided that you conspicuously and
199 | appropriately publish on each copy an appropriate copyright notice;
200 | keep intact all notices stating that this License and any
201 | non-permissive terms added in accord with section 7 apply to the code;
202 | keep intact all notices of the absence of any warranty; and give all
203 | recipients a copy of this License along with the Program.
204 |
205 | You may charge any price or no price for each copy that you convey,
206 | and you may offer support or warranty protection for a fee.
207 |
208 | 5. Conveying Modified Source Versions.
209 |
210 | You may convey a work based on the Program, or the modifications to
211 | produce it from the Program, in the form of source code under the
212 | terms of section 4, provided that you also meet all of these conditions:
213 |
214 | a) The work must carry prominent notices stating that you modified
215 | it, and giving a relevant date.
216 |
217 | b) The work must carry prominent notices stating that it is
218 | released under this License and any conditions added under section
219 | 7. This requirement modifies the requirement in section 4 to
220 | "keep intact all notices".
221 |
222 | c) You must license the entire work, as a whole, under this
223 | License to anyone who comes into possession of a copy. This
224 | License will therefore apply, along with any applicable section 7
225 | additional terms, to the whole of the work, and all its parts,
226 | regardless of how they are packaged. This License gives no
227 | permission to license the work in any other way, but it does not
228 | invalidate such permission if you have separately received it.
229 |
230 | d) If the work has interactive user interfaces, each must display
231 | Appropriate Legal Notices; however, if the Program has interactive
232 | interfaces that do not display Appropriate Legal Notices, your
233 | work need not make them do so.
234 |
235 | A compilation of a covered work with other separate and independent
236 | works, which are not by their nature extensions of the covered work,
237 | and which are not combined with it such as to form a larger program,
238 | in or on a volume of a storage or distribution medium, is called an
239 | "aggregate" if the compilation and its resulting copyright are not
240 | used to limit the access or legal rights of the compilation's users
241 | beyond what the individual works permit. Inclusion of a covered work
242 | in an aggregate does not cause this License to apply to the other
243 | parts of the aggregate.
244 |
245 | 6. Conveying Non-Source Forms.
246 |
247 | You may convey a covered work in object code form under the terms
248 | of sections 4 and 5, provided that you also convey the
249 | machine-readable Corresponding Source under the terms of this License,
250 | in one of these ways:
251 |
252 | a) Convey the object code in, or embodied in, a physical product
253 | (including a physical distribution medium), accompanied by the
254 | Corresponding Source fixed on a durable physical medium
255 | customarily used for software interchange.
256 |
257 | b) Convey the object code in, or embodied in, a physical product
258 | (including a physical distribution medium), accompanied by a
259 | written offer, valid for at least three years and valid for as
260 | long as you offer spare parts or customer support for that product
261 | model, to give anyone who possesses the object code either (1) a
262 | copy of the Corresponding Source for all the software in the
263 | product that is covered by this License, on a durable physical
264 | medium customarily used for software interchange, for a price no
265 | more than your reasonable cost of physically performing this
266 | conveying of source, or (2) access to copy the
267 | Corresponding Source from a network server at no charge.
268 |
269 | c) Convey individual copies of the object code with a copy of the
270 | written offer to provide the Corresponding Source. This
271 | alternative is allowed only occasionally and noncommercially, and
272 | only if you received the object code with such an offer, in accord
273 | with subsection 6b.
274 |
275 | d) Convey the object code by offering access from a designated
276 | place (gratis or for a charge), and offer equivalent access to the
277 | Corresponding Source in the same way through the same place at no
278 | further charge. You need not require recipients to copy the
279 | Corresponding Source along with the object code. If the place to
280 | copy the object code is a network server, the Corresponding Source
281 | may be on a different server (operated by you or a third party)
282 | that supports equivalent copying facilities, provided you maintain
283 | clear directions next to the object code saying where to find the
284 | Corresponding Source. Regardless of what server hosts the
285 | Corresponding Source, you remain obligated to ensure that it is
286 | available for as long as needed to satisfy these requirements.
287 |
288 | e) Convey the object code using peer-to-peer transmission, provided
289 | you inform other peers where the object code and Corresponding
290 | Source of the work are being offered to the general public at no
291 | charge under subsection 6d.
292 |
293 | A separable portion of the object code, whose source code is excluded
294 | from the Corresponding Source as a System Library, need not be
295 | included in conveying the object code work.
296 |
297 | A "User Product" is either (1) a "consumer product", which means any
298 | tangible personal property which is normally used for personal, family,
299 | or household purposes, or (2) anything designed or sold for incorporation
300 | into a dwelling. In determining whether a product is a consumer product,
301 | doubtful cases shall be resolved in favor of coverage. For a particular
302 | product received by a particular user, "normally used" refers to a
303 | typical or common use of that class of product, regardless of the status
304 | of the particular user or of the way in which the particular user
305 | actually uses, or expects or is expected to use, the product. A product
306 | is a consumer product regardless of whether the product has substantial
307 | commercial, industrial or non-consumer uses, unless such uses represent
308 | the only significant mode of use of the product.
309 |
310 | "Installation Information" for a User Product means any methods,
311 | procedures, authorization keys, or other information required to install
312 | and execute modified versions of a covered work in that User Product from
313 | a modified version of its Corresponding Source. The information must
314 | suffice to ensure that the continued functioning of the modified object
315 | code is in no case prevented or interfered with solely because
316 | modification has been made.
317 |
318 | If you convey an object code work under this section in, or with, or
319 | specifically for use in, a User Product, and the conveying occurs as
320 | part of a transaction in which the right of possession and use of the
321 | User Product is transferred to the recipient in perpetuity or for a
322 | fixed term (regardless of how the transaction is characterized), the
323 | Corresponding Source conveyed under this section must be accompanied
324 | by the Installation Information. But this requirement does not apply
325 | if neither you nor any third party retains the ability to install
326 | modified object code on the User Product (for example, the work has
327 | been installed in ROM).
328 |
329 | The requirement to provide Installation Information does not include a
330 | requirement to continue to provide support service, warranty, or updates
331 | for a work that has been modified or installed by the recipient, or for
332 | the User Product in which it has been modified or installed. Access to a
333 | network may be denied when the modification itself materially and
334 | adversely affects the operation of the network or violates the rules and
335 | protocols for communication across the network.
336 |
337 | Corresponding Source conveyed, and Installation Information provided,
338 | in accord with this section must be in a format that is publicly
339 | documented (and with an implementation available to the public in
340 | source code form), and must require no special password or key for
341 | unpacking, reading or copying.
342 |
343 | 7. Additional Terms.
344 |
345 | "Additional permissions" are terms that supplement the terms of this
346 | License by making exceptions from one or more of its conditions.
347 | Additional permissions that are applicable to the entire Program shall
348 | be treated as though they were included in this License, to the extent
349 | that they are valid under applicable law. If additional permissions
350 | apply only to part of the Program, that part may be used separately
351 | under those permissions, but the entire Program remains governed by
352 | this License without regard to the additional permissions.
353 |
354 | When you convey a copy of a covered work, you may at your option
355 | remove any additional permissions from that copy, or from any part of
356 | it. (Additional permissions may be written to require their own
357 | removal in certain cases when you modify the work.) You may place
358 | additional permissions on material, added by you to a covered work,
359 | for which you have or can give appropriate copyright permission.
360 |
361 | Notwithstanding any other provision of this License, for material you
362 | add to a covered work, you may (if authorized by the copyright holders of
363 | that material) supplement the terms of this License with terms:
364 |
365 | a) Disclaiming warranty or limiting liability differently from the
366 | terms of sections 15 and 16 of this License; or
367 |
368 | b) Requiring preservation of specified reasonable legal notices or
369 | author attributions in that material or in the Appropriate Legal
370 | Notices displayed by works containing it; or
371 |
372 | c) Prohibiting misrepresentation of the origin of that material, or
373 | requiring that modified versions of such material be marked in
374 | reasonable ways as different from the original version; or
375 |
376 | d) Limiting the use for publicity purposes of names of licensors or
377 | authors of the material; or
378 |
379 | e) Declining to grant rights under trademark law for use of some
380 | trade names, trademarks, or service marks; or
381 |
382 | f) Requiring indemnification of licensors and authors of that
383 | material by anyone who conveys the material (or modified versions of
384 | it) with contractual assumptions of liability to the recipient, for
385 | any liability that these contractual assumptions directly impose on
386 | those licensors and authors.
387 |
388 | All other non-permissive additional terms are considered "further
389 | restrictions" within the meaning of section 10. If the Program as you
390 | received it, or any part of it, contains a notice stating that it is
391 | governed by this License along with a term that is a further
392 | restriction, you may remove that term. If a license document contains
393 | a further restriction but permits relicensing or conveying under this
394 | License, you may add to a covered work material governed by the terms
395 | of that license document, provided that the further restriction does
396 | not survive such relicensing or conveying.
397 |
398 | If you add terms to a covered work in accord with this section, you
399 | must place, in the relevant source files, a statement of the
400 | additional terms that apply to those files, or a notice indicating
401 | where to find the applicable terms.
402 |
403 | Additional terms, permissive or non-permissive, may be stated in the
404 | form of a separately written license, or stated as exceptions;
405 | the above requirements apply either way.
406 |
407 | 8. Termination.
408 |
409 | You may not propagate or modify a covered work except as expressly
410 | provided under this License. Any attempt otherwise to propagate or
411 | modify it is void, and will automatically terminate your rights under
412 | this License (including any patent licenses granted under the third
413 | paragraph of section 11).
414 |
415 | However, if you cease all violation of this License, then your
416 | license from a particular copyright holder is reinstated (a)
417 | provisionally, unless and until the copyright holder explicitly and
418 | finally terminates your license, and (b) permanently, if the copyright
419 | holder fails to notify you of the violation by some reasonable means
420 | prior to 60 days after the cessation.
421 |
422 | Moreover, your license from a particular copyright holder is
423 | reinstated permanently if the copyright holder notifies you of the
424 | violation by some reasonable means, this is the first time you have
425 | received notice of violation of this License (for any work) from that
426 | copyright holder, and you cure the violation prior to 30 days after
427 | your receipt of the notice.
428 |
429 | Termination of your rights under this section does not terminate the
430 | licenses of parties who have received copies or rights from you under
431 | this License. If your rights have been terminated and not permanently
432 | reinstated, you do not qualify to receive new licenses for the same
433 | material under section 10.
434 |
435 | 9. Acceptance Not Required for Having Copies.
436 |
437 | You are not required to accept this License in order to receive or
438 | run a copy of the Program. Ancillary propagation of a covered work
439 | occurring solely as a consequence of using peer-to-peer transmission
440 | to receive a copy likewise does not require acceptance. However,
441 | nothing other than this License grants you permission to propagate or
442 | modify any covered work. These actions infringe copyright if you do
443 | not accept this License. Therefore, by modifying or propagating a
444 | covered work, you indicate your acceptance of this License to do so.
445 |
446 | 10. Automatic Licensing of Downstream Recipients.
447 |
448 | Each time you convey a covered work, the recipient automatically
449 | receives a license from the original licensors, to run, modify and
450 | propagate that work, subject to this License. You are not responsible
451 | for enforcing compliance by third parties with this License.
452 |
453 | An "entity transaction" is a transaction transferring control of an
454 | organization, or substantially all assets of one, or subdividing an
455 | organization, or merging organizations. If propagation of a covered
456 | work results from an entity transaction, each party to that
457 | transaction who receives a copy of the work also receives whatever
458 | licenses to the work the party's predecessor in interest had or could
459 | give under the previous paragraph, plus a right to possession of the
460 | Corresponding Source of the work from the predecessor in interest, if
461 | the predecessor has it or can get it with reasonable efforts.
462 |
463 | You may not impose any further restrictions on the exercise of the
464 | rights granted or affirmed under this License. For example, you may
465 | not impose a license fee, royalty, or other charge for exercise of
466 | rights granted under this License, and you may not initiate litigation
467 | (including a cross-claim or counterclaim in a lawsuit) alleging that
468 | any patent claim is infringed by making, using, selling, offering for
469 | sale, or importing the Program or any portion of it.
470 |
471 | 11. Patents.
472 |
473 | A "contributor" is a copyright holder who authorizes use under this
474 | License of the Program or a work on which the Program is based. The
475 | work thus licensed is called the contributor's "contributor version".
476 |
477 | A contributor's "essential patent claims" are all patent claims
478 | owned or controlled by the contributor, whether already acquired or
479 | hereafter acquired, that would be infringed by some manner, permitted
480 | by this License, of making, using, or selling its contributor version,
481 | but do not include claims that would be infringed only as a
482 | consequence of further modification of the contributor version. For
483 | purposes of this definition, "control" includes the right to grant
484 | patent sublicenses in a manner consistent with the requirements of
485 | this License.
486 |
487 | Each contributor grants you a non-exclusive, worldwide, royalty-free
488 | patent license under the contributor's essential patent claims, to
489 | make, use, sell, offer for sale, import and otherwise run, modify and
490 | propagate the contents of its contributor version.
491 |
492 | In the following three paragraphs, a "patent license" is any express
493 | agreement or commitment, however denominated, not to enforce a patent
494 | (such as an express permission to practice a patent or covenant not to
495 | sue for patent infringement). To "grant" such a patent license to a
496 | party means to make such an agreement or commitment not to enforce a
497 | patent against the party.
498 |
499 | If you convey a covered work, knowingly relying on a patent license,
500 | and the Corresponding Source of the work is not available for anyone
501 | to copy, free of charge and under the terms of this License, through a
502 | publicly available network server or other readily accessible means,
503 | then you must either (1) cause the Corresponding Source to be so
504 | available, or (2) arrange to deprive yourself of the benefit of the
505 | patent license for this particular work, or (3) arrange, in a manner
506 | consistent with the requirements of this License, to extend the patent
507 | license to downstream recipients. "Knowingly relying" means you have
508 | actual knowledge that, but for the patent license, your conveying the
509 | covered work in a country, or your recipient's use of the covered work
510 | in a country, would infringe one or more identifiable patents in that
511 | country that you have reason to believe are valid.
512 |
513 | If, pursuant to or in connection with a single transaction or
514 | arrangement, you convey, or propagate by procuring conveyance of, a
515 | covered work, and grant a patent license to some of the parties
516 | receiving the covered work authorizing them to use, propagate, modify
517 | or convey a specific copy of the covered work, then the patent license
518 | you grant is automatically extended to all recipients of the covered
519 | work and works based on it.
520 |
521 | A patent license is "discriminatory" if it does not include within
522 | the scope of its coverage, prohibits the exercise of, or is
523 | conditioned on the non-exercise of one or more of the rights that are
524 | specifically granted under this License. You may not convey a covered
525 | work if you are a party to an arrangement with a third party that is
526 | in the business of distributing software, under which you make payment
527 | to the third party based on the extent of your activity of conveying
528 | the work, and under which the third party grants, to any of the
529 | parties who would receive the covered work from you, a discriminatory
530 | patent license (a) in connection with copies of the covered work
531 | conveyed by you (or copies made from those copies), or (b) primarily
532 | for and in connection with specific products or compilations that
533 | contain the covered work, unless you entered into that arrangement,
534 | or that patent license was granted, prior to 28 March 2007.
535 |
536 | Nothing in this License shall be construed as excluding or limiting
537 | any implied license or other defenses to infringement that may
538 | otherwise be available to you under applicable patent law.
539 |
540 | 12. No Surrender of Others' Freedom.
541 |
542 | If conditions are imposed on you (whether by court order, agreement or
543 | otherwise) that contradict the conditions of this License, they do not
544 | excuse you from the conditions of this License. If you cannot convey a
545 | covered work so as to satisfy simultaneously your obligations under this
546 | License and any other pertinent obligations, then as a consequence you may
547 | not convey it at all. For example, if you agree to terms that obligate you
548 | to collect a royalty for further conveying from those to whom you convey
549 | the Program, the only way you could satisfy both those terms and this
550 | License would be to refrain entirely from conveying the Program.
551 |
552 | 13. Use with the GNU Affero General Public License.
553 |
554 | Notwithstanding any other provision of this License, you have
555 | permission to link or combine any covered work with a work licensed
556 | under version 3 of the GNU Affero General Public License into a single
557 | combined work, and to convey the resulting work. The terms of this
558 | License will continue to apply to the part which is the covered work,
559 | but the special requirements of the GNU Affero General Public License,
560 | section 13, concerning interaction through a network will apply to the
561 | combination as such.
562 |
563 | 14. Revised Versions of this License.
564 |
565 | The Free Software Foundation may publish revised and/or new versions of
566 | the GNU General Public License from time to time. Such new versions will
567 | be similar in spirit to the present version, but may differ in detail to
568 | address new problems or concerns.
569 |
570 | Each version is given a distinguishing version number. If the
571 | Program specifies that a certain numbered version of the GNU General
572 | Public License "or any later version" applies to it, you have the
573 | option of following the terms and conditions either of that numbered
574 | version or of any later version published by the Free Software
575 | Foundation. If the Program does not specify a version number of the
576 | GNU General Public License, you may choose any version ever published
577 | by the Free Software Foundation.
578 |
579 | If the Program specifies that a proxy can decide which future
580 | versions of the GNU General Public License can be used, that proxy's
581 | public statement of acceptance of a version permanently authorizes you
582 | to choose that version for the Program.
583 |
584 | Later license versions may give you additional or different
585 | permissions. However, no additional obligations are imposed on any
586 | author or copyright holder as a result of your choosing to follow a
587 | later version.
588 |
589 | 15. Disclaimer of Warranty.
590 |
591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
599 |
600 | 16. Limitation of Liability.
601 |
602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
610 | SUCH DAMAGES.
611 |
612 | 17. Interpretation of Sections 15 and 16.
613 |
614 | If the disclaimer of warranty and limitation of liability provided
615 | above cannot be given local legal effect according to their terms,
616 | reviewing courts shall apply local law that most closely approximates
617 | an absolute waiver of all civil liability in connection with the
618 | Program, unless a warranty or assumption of liability accompanies a
619 | copy of the Program in return for a fee.
620 |
621 | END OF TERMS AND CONDITIONS
622 |
623 | How to Apply These Terms to Your New Programs
624 |
625 | If you develop a new program, and you want it to be of the greatest
626 | possible use to the public, the best way to achieve this is to make it
627 | free software which everyone can redistribute and change under these terms.
628 |
629 | To do so, attach the following notices to the program. It is safest
630 | to attach them to the start of each source file to most effectively
631 | state the exclusion of warranty; and each file should have at least
632 | the "copyright" line and a pointer to where the full notice is found.
633 |
634 | <one line to give the program's name and a brief idea of what it does.>
635 | Copyright (C) <year>  <name of author>
636 |
637 | This program is free software: you can redistribute it and/or modify
638 | it under the terms of the GNU General Public License as published by
639 | the Free Software Foundation, either version 3 of the License, or
640 | (at your option) any later version.
641 |
642 | This program is distributed in the hope that it will be useful,
643 | but WITHOUT ANY WARRANTY; without even the implied warranty of
644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
645 | GNU General Public License for more details.
646 |
647 | You should have received a copy of the GNU General Public License
648 | along with this program. If not, see <https://www.gnu.org/licenses/>.
649 |
650 | Also add information on how to contact you by electronic and paper mail.
651 |
652 | If the program does terminal interaction, make it output a short
653 | notice like this when it starts in an interactive mode:
654 |
655 | <program> Copyright (C) <year> <name of author>
656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
657 | This is free software, and you are welcome to redistribute it
658 | under certain conditions; type `show c' for details.
659 |
660 | The hypothetical commands `show w' and `show c' should show the appropriate
661 | parts of the General Public License. Of course, your program's commands
662 | might be different; for a GUI interface, you would use an "about box".
663 |
664 | You should also get your employer (if you work as a programmer) or school,
665 | if any, to sign a "copyright disclaimer" for the program, if necessary.
666 | For more information on this, and how to apply and follow the GNU GPL, see
667 | <https://www.gnu.org/licenses/>.
668 |
669 | The GNU General Public License does not permit incorporating your program
670 | into proprietary programs. If your program is a subroutine library, you
671 | may consider it more useful to permit linking proprietary applications with
672 | the library. If this is what you want to do, use the GNU Lesser General
673 | Public License instead of this License. But first, please read
674 | <https://www.gnu.org/licenses/why-not-lgpl.html>.
--------------------------------------------------------------------------------
/face_datasets.py:
--------------------------------------------------------------------------------
1 | import glob
2 | import logging
3 | import math
4 | import os
5 | import random
6 | import shutil
7 | import time
8 | from itertools import repeat
9 | from multiprocessing.pool import ThreadPool
10 | from pathlib import Path
11 | from threading import Thread
12 |
13 | import cv2
14 | import numpy as np
15 | import torch
16 | from PIL import Image, ExifTags
17 | from torch.utils.data import Dataset
18 | from tqdm import tqdm
19 |
20 | from utils.general import xyxy2xywh, xywh2xyxy, clean_str
21 | from utils.torch_utils import torch_distributed_zero_first
22 |
23 |
24 | # Parameters
25 | help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
26 | img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng'] # acceptable image suffixes
27 | vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes
28 | logger = logging.getLogger(__name__)
29 |
30 | # Get orientation exif tag
31 | for orientation in ExifTags.TAGS.keys():
32 | if ExifTags.TAGS[orientation] == 'Orientation':
33 | break
34 |
35 | def get_hash(files):
36 | # Returns a single hash value of a list of files
37 | return sum(os.path.getsize(f) for f in files if os.path.isfile(f))
38 |
39 | def img2label_paths(img_paths):
40 | # Define label paths as a function of image paths
41 | sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings
42 | return [x.replace(sa, sb, 1).replace('.' + x.split('.')[-1], '.txt') for x in img_paths]
43 |
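# Illustrative note on the mapping above (example paths are assumed):
# img2label_paths(['/data/widerface/images/train/0001.jpg'])
#   -> ['/data/widerface/labels/train/0001.txt']
# Only the first '/images/' path component and the final extension change, so
# datasets must keep the parallel images/ and labels/ directory layout.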
44 | def exif_size(img):
45 | # Returns exif-corrected PIL size
46 | s = img.size # (width, height)
47 | try:
48 | rotation = dict(img._getexif().items())[orientation]
49 | if rotation == 6: # rotation 270
50 | s = (s[1], s[0])
51 | elif rotation == 8: # rotation 90
52 | s = (s[1], s[0])
53 | except Exception: # image has no EXIF data or no orientation tag
54 | pass
55 |
56 | return s
57 |
58 | class LoadFaceImagesAndLabels(Dataset): # for training/testing
59 | def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
60 | cache_images=False, single_cls=False, stride=32, pad=0.0, rank=-1):
61 | self.img_size = img_size
62 | self.augment = augment
63 | self.hyp = hyp
64 | self.image_weights = image_weights
65 | self.rect = False if image_weights else rect
66 | self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
67 | self.mosaic_border = [-img_size // 2, -img_size // 2]
68 | self.stride = stride
69 |
70 | try:
71 | f = [] # image files
72 | for p in path if isinstance(path, list) else [path]:
73 | p = Path(p) # os-agnostic
74 | if p.is_dir(): # dir
75 | f += glob.glob(str(p / '**' / '*.*'), recursive=True)
76 | elif p.is_file(): # file
77 | with open(p, 'r') as t:
78 | t = t.read().strip().splitlines()
79 | parent = str(p.parent) + os.sep
80 | f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
81 | else:
82 | raise Exception('%s does not exist' % p)
83 | self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats])
84 | assert self.img_files, 'No images found'
85 | except Exception as e:
86 | raise Exception('Error loading data from %s: %s\nSee %s' % (path, e, help_url))
87 |
88 | # Check cache
89 | self.label_files = img2label_paths(self.img_files) # labels
90 | cache_path = Path(self.label_files[0]).parent.with_suffix('.cache') # cached labels
91 | if cache_path.is_file():
92 | cache = torch.load(cache_path) # load
93 | if cache['hash'] != get_hash(self.label_files + self.img_files) or 'results' not in cache: # changed
94 | cache = self.cache_labels(cache_path) # re-cache
95 | else:
96 | cache = self.cache_labels(cache_path) # cache
97 |
98 | # Display cache
99 | [nf, nm, ne, nc, n] = cache.pop('results') # found, missing, empty, corrupted, total
100 | desc = f"Scanning '{cache_path}' for images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted"
101 | tqdm(None, desc=desc, total=n, initial=n)
102 | assert nf > 0 or not augment, f'No labels found in {cache_path}. Cannot train without labels. See {help_url}'
103 |
104 | # Read cache
105 | cache.pop('hash') # remove hash
106 | labels, shapes = zip(*cache.values())
107 | self.labels = list(labels)
108 | self.shapes = np.array(shapes, dtype=np.float64)
109 | self.img_files = list(cache.keys()) # update
110 | self.label_files = img2label_paths(cache.keys()) # update
111 | if single_cls:
112 | for x in self.labels:
113 | x[:, 0] = 0
114 |
115 | n = len(shapes) # number of images
116 | bi = np.floor(np.arange(n) / batch_size).astype(int) # batch index
117 | nb = bi[-1] + 1 # number of batches
118 | self.batch = bi # batch index of image
119 | self.n = n
120 | self.indices = range(n)
121 |
122 | # Rectangular Training
123 | if self.rect:
124 | # Sort by aspect ratio
125 | s = self.shapes # wh
126 | ar = s[:, 1] / s[:, 0] # aspect ratio
127 | irect = ar.argsort()
128 | self.img_files = [self.img_files[i] for i in irect]
129 | self.label_files = [self.label_files[i] for i in irect]
130 | self.labels = [self.labels[i] for i in irect]
131 | self.shapes = s[irect] # wh
132 | ar = ar[irect]
133 |
134 | # Set training image shapes
135 | shapes = [[1, 1]] * nb
136 | for i in range(nb):
137 | ari = ar[bi == i]
138 | mini, maxi = ari.min(), ari.max()
139 | if maxi < 1:
140 | shapes[i] = [maxi, 1]
141 | elif mini > 1:
142 | shapes[i] = [1, 1 / mini]
143 |
144 | self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride
145 |
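# Worked example for the line above (numbers assumed): with img_size=640,
# stride=32 and pad=0.0, a batch whose aspect ratios h/w are all below 1 with
# max 0.75 has shapes[i] = [0.75, 1], so batch_shapes[i] =
# ceil([0.75, 1] * 640 / 32) * 32 = [480, 640]; wide images are letterboxed to
# 480x640 instead of 640x640, cutting padding pixels.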
146 | # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
147 | self.imgs = [None] * n
148 | if cache_images:
149 | gb = 0 # Gigabytes of cached images
150 | self.img_hw0, self.img_hw = [None] * n, [None] * n
151 | results = ThreadPool(8).imap(lambda x: load_image(*x), zip(repeat(self), range(n))) # 8 threads
152 | pbar = tqdm(enumerate(results), total=n)
153 | for i, x in pbar:
154 | self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i)
155 | gb += self.imgs[i].nbytes
156 | pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)
157 |
158 | def cache_labels(self, path=Path('./labels.cache')):
159 | # Cache dataset labels, check images and read shapes
160 | x = {} # dict
161 | nm, nf, ne, nc = 0, 0, 0, 0 # number missing, found, empty, corrupted
162 | pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
163 | for i, (im_file, lb_file) in enumerate(pbar):
164 | try:
165 | # verify images
166 | im = Image.open(im_file)
167 | im.verify() # PIL verify
168 | shape = exif_size(im) # image size
169 | assert (shape[0] > 9) & (shape[1] > 9), 'image size <10 pixels'
170 |
171 | # verify labels
172 | if os.path.isfile(lb_file):
173 | nf += 1 # label found
174 | with open(lb_file, 'r') as f:
175 | l = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels
176 | if len(l):
177 | assert l.shape[1] == 15, 'labels require 15 columns each'
178 | assert (l >= -1).all(), 'negative labels'
179 | assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels'
180 | assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels'
181 | else:
182 | ne += 1 # label empty
183 | l = np.zeros((0, 15), dtype=np.float32)
184 | else:
185 | nm += 1 # label missing
186 | l = np.zeros((0, 15), dtype=np.float32)
187 | x[im_file] = [l, shape]
188 | except Exception as e:
189 | nc += 1
190 | print('WARNING: Ignoring corrupted image and/or label %s: %s' % (im_file, e))
191 |
192 | pbar.desc = f"Scanning '{path.parent / path.stem}' for images and labels... " \
193 | f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted"
194 |
195 | if nf == 0:
196 | print(f'WARNING: No labels found in {path}. See {help_url}')
197 |
198 | x['hash'] = get_hash(self.label_files + self.img_files)
199 | x['results'] = [nf, nm, ne, nc, i + 1]
200 | torch.save(x, path) # save for next time
201 | logging.info(f"New cache created: {path}")
202 | return x
203 |
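# Label format note (sample values are assumed): each row of a label .txt file
# holds the 15 normalized columns checked above: class, cx, cy, w, h, then five
# landmark (x, y) pairs, ordered left eye, right eye, nose, left mouth corner,
# right mouth corner (the order implied by the flip-lr swap in __getitem__), e.g.
#   0 0.512 0.432 0.125 0.180 0.47 0.40 0.55 0.40 0.51 0.45 0.48 0.50 0.54 0.50
# Missing landmarks are stored as -1, which is why values down to -1 pass the
# '(l >= -1).all()' check.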
204 | def __len__(self):
205 | return len(self.img_files)
206 |
207 | # def __iter__(self):
208 | # self.count = -1
209 | # print('ran dataset iter')
210 | # #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
211 | # return self
212 |
213 | def __getitem__(self, index):
214 | index = self.indices[index] # linear, shuffled, or image_weights
215 |
216 | hyp = self.hyp
217 | mosaic = self.mosaic and random.random() < hyp['mosaic']
218 | if mosaic:
219 | # Load mosaic
220 | img, labels = load_mosaic_face(self, index)
221 | shapes = None
222 |
223 | # MixUp https://arxiv.org/pdf/1710.09412.pdf
224 | if random.random() < hyp['mixup']:
225 | img2, labels2 = load_mosaic_face(self, random.randint(0, self.n - 1))
226 | r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0
227 | img = (img * r + img2 * (1 - r)).astype(np.uint8)
228 | labels = np.concatenate((labels, labels2), 0)
229 |
230 | else:
231 | # Load image
232 | img, (h0, w0), (h, w) = load_image(self, index)
233 |
234 | # Letterbox
235 | shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
236 | img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
237 | shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
238 |
239 | # Load labels
240 | labels = []
241 | x = self.labels[index]
242 | if x.size > 0:
243 | # Normalized xywh to pixel xyxy format
244 | labels = x.copy()
245 | labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0] # pad width
246 | labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1] # pad height
247 | labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
248 | labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]
249 |
250 | #labels[:, 5] = ratio[0] * w * x[:, 5] + pad[0] # pad width
251 | labels[:, 5] = np.array(x[:, 5] > 0, dtype=np.int32) * (ratio[0] * w * x[:, 5] + pad[0]) + (
252 | np.array(x[:, 5] > 0, dtype=np.int32) - 1)
253 | labels[:, 6] = np.array(x[:, 6] > 0, dtype=np.int32) * (ratio[1] * h * x[:, 6] + pad[1]) + (
254 | np.array(x[:, 6] > 0, dtype=np.int32) - 1)
255 | labels[:, 7] = np.array(x[:, 7] > 0, dtype=np.int32) * (ratio[0] * w * x[:, 7] + pad[0]) + (
256 | np.array(x[:, 7] > 0, dtype=np.int32) - 1)
257 | labels[:, 8] = np.array(x[:, 8] > 0, dtype=np.int32) * (ratio[1] * h * x[:, 8] + pad[1]) + (
258 | np.array(x[:, 8] > 0, dtype=np.int32) - 1)
259 | labels[:, 9] = np.array(x[:, 9] > 0, dtype=np.int32) * (ratio[0] * w * x[:, 9] + pad[0]) + (
260 | np.array(x[:, 9] > 0, dtype=np.int32) - 1)
261 | labels[:, 10] = np.array(x[:, 10] > 0, dtype=np.int32) * (ratio[1] * h * x[:, 10] + pad[1]) + (
262 | np.array(x[:, 10] > 0, dtype=np.int32) - 1)
263 | labels[:, 11] = np.array(x[:, 11] > 0, dtype=np.int32) * (ratio[0] * w * x[:, 11] + pad[0]) + (
264 | np.array(x[:, 11] > 0, dtype=np.int32) - 1)
265 | labels[:, 12] = np.array(x[:, 12] > 0, dtype=np.int32) * (ratio[1] * h * x[:, 12] + pad[1]) + (
266 | np.array(x[:, 12] > 0, dtype=np.int32) - 1)
267 | labels[:, 13] = np.array(x[:, 13] > 0, dtype=np.int32) * (ratio[0] * w * x[:, 13] + pad[0]) + (
268 | np.array(x[:, 13] > 0, dtype=np.int32) - 1)
269 | labels[:, 14] = np.array(x[:, 14] > 0, dtype=np.int32) * (ratio[1] * h * x[:, 14] + pad[1]) + (
270 | np.array(x[:, 14] > 0, dtype=np.int32) - 1)
271 |
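# Each landmark line above follows one idiom: with gate = (x[:, j] > 0), the
# result is gate * (letterboxed coordinate) + (gate - 1), so visible landmarks
# are scaled and padded while missing ones (<= 0) map to -1. For example
# (numbers assumed), x_j = 0.5 with ratio 1.0, w = 640, pad = 16 gives
# 1 * (0.5 * 640 + 16) + 0 = 336.0, while x_j = -1 gives 0 * (...) + (0 - 1) = -1.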
272 | if self.augment:
273 | # Augment imagespace
274 | if not mosaic:
275 | img, labels = random_perspective(img, labels,
276 | degrees=hyp['degrees'],
277 | translate=hyp['translate'],
278 | scale=hyp['scale'],
279 | shear=hyp['shear'],
280 | perspective=hyp['perspective'])
281 |
282 | # Augment colorspace
283 | augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
284 |
285 | # Apply cutouts
286 | # if random.random() < 0.9:
287 | # labels = cutout(img, labels)
288 |
289 | nL = len(labels) # number of labels
290 | if nL:
291 | labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) # convert xyxy to xywh
292 | labels[:, [2, 4]] /= img.shape[0] # normalized height 0-1
293 | labels[:, [1, 3]] /= img.shape[1] # normalized width 0-1
294 |
295 | labels[:, [5, 7, 9, 11, 13]] /= img.shape[1] # normalized landmark x 0-1
296 | labels[:, [5, 7, 9, 11, 13]] = np.where(labels[:, [5, 7, 9, 11, 13]] < 0, -1, labels[:, [5, 7, 9, 11, 13]])
297 | labels[:, [6, 8, 10, 12, 14]] /= img.shape[0] # normalized landmark y 0-1
298 | labels[:, [6, 8, 10, 12, 14]] = np.where(labels[:, [6, 8, 10, 12, 14]] < 0, -1, labels[:, [6, 8, 10, 12, 14]])
299 |
300 | if self.augment:
301 | # flip up-down
302 | if random.random() < hyp['flipud']:
303 | img = np.flipud(img)
304 | if nL:
305 | labels[:, 2] = 1 - labels[:, 2]
306 |
307 | labels[:, 6] = np.where(labels[:, 6] < 0, -1, 1 - labels[:, 6])
308 | labels[:, 8] = np.where(labels[:, 8] < 0, -1, 1 - labels[:, 8])
309 | labels[:, 10] = np.where(labels[:, 10] < 0, -1, 1 - labels[:, 10])
310 | labels[:, 12] = np.where(labels[:, 12] < 0, -1, 1 - labels[:, 12])
311 | labels[:, 14] = np.where(labels[:, 14] < 0, -1, 1 - labels[:, 14])
312 |
313 | # flip left-right
314 | if random.random() < hyp['fliplr']:
315 | img = np.fliplr(img)
316 | if nL:
317 | labels[:, 1] = 1 - labels[:, 1]
318 |
319 | labels[:, 5] = np.where(labels[:, 5] < 0, -1, 1 - labels[:, 5])
320 | labels[:, 7] = np.where(labels[:, 7] < 0, -1, 1 - labels[:, 7])
321 | labels[:, 9] = np.where(labels[:, 9] < 0, -1, 1 - labels[:, 9])
322 | labels[:, 11] = np.where(labels[:, 11] < 0, -1, 1 - labels[:, 11])
323 | labels[:, 13] = np.where(labels[:, 13] < 0, -1, 1 - labels[:, 13])
324 |
325 | # When mirroring left-right, the left/right eyes and the left/right mouth corners can no longer be told apart; swap their label columns so the network learns consistent semantics
326 | eye_left = np.copy(labels[:, [5, 6]])
327 | mouth_left = np.copy(labels[:, [11, 12]])
328 | labels[:, [5, 6]] = labels[:, [7, 8]]
329 | labels[:, [7, 8]] = eye_left
330 | labels[:, [11, 12]] = labels[:, [13, 14]]
331 | labels[:, [13, 14]] = mouth_left
332 |
333 | labels_out = torch.zeros((nL, 16))
334 | if nL:
335 | labels_out[:, 1:] = torch.from_numpy(labels)
336 | #showlabels(img, labels[:, 1:5], labels[:, 5:15])
337 |
338 | # Convert
339 | img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
340 | img = np.ascontiguousarray(img)
341 | #print(index, ' --- labels_out: ', labels_out)
342 | #if nL:
343 | #print( ' : landmarks : ', torch.max(labels_out[:, 5:15]), ' --- ', torch.min(labels_out[:, 5:15]))
344 | return torch.from_numpy(img), labels_out, self.img_files[index], shapes
345 |
346 | @staticmethod
347 | def collate_fn(batch):
348 | img, label, path, shapes = zip(*batch) # transposed
349 | for i, l in enumerate(label):
350 | l[:, 0] = i # add target image index for build_targets()
351 | return torch.stack(img, 0), torch.cat(label, 0), path, shapes
352 |
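# Usage note (shapes assumed): for a batch of B images with n_i targets each,
# collate_fn returns imgs of shape (B, 3, H, W) and labels of shape
# (sum(n_i), 16), where column 0 is the image index written above and the
# remaining 15 columns are the class, box and landmark values from __getitem__.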
353 |
354 | def showlabels(img, boxs, landmarks):
355 | for box in boxs:
356 | x, y, w, h = box[0] * img.shape[1], box[1] * img.shape[0], box[2] * img.shape[1], box[3] * img.shape[0]
357 | #cv2.rectangle(image, (x,y), (x+w,y+h), (0,255,0), 2)
358 | cv2.rectangle(img, (int(x - w/2), int(y - h/2)), (int(x + w/2), int(y + h/2)), (0, 255, 0), 2)
359 |
360 | for landmark in landmarks:
361 | #cv2.circle(img,(60,60),30,(0,0,255))
362 | for i in range(5):
363 | cv2.circle(img, (int(landmark[2*i] * img.shape[1]), int(landmark[2*i+1] * img.shape[0])), 3, (0, 0, 255), -1)
364 | cv2.imshow('test', img)
365 | cv2.waitKey(0)
366 |
367 |
368 | def load_mosaic_face(self, index):
369 | # loads images in a mosaic
370 | labels4 = []
371 | s = self.img_size
372 | yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y
373 | indices = [index] + [self.indices[random.randint(0, self.n - 1)] for _ in range(3)] # 3 additional image indices
374 | for i, index in enumerate(indices):
375 | # Load image
376 | img, _, (h, w) = load_image(self, index)
377 |
378 | # place img in img4
379 | if i == 0: # top left
380 | img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
381 | x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
382 | x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
383 | elif i == 1: # top right
384 | x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
385 | x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
386 | elif i == 2: # bottom left
387 | x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
388 | x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
389 | elif i == 3: # bottom right
390 | x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
391 | x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
392 |
393 | img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
394 | padw = x1a - x1b
395 | padh = y1a - y1b
396 |
397 | # Labels
398 | x = self.labels[index]
399 | labels = x.copy()
400 | if x.size > 0: # Normalized xywh to pixel xyxy format
401 | #box, x1,y1,x2,y2
402 | labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
403 | labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
404 | labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
405 | labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
406 | # 10 landmark coordinates: mask * (scaled coord) + (mask - 1) keeps invisible points (<= 0) at -1 while shifting visible ones into mosaic pixel coordinates
407 |
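# Worked example of the visibility trick (illustrative numbers): with w=640, padw=20,
#   visible x=0.3  -> 1 * (0.3 * 640 + 20) + (1 - 1) = 212
#   invisible x=-1 -> 0 * (...)            + (0 - 1) = -1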
408 | labels[:, 5] = np.array(x[:, 5] > 0, dtype=np.int32) * (w * x[:, 5] + padw) + (np.array(x[:, 5] > 0, dtype=np.int32) - 1)
409 | labels[:, 6] = np.array(x[:, 6] > 0, dtype=np.int32) * (h * x[:, 6] + padh) + (np.array(x[:, 6] > 0, dtype=np.int32) - 1)
410 | labels[:, 7] = np.array(x[:, 7] > 0, dtype=np.int32) * (w * x[:, 7] + padw) + (np.array(x[:, 7] > 0, dtype=np.int32) - 1)
411 | labels[:, 8] = np.array(x[:, 8] > 0, dtype=np.int32) * (h * x[:, 8] + padh) + (np.array(x[:, 8] > 0, dtype=np.int32) - 1)
412 | labels[:, 9] = np.array(x[:, 9] > 0, dtype=np.int32) * (w * x[:, 9] + padw) + (np.array(x[:, 9] > 0, dtype=np.int32) - 1)
413 | labels[:, 10] = np.array(x[:, 10] > 0, dtype=np.int32) * (h * x[:, 10] + padh) + (np.array(x[:, 10] > 0, dtype=np.int32) - 1)
414 | labels[:, 11] = np.array(x[:, 11] > 0, dtype=np.int32) * (w * x[:, 11] + padw) + (np.array(x[:, 11] > 0, dtype=np.int32) - 1)
415 | labels[:, 12] = np.array(x[:, 12] > 0, dtype=np.int32) * (h * x[:, 12] + padh) + (np.array(x[:, 12] > 0, dtype=np.int32) - 1)
416 | labels[:, 13] = np.array(x[:, 13] > 0, dtype=np.int32) * (w * x[:, 13] + padw) + (np.array(x[:, 13] > 0, dtype=np.int32) - 1)
417 | labels[:, 14] = np.array(x[:, 14] > 0, dtype=np.int32) * (h * x[:, 14] + padh) + (np.array(x[:, 14] > 0, dtype=np.int32) - 1)
418 | labels4.append(labels)
419 |
420 | # Concat/clip labels
421 | if len(labels4):
422 | labels4 = np.concatenate(labels4, 0)
423 | np.clip(labels4[:, 1:5], 0, 2 * s, out=labels4[:, 1:5]) # use with random_perspective
424 | # img4, labels4 = replicate(img4, labels4) # replicate
425 |
426 | # landmarks: mark coordinates that fall outside the 2s x 2s mosaic as invisible (-1), then invalidate the partner coordinate of each (x, y) pair
427 | labels4[:, 5:] = np.where(labels4[:, 5:] < 0, -1, labels4[:, 5:])
428 | labels4[:, 5:] = np.where(labels4[:, 5:] > 2 * s, -1, labels4[:, 5:])
429 |
430 | labels4[:, 5] = np.where(labels4[:, 6] == -1, -1, labels4[:, 5])
431 | labels4[:, 6] = np.where(labels4[:, 5] == -1, -1, labels4[:, 6])
432 |
433 | labels4[:, 7] = np.where(labels4[:, 8] == -1, -1, labels4[:, 7])
434 | labels4[:, 8] = np.where(labels4[:, 7] == -1, -1, labels4[:, 8])
435 |
436 | labels4[:, 9] = np.where(labels4[:, 10] == -1, -1, labels4[:, 9])
437 | labels4[:, 10] = np.where(labels4[:, 9] == -1, -1, labels4[:, 10])
438 |
439 | labels4[:, 11] = np.where(labels4[:, 12] == -1, -1, labels4[:, 11])
440 | labels4[:, 12] = np.where(labels4[:, 11] == -1, -1, labels4[:, 12])
441 |
442 | labels4[:, 13] = np.where(labels4[:, 14] == -1, -1, labels4[:, 13])
443 | labels4[:, 14] = np.where(labels4[:, 13] == -1, -1, labels4[:, 14])
444 |
445 | # Augment
446 | img4, labels4 = random_perspective(img4, labels4,
447 | degrees=self.hyp['degrees'],
448 | translate=self.hyp['translate'],
449 | scale=self.hyp['scale'],
450 | shear=self.hyp['shear'],
451 | perspective=self.hyp['perspective'],
452 | border=self.mosaic_border) # border to remove
453 | return img4, labels4
454 |
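# Summary of load_mosaic_face output: labels4 is (N, 15) = [cls, x1, y1, x2, y2,
# 10 landmark coords] in pixel units of the final augmented mosaic; landmark
# coordinates of invisible points are -1.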
455 |
456 | # Ancillary functions --------------------------------------------------------------------------------------------------
457 | def load_image(self, index):
458 | # loads 1 image from dataset, returns img, original hw, resized hw
459 | img = self.imgs[index]
460 | if img is None: # not cached
461 | path = self.img_files[index]
462 | img = cv2.imread(path) # BGR
463 | assert img is not None, 'Image Not Found ' + path
464 | h0, w0 = img.shape[:2] # orig hw
465 | r = self.img_size / max(h0, w0) # resize image to img_size
466 | if r != 1: # always resize down, only resize up if training with augmentation
467 | interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
468 | img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
469 | return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized
470 | else:
471 | return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized
472 |
473 |
474 | def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
475 | r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
476 | hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
477 | dtype = img.dtype # uint8
478 |
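# OpenCV stores hue in [0, 180) for 8-bit images, hence the % 180 wrap in the hue LUT below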
479 | x = np.arange(0, 256, dtype=np.int16)
480 | lut_hue = ((x * r[0]) % 180).astype(dtype)
481 | lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
482 | lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
483 |
484 | img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
485 | cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
486 |
487 | # Histogram equalization
488 | # if random.random() < 0.2:
489 | # for i in range(3):
490 | # img[:, :, i] = cv2.equalizeHist(img[:, :, i])
491 |
492 | def replicate(img, labels):
493 | # Replicate labels
494 | h, w = img.shape[:2]
495 | boxes = labels[:, 1:].astype(int)
496 | x1, y1, x2, y2 = boxes.T
497 | s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels)
498 | for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices
499 | x1b, y1b, x2b, y2b = boxes[i]
500 | bh, bw = y2b - y1b, x2b - x1b
501 | yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset y, x
502 | x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
503 | img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img[ymin:ymax, xmin:xmax]
504 | labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)
505 |
506 | return img, labels
507 |
508 |
509 | def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
510 | # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
511 | shape = img.shape[:2] # current shape [height, width]
512 | if isinstance(new_shape, int):
513 | new_shape = (new_shape, new_shape)
514 |
515 | # Scale ratio (new / old)
516 | r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
517 | if not scaleup: # only scale down, do not scale up (for better test mAP)
518 | r = min(r, 1.0)
519 |
520 | # Compute padding
521 | ratio = r, r # width, height ratios
522 | new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
523 | dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
524 | if auto: # minimum rectangle
525 | dw, dh = np.mod(dw, 32), np.mod(dh, 32) # wh padding
526 | elif scaleFill: # stretch
527 | dw, dh = 0.0, 0.0
528 | new_unpad = (new_shape[1], new_shape[0])
529 | ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios
530 |
531 | dw /= 2 # divide padding into 2 sides
532 | dh /= 2
533 |
534 | if shape[::-1] != new_unpad: # resize
535 | img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
536 | top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
537 | left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
538 | img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
539 | return img, ratio, (dw, dh)
540 |
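# Usage sketch for letterbox (hypothetical image path):
#   img0 = cv2.imread('face.jpg')                           # original BGR image
#   img, ratio, (dw, dh) = letterbox(img0, new_shape=640, auto=False)
#   # img is 640x640 with gray (114) padding; map detections back with
#   #   x_orig = (x_letterboxed - dw) / ratio[0], y_orig = (y_letterboxed - dh) / ratio[1]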
541 |
542 | def random_perspective(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, border=(0, 0)):
543 | # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
544 | # targets = [cls, xyxy, 10 landmark coords]
545 |
546 | height = img.shape[0] + border[0] * 2 # shape(h,w,c)
547 | width = img.shape[1] + border[1] * 2
548 |
549 | # Center
550 | C = np.eye(3)
551 | C[0, 2] = -img.shape[1] / 2 # x translation (pixels)
552 | C[1, 2] = -img.shape[0] / 2 # y translation (pixels)
553 |
554 | # Perspective
555 | P = np.eye(3)
556 | P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)
557 | P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)
558 |
559 | # Rotation and Scale
560 | R = np.eye(3)
561 | a = random.uniform(-degrees, degrees)
562 | # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
563 | s = random.uniform(1 - scale, 1 + scale)
564 | # s = 2 ** random.uniform(-scale, scale)
565 | R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
566 |
567 | # Shear
568 | S = np.eye(3)
569 | S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
570 | S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
571 |
572 | # Translation
573 | T = np.eye(3)
574 | T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels)
575 | T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels)
576 |
577 | # Combined rotation matrix
578 | M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT
579 | if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
580 | if perspective:
581 | img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))
582 | else: # affine
583 | img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
584 |
585 | # Visualize
586 | # import matplotlib.pyplot as plt
587 | # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
588 | # ax[0].imshow(img[:, :, ::-1]) # base
589 | # ax[1].imshow(img2[:, :, ::-1]) # warped
590 |
591 | # Transform label coordinates
592 | n = len(targets)
593 | if n:
594 | # warp points
595 | #xy = np.ones((n * 4, 3))
596 | xy = np.ones((n * 9, 3))  # 9 points per target: 4 box corners + 5 landmarks
597 | xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]].reshape(n * 9, 2) # x1y1, x2y2, x1y2, x2y1, then the 5 landmark (x, y) pairs
598 | xy = xy @ M.T # transform
599 | if perspective:
600 | xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 18) # rescale
601 | else: # affine
602 | xy = xy[:, :2].reshape(n, 18)
603 |
604 | # create new boxes
605 | x = xy[:, [0, 2, 4, 6]]
606 | y = xy[:, [1, 3, 5, 7]]
607 |
608 | landmarks = xy[:, [8, 9, 10, 11, 12, 13, 14, 15, 16, 17]]
609 | mask = np.array(targets[:, 5:] > 0, dtype=np.int32)
610 | landmarks = landmarks * mask
611 | landmarks = landmarks + mask - 1
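# same visibility trick as in load_mosaic_face: warped coords of invisible points are zeroed by the mask, then shifted to -1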
612 |
613 | landmarks = np.where(landmarks < 0, -1, landmarks)
614 | landmarks[:, [0, 2, 4, 6, 8]] = np.where(landmarks[:, [0, 2, 4, 6, 8]] > width, -1, landmarks[:, [0, 2, 4, 6, 8]])
615 | landmarks[:, [1, 3, 5, 7, 9]] = np.where(landmarks[:, [1, 3, 5, 7, 9]] > height, -1, landmarks[:, [1, 3, 5, 7, 9]])
616 |
617 | landmarks[:, 0] = np.where(landmarks[:, 1] == -1, -1, landmarks[:, 0])
618 | landmarks[:, 1] = np.where(landmarks[:, 0] == -1, -1, landmarks[:, 1])
619 |
620 | landmarks[:, 2] = np.where(landmarks[:, 3] == -1, -1, landmarks[:, 2])
621 | landmarks[:, 3] = np.where(landmarks[:, 2] == -1, -1, landmarks[:, 3])
622 |
623 | landmarks[:, 4] = np.where(landmarks[:, 5] == -1, -1, landmarks[:, 4])
624 | landmarks[:, 5] = np.where(landmarks[:, 4] == -1, -1, landmarks[:, 5])
625 |
626 | landmarks[:, 6] = np.where(landmarks[:, 7] == -1, -1, landmarks[:, 6])
627 | landmarks[:, 7] = np.where(landmarks[:, 6] == -1, -1, landmarks[:, 7])
628 |
629 | landmarks[:, 8] = np.where(landmarks[:, 9] == -1, -1, landmarks[:, 8])
630 | landmarks[:, 9] = np.where(landmarks[:, 8] == -1, -1, landmarks[:, 9])
631 |
632 | targets[:, 5:] = landmarks
633 |
634 | xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
635 |
636 | # # apply angle-based reduction of bounding boxes
637 | # radians = a * math.pi / 180
638 | # reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
639 | # x = (xy[:, 2] + xy[:, 0]) / 2
640 | # y = (xy[:, 3] + xy[:, 1]) / 2
641 | # w = (xy[:, 2] - xy[:, 0]) * reduction
642 | # h = (xy[:, 3] - xy[:, 1]) * reduction
643 | # xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
644 |
645 | # clip boxes
646 | xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
647 | xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
648 |
649 | # filter candidates
650 | i = box_candidates(box1=targets[:, 1:5].T * s, box2=xy.T)
651 | targets = targets[i]
652 | targets[:, 1:5] = xy[i]
653 |
654 | return img, targets
655 |
656 |
657 | def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1): # box1(4,n), box2(4,n)
658 | # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
659 | w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
660 | w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
661 | ar = np.maximum(w2 / (h2 + 1e-16), h2 / (w2 + 1e-16)) # aspect ratio
662 | return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + 1e-16) > area_thr) & (ar < ar_thr) # candidates
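# e.g. a 100x100 box warped to 20x20 has area ratio 400 / 10000 = 0.04 < 0.1 and is
# dropped; warped to 80x60 it has area ratio 0.48 and aspect ratio 1.33, so it is kept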
663 |
664 |
665 | def cutout(image, labels):
666 | # Applies image cutout augmentation https://arxiv.org/abs/1708.04552
667 | h, w = image.shape[:2]
668 |
669 | def bbox_ioa(box1, box2):
670 | # Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
671 | box2 = box2.transpose()
672 |
673 | # Get the coordinates of bounding boxes
674 | b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
675 | b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
676 |
677 | # Intersection area
678 | inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
679 | (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
680 |
681 | # box2 area
682 | box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
683 |
684 | # Intersection over box2 area
685 | return inter_area / box2_area
686 |
687 | # create random masks
688 | scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction
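# 31 candidate masks in total: 1 half-scale + 2 quarter + 4 eighth + 8 sixteenth + 16 thirty-second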
689 | for s in scales:
690 | mask_h = random.randint(1, int(h * s))
691 | mask_w = random.randint(1, int(w * s))
692 |
693 | # box
694 | xmin = max(0, random.randint(0, w) - mask_w // 2)
695 | ymin = max(0, random.randint(0, h) - mask_h // 2)
696 | xmax = min(w, xmin + mask_w)
697 | ymax = min(h, ymin + mask_h)
698 |
699 | # apply random color mask
700 | image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
701 |
702 | # return unobscured labels
703 | if len(labels) and s > 0.03:
704 | box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
705 | ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
706 | labels = labels[ioa < 0.60] # remove >60% obscured labels
707 |
708 | return labels
709 |
710 |
711 | def create_folder(path='./new'):
712 | # Create folder
713 | if os.path.exists(path):
714 | shutil.rmtree(path) # delete output folder
715 | os.makedirs(path) # make new output folder
716 |
717 |
718 | def flatten_recursive(path='../coco128'):
719 | # Flatten a recursive directory by bringing all files to top level
720 | new_path = Path(path + '_flat')
721 | create_folder(new_path)
722 | for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)):
723 | shutil.copyfile(file, new_path / Path(file).name)
724 |
725 |
726 | def extract_boxes(path='../coco128/'): # from utils.datasets import *; extract_boxes('../coco128')
727 | # Convert detection dataset into classification dataset, with one directory per class
728 |
729 | path = Path(path) # images dir
730 | shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None # remove existing
731 | files = list(path.rglob('*.*'))
732 | n = len(files) # number of files
733 | for im_file in tqdm(files, total=n):
734 | if im_file.suffix[1:] in img_formats:
735 | # image
736 | im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB
737 | h, w = im.shape[:2]
738 |
739 | # labels
740 | lb_file = Path(img2label_paths([str(im_file)])[0])
741 | if Path(lb_file).exists():
742 | with open(lb_file, 'r') as f:
743 | lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels
744 |
745 | for j, x in enumerate(lb):
746 | c = int(x[0]) # class
747 | f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename
748 | if not f.parent.is_dir():
749 | f.parent.mkdir(parents=True)
750 |
751 | b = x[1:] * [w, h, w, h] # box
752 | # b[2:] = b[2:].max() # rectangle to square
753 | b[2:] = b[2:] * 1.2 + 3 # pad
754 | b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)  # builtin int: np.int is removed in NumPy >= 1.24
755 |
756 | b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
757 | b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
758 | assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
759 |
760 |
761 | def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0)): # from utils.datasets import *; autosplit('../coco128')
762 | """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
763 | # Arguments
764 | path: Path to images directory
765 | weights: Train, val, test weights (list)
766 | """
767 | path = Path(path) # images dir
768 | files = list(path.rglob('*.*'))
769 | n = len(files) # number of files
770 | indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split
771 | txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files
772 | [(path / x).unlink() for x in txt if (path / x).exists()] # remove existing
773 | for i, img in tqdm(zip(indices, files), total=n):
774 | if img.suffix[1:] in img_formats:
775 | with open(path / txt[i], 'a') as f:
776 | f.write(str(img) + '\n') # add image to txt file
777 |
--------------------------------------------------------------------------------