├── output
│   └── .keepgit.txt
├── example
│   ├── PR_curve_each_class.png
│   └── 20190808_041204.log.json_result.png
├── LICENSE
├── README.md
├── voc_eval_visualize.py
├── visualize.py
├── mean_ap_visualize.py
└── json
    └── 20190808_041204.log.json
/output/.keepgit.txt:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/example/PR_curve_each_class.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Stephenfang51/mmdetection_visualize/HEAD/example/PR_curve_each_class.png
--------------------------------------------------------------------------------
/example/20190808_041204.log.json_result.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Stephenfang51/mmdetection_visualize/HEAD/example/20190808_041204.log.json_result.png
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2019 StephenFang
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # mmdetection_visualize_v1
2 |
3 | A very simple tool for visualizing the training results produced by mmdetection.
4 |
5 | ### Update
6 | > 2019.8.16 ----- PR_curve, F_measure for VOC dataset
7 |
8 | ### Readme
9 | The program supports plotting the six training results listed below, plus the most important evaluation tools: the PR curve and F-measure (VOC only for now).
10 |
11 | 1. loss_rpn_bbox
12 | 2. loss_rpn_cls
13 | 3. loss_bbox
14 | 4. loss_cls
15 | 5. loss
16 | 6. acc
17 |
18 |
19 | 7. PR_curve
20 | 8. F-measure
21 |
22 |
23 | ### Installation
24 |
25 | 1. Clone it
26 | `git clone https://github.com/Stephenfang51/mmdetection_visualize`
27 |
28 | You will get five items in total (the json directory, the output directory, visualize.py, mean_ap_visualize.py and voc_eval_visualize.py).
29 |
30 | 2. Copy the two evaluation scripts into your mmdetection installation (see the example below):
31 | - put `voc_eval_visualize.py` under `/mmdetection/tools/`
32 |
33 | - put `mean_ap_visualize.py` under `mmdetection/mmdet/core/evaluation/`
34 |
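For example, assuming the two repositories are cloned side by side (adjust the paths to your own layout):

```shell
# copy the evaluation scripts into the mmdetection code base
cp mmdetection_visualize/voc_eval_visualize.py mmdetection/tools/
cp mmdetection_visualize/mean_ap_visualize.py  mmdetection/mmdet/core/evaluation/
```
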
35 | ### How to use
36 |
37 | #### Six training results
38 | 1. After training finishes, you will have a **work_dir** directory in your mmdetection directory
39 | 2. Take the latest json log file and put it into the json directory of the mmdetection_visualize directory
40 | 3. Run `python visualize.py json/xxxxxxxlog.json` in the terminal (see the example below)
41 | 4. Check the output directory, done!
42 |
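For example, with the log that ships in this repository (any other mmdetection `*.log.json` works the same way):

```shell
cd mmdetection_visualize
python visualize.py json/20190808_041204.log.json
# the figure should be saved as output/20190808_041204.log.json_result.png
```
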
43 | #### PR curve and F-measure
44 | 1. Make sure `voc_eval_visualize.py` and `mean_ap_visualize.py` are in place (see Installation)
45 | 2. Run the evaluation from the mmdetection root: `python tools/voc_eval_visualize.py {your pkl file} {your network config file}`
46 | - example: `python tools/voc_eval_visualize.py result.pkl ./configs/faster_rcnn_r101_fpn_1x.py` (the sketch below also shows how to produce the pkl file)
47 | 3. Check the mmdetection main directory, you will see **PR_curve_each_class.png** there, done!
48 |
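A minimal end-to-end sketch, assuming an mmdetection v1.x checkout and that your trained checkpoint sits at `work_dirs/faster_rcnn_r101_fpn_1x/latest.pth` (both the checkpoint path and the config here are only examples):

```shell
cd mmdetection
# dump the detections to a pkl file first (standard mmdetection test script)
python tools/test.py configs/faster_rcnn_r101_fpn_1x.py work_dirs/faster_rcnn_r101_fpn_1x/latest.pth --out result.pkl
# then draw the PR curve and F-measure for every class
python tools/voc_eval_visualize.py result.pkl configs/faster_rcnn_r101_fpn_1x.py
```
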
49 |
50 |
51 |
52 |
53 |
54 |
55 |
--------------------------------------------------------------------------------
/voc_eval_visualize.py:
--------------------------------------------------------------------------------
1 | from argparse import ArgumentParser
2 |
3 | import mmcv
4 | import numpy as np
5 |
6 | from mmdet import datasets
7 | from mmdet.core.evaluation.mean_ap_visualize import map_roc_pr
8 |
9 |
10 | def voc_eval(result_file, dataset, iou_thr=0.5):
11 | det_results = mmcv.load(result_file)
12 | gt_bboxes = []
13 | gt_labels = []
14 | gt_ignore = []
15 | for i in range(len(dataset)):
16 | ann = dataset.get_ann_info(i)
17 | bboxes = ann['bboxes']
18 | labels = ann['labels']
19 | if 'bboxes_ignore' in ann:
20 | ignore = np.concatenate([
21 | np.zeros(bboxes.shape[0], dtype=np.bool),
22 | np.ones(ann['bboxes_ignore'].shape[0], dtype=np.bool)
23 | ])
24 | gt_ignore.append(ignore)
25 | bboxes = np.vstack([bboxes, ann['bboxes_ignore']])
26 | labels = np.concatenate([labels, ann['labels_ignore']])
27 | gt_bboxes.append(bboxes)
28 | gt_labels.append(labels)
29 | if not gt_ignore:
30 |         gt_ignore = None
31 | if hasattr(dataset, 'year') and dataset.year == 2007:
32 | dataset_name = 'voc07'
33 | else:
34 | dataset_name = dataset.CLASSES
35 | map_roc_pr(
36 | det_results,
37 | gt_bboxes,
38 | gt_labels,
39 | gt_ignore=gt_ignore,
40 | scale_ranges=None,
41 | iou_thr=iou_thr,
42 | dataset=dataset_name,
43 | print_summary=True)
44 |
45 |
46 | def main():
47 | parser = ArgumentParser(description='VOC Evaluation')
48 | parser.add_argument('result', help='result file path')
49 | parser.add_argument('config', help='config file path')
50 | parser.add_argument(
51 | '--iou-thr',
52 | type=float,
53 | default=0.5,
54 | help='IoU threshold for evaluation')
55 | args = parser.parse_args()
56 | cfg = mmcv.Config.fromfile(args.config)
57 | test_dataset = mmcv.runner.obj_from_dict(cfg.data.test, datasets)
58 | voc_eval(args.result, test_dataset, args.iou_thr)
59 |
60 |
61 | if __name__ == '__main__':
62 | main()
63 |
--------------------------------------------------------------------------------
/visualize.py:
--------------------------------------------------------------------------------
1 | import json
2 | import matplotlib.pyplot as plt
3 | import sys
4 | import os
5 | from collections import OrderedDict
6 |
7 | class visualize_mmdetection():
8 | def __init__(self, path):
9 | self.log = open(path)
10 | self.dict_list = list()
11 | self.loss_rpn_bbox = list()
12 | self.loss_rpn_cls = list()
13 | self.loss_bbox = list()
14 | self.loss_cls = list()
15 | self.loss = list()
16 | self.acc = list()
17 |
18 | def load_data(self):
19 | for line in self.log:
20 | info = json.loads(line)
21 | self.dict_list.append(info)
22 |
23 |         # each json line is one logged iteration; read every value once per
24 |         # iteration so the lists stay aligned with the log. The previous
25 |         # OrderedDict.fromkeys de-duplication is no longer needed (it could
26 |         # also silently drop legitimately repeated loss values).
27 |         for i in range(1, len(self.dict_list)):
28 |             info = self.dict_list[i]
29 |             # skip records that carry no loss values (e.g. evaluation lines)
30 |             if 'loss' not in info:
31 |                 continue
32 |             # ------------find value for every iter-----------------#
33 |             loss_rpn_cls_value = info['loss_rpn_cls']
34 |             loss_rpn_bbox_value = info['loss_rpn_bbox']
35 |             loss_bbox_value = info['loss_bbox']
36 |             loss_cls_value = info['loss_cls']
37 |             loss_value = info['loss']
38 |             acc_value = info['acc']
39 |             # -------------list append------------------------------#
40 |             self.loss_rpn_cls.append(loss_rpn_cls_value)
41 |             self.loss_rpn_bbox.append(loss_rpn_bbox_value)
42 |             self.loss_bbox.append(loss_bbox_value)
43 |             self.loss_cls.append(loss_cls_value)
44 |             self.loss.append(loss_value)
45 |             self.acc.append(acc_value)
46 |
47 | def show_chart(self):
48 | plt.rcParams.update({'font.size': 15})
49 |
50 | plt.figure(figsize=(20, 20))
51 |
52 | plt.subplot(321, title='loss_rpn_cls', ylabel='loss')
53 | plt.plot(self.loss_rpn_cls)
54 | plt.subplot(322, title='loss_rpn_bbox', ylabel='loss')
55 | plt.plot(self.loss_rpn_bbox)
56 |
57 | plt.subplot(323, title='loss_cls', ylabel='loss')
58 | plt.plot(self.loss_cls)
59 | plt.subplot(324, title='loss_bbox', ylabel='loss')
60 | plt.plot(self.loss_bbox)
61 | plt.subplot(325, title='total loss', ylabel='loss')
62 | plt.plot(self.loss)
63 | plt.subplot(326, title='accuracy', ylabel='accuracy')
64 | plt.plot(self.acc)
65 |         plt.suptitle((os.path.basename(sys.argv[1]) + "\n training result"), fontsize=30)
66 |         plt.savefig(('output/' + os.path.basename(sys.argv[1]) + '_result.png'))
67 |
68 |
69 | if __name__ == '__main__':
70 | x = visualize_mmdetection(sys.argv[1])
71 | x.load_data()
72 | x.show_chart()
--------------------------------------------------------------------------------
/mean_ap_visualize.py:
--------------------------------------------------------------------------------
1 | import mmcv
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 | from terminaltables import AsciiTable
5 |
6 | from .bbox_overlaps import bbox_overlaps
7 | from .class_names import get_classes
8 |
9 |
10 | def average_precision(recalls, precisions, mode='area'):
11 | """Calculate average precision (for single or multiple scales).
12 |
13 | Args:
14 | recalls (ndarray): shape (num_scales, num_dets) or (num_dets, )
15 | precisions (ndarray): shape (num_scales, num_dets) or (num_dets, )
16 | mode (str): 'area' or '11points', 'area' means calculating the area
17 | under precision-recall curve, '11points' means calculating
18 | the average precision of recalls at [0, 0.1, ..., 1]
19 |
20 | Returns:
21 | float or ndarray: calculated average precision
22 | """
23 | no_scale = False
24 | if recalls.ndim == 1:
25 | no_scale = True
26 | recalls = recalls[np.newaxis, :]
27 | precisions = precisions[np.newaxis, :]
28 | assert recalls.shape == precisions.shape and recalls.ndim == 2
29 | num_scales = recalls.shape[0]
30 | ap = np.zeros(num_scales, dtype=np.float32)
31 | if mode == 'area':
32 | zeros = np.zeros((num_scales, 1), dtype=recalls.dtype)
33 | ones = np.ones((num_scales, 1), dtype=recalls.dtype)
34 | mrec = np.hstack((zeros, recalls, ones))
35 | mpre = np.hstack((zeros, precisions, zeros))
36 | for i in range(mpre.shape[1] - 1, 0, -1):
37 | mpre[:, i - 1] = np.maximum(mpre[:, i - 1], mpre[:, i])
38 | for i in range(num_scales):
39 | ind = np.where(mrec[i, 1:] != mrec[i, :-1])[0]
40 | ap[i] = np.sum(
41 | (mrec[i, ind + 1] - mrec[i, ind]) * mpre[i, ind + 1])
42 | elif mode == '11points':
43 | for i in range(num_scales):
44 | for thr in np.arange(0, 1 + 1e-3, 0.1):
45 | precs = precisions[i, recalls[i, :] >= thr]
46 | prec = precs.max() if precs.size > 0 else 0
47 | ap[i] += prec
48 | ap /= 11
49 | else:
50 | raise ValueError(
51 | 'Unrecognized mode, only "area" and "11points" are supported')
52 | if no_scale:
53 | ap = ap[0]
54 | return ap
55 |
56 |
57 | def tpfp_imagenet(det_bboxes,
58 | gt_bboxes,
59 | gt_ignore,
60 | default_iou_thr,
61 | area_ranges=None):
62 | """Check if detected bboxes are true positive or false positive.
63 |
64 | Args:
65 | det_bbox (ndarray): the detected bbox
66 | gt_bboxes (ndarray): ground truth bboxes of this image
67 | gt_ignore (ndarray): indicate if gts are ignored for evaluation or not
68 | default_iou_thr (float): the iou thresholds for medium and large bboxes
69 | area_ranges (list or None): gt bbox area ranges
70 |
71 | Returns:
72 | tuple: two arrays (tp, fp) whose elements are 0 and 1
73 | """
74 | num_dets = det_bboxes.shape[0]
75 | num_gts = gt_bboxes.shape[0]
76 | if area_ranges is None:
77 | area_ranges = [(None, None)]
78 | num_scales = len(area_ranges)
79 | # tp and fp are of shape (num_scales, num_gts), each row is tp or fp
80 | # of a certain scale.
81 | tp = np.zeros((num_scales, num_dets), dtype=np.float32)
82 | fp = np.zeros((num_scales, num_dets), dtype=np.float32)
83 | if gt_bboxes.shape[0] == 0:
84 | if area_ranges == [(None, None)]:
85 | fp[...] = 1
86 | else:
87 | det_areas = (det_bboxes[:, 2] - det_bboxes[:, 0] + 1) * (
88 | det_bboxes[:, 3] - det_bboxes[:, 1] + 1)
89 | for i, (min_area, max_area) in enumerate(area_ranges):
90 | fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1
91 | return tp, fp
92 | ious = bbox_overlaps(det_bboxes, gt_bboxes - 1)
93 | gt_w = gt_bboxes[:, 2] - gt_bboxes[:, 0] + 1
94 | gt_h = gt_bboxes[:, 3] - gt_bboxes[:, 1] + 1
95 | iou_thrs = np.minimum((gt_w * gt_h) / ((gt_w + 10.0) * (gt_h + 10.0)),
96 | default_iou_thr)
97 | # sort all detections by scores in descending order
98 | sort_inds = np.argsort(-det_bboxes[:, -1])
99 | for k, (min_area, max_area) in enumerate(area_ranges):
100 | gt_covered = np.zeros(num_gts, dtype=bool)
101 | # if no area range is specified, gt_area_ignore is all False
102 | if min_area is None:
103 | gt_area_ignore = np.zeros_like(gt_ignore, dtype=bool)
104 | else:
105 | gt_areas = gt_w * gt_h
106 | gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area)
107 | for i in sort_inds:
108 | max_iou = -1
109 | matched_gt = -1
110 | # find best overlapped available gt
111 | for j in range(num_gts):
112 | # different from PASCAL VOC: allow finding other gts if the
113 |                 # best overlapped ones are already matched by other det bboxes
114 | if gt_covered[j]:
115 | continue
116 | elif ious[i, j] >= iou_thrs[j] and ious[i, j] > max_iou:
117 | max_iou = ious[i, j]
118 | matched_gt = j
119 | # there are 4 cases for a det bbox:
120 | # 1. it matches a gt, tp = 1, fp = 0
121 | # 2. it matches an ignored gt, tp = 0, fp = 0
122 | # 3. it matches no gt and within area range, tp = 0, fp = 1
123 | # 4. it matches no gt but is beyond area range, tp = 0, fp = 0
124 | if matched_gt >= 0:
125 | gt_covered[matched_gt] = 1
126 | if not (gt_ignore[matched_gt] or gt_area_ignore[matched_gt]):
127 | tp[k, i] = 1
128 | elif min_area is None:
129 | fp[k, i] = 1
130 | else:
131 | bbox = det_bboxes[i, :4]
132 | area = (bbox[2] - bbox[0] + 1) * (bbox[3] - bbox[1] + 1)
133 | if area >= min_area and area < max_area:
134 | fp[k, i] = 1
135 | return tp, fp
136 |
137 |
138 | def tpfp_default(det_bboxes, gt_bboxes, gt_ignore, iou_thr, area_ranges=None):
139 | """Check if detected bboxes are true positive or false positive.
140 |
141 | Args:
142 | det_bbox (ndarray): the detected bbox
143 | gt_bboxes (ndarray): ground truth bboxes of this image
144 | gt_ignore (ndarray): indicate if gts are ignored for evaluation or not
145 | iou_thr (float): the iou thresholds
146 |
147 | Returns:
148 | tuple: (tp, fp), two arrays whose elements are 0 and 1
149 | """
150 | num_dets = det_bboxes.shape[0]
151 | num_gts = gt_bboxes.shape[0]
152 | if area_ranges is None:
153 | area_ranges = [(None, None)]
154 | num_scales = len(area_ranges)
155 | # tp and fp are of shape (num_scales, num_gts), each row is tp or fp of
156 | # a certain scale
157 | tp = np.zeros((num_scales, num_dets), dtype=np.float32)
158 | fp = np.zeros((num_scales, num_dets), dtype=np.float32)
159 | # if there is no gt bboxes in this image, then all det bboxes
160 | # within area range are false positives
161 | if gt_bboxes.shape[0] == 0:
162 | if area_ranges == [(None, None)]:
163 | fp[...] = 1
164 | else:
165 | det_areas = (det_bboxes[:, 2] - det_bboxes[:, 0] + 1) * (
166 | det_bboxes[:, 3] - det_bboxes[:, 1] + 1)
167 | for i, (min_area, max_area) in enumerate(area_ranges):
168 | fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1
169 | return tp, fp
170 | ious = bbox_overlaps(det_bboxes, gt_bboxes)
171 | ious_max = ious.max(axis=1)
172 | ious_argmax = ious.argmax(axis=1)
173 | sort_inds = np.argsort(-det_bboxes[:, -1])
174 | for k, (min_area, max_area) in enumerate(area_ranges):
175 | gt_covered = np.zeros(num_gts, dtype=bool)
176 | # if no area range is specified, gt_area_ignore is all False
177 | if min_area is None:
178 | gt_area_ignore = np.zeros_like(gt_ignore, dtype=bool)
179 | else:
180 | gt_areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0] + 1) * (
181 | gt_bboxes[:, 3] - gt_bboxes[:, 1] + 1)
182 | gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area)
183 | for i in sort_inds:
184 | if ious_max[i] >= iou_thr:
185 | matched_gt = ious_argmax[i]
186 | if not (gt_ignore[matched_gt] or gt_area_ignore[matched_gt]):
187 | if not gt_covered[matched_gt]:
188 | gt_covered[matched_gt] = True
189 | tp[k, i] = 1
190 | else:
191 | fp[k, i] = 1
192 | # otherwise ignore this detected bbox, tp = 0, fp = 0
193 | elif min_area is None:
194 | fp[k, i] = 1
195 | else:
196 | bbox = det_bboxes[i, :4]
197 | area = (bbox[2] - bbox[0] + 1) * (bbox[3] - bbox[1] + 1)
198 | if area >= min_area and area < max_area:
199 | fp[k, i] = 1
200 | return tp, fp
201 |
202 |
203 | def get_cls_results(det_results, gt_bboxes, gt_labels, gt_ignore, class_id):
204 | """Get det results and gt information of a certain class."""
205 | cls_dets = [det[class_id]
206 | for det in det_results] # det bboxes of this class
207 | cls_gts = [] # gt bboxes of this class
208 | cls_gt_ignore = []
209 | for j in range(len(gt_bboxes)):
210 | gt_bbox = gt_bboxes[j]
211 | cls_inds = (gt_labels[j] == class_id + 1)
212 | cls_gt = gt_bbox[cls_inds, :] if gt_bbox.shape[0] > 0 else gt_bbox
213 | cls_gts.append(cls_gt)
214 | if gt_ignore is None:
215 | cls_gt_ignore.append(np.zeros(cls_gt.shape[0], dtype=np.int32))
216 | else:
217 | cls_gt_ignore.append(gt_ignore[j][cls_inds])
218 | return cls_dets, cls_gts, cls_gt_ignore
219 |
220 |
221 | def map_roc_pr(det_results,
222 | gt_bboxes,
223 | gt_labels,
224 | gt_ignore=None,
225 | scale_ranges=None,
226 | iou_thr=0.5,
227 | dataset=None,
228 | print_summary=True):
229 | """Evaluate mAP of a dataset.
230 |
231 | Args:
232 | det_results (list): a list of list, [[cls1_det, cls2_det, ...], ...]
233 | gt_bboxes (list): ground truth bboxes of each image, a list of K*4
234 | array.
235 | gt_labels (list): ground truth labels of each image, a list of K array
236 | gt_ignore (list): gt ignore indicators of each image, a list of K array
237 | scale_ranges (list, optional): [(min1, max1), (min2, max2), ...]
238 | iou_thr (float): IoU threshold
239 | dataset (None or str or list): dataset name or dataset classes, there
240 |             are minor differences in metrics for different datasets, e.g.
241 | "voc07", "imagenet_det", etc.
242 | print_summary (bool): whether to print the mAP summary
243 |
244 | Returns:
245 | tuple: (mAP, [dict, dict, ...])
246 | """
247 | assert len(det_results) == len(gt_bboxes) == len(gt_labels)
248 | if gt_ignore is not None:
249 | assert len(gt_ignore) == len(gt_labels)
250 | for i in range(len(gt_ignore)):
251 | assert len(gt_labels[i]) == len(gt_ignore[i])
252 | area_ranges = ([(rg[0]**2, rg[1]**2) for rg in scale_ranges]
253 | if scale_ranges is not None else None)
254 | num_scales = len(scale_ranges) if scale_ranges is not None else 1
255 | eval_results = []
256 | num_classes = len(det_results[0]) # positive class num
257 | gt_labels = [
258 | label if label.ndim == 1 else label[:, 0] for label in gt_labels
259 | ]
260 |
261 | f_measure_list = []
262 | recall_list = []
263 | precision_list = []
264 | ap_list = []
265 | for i in range(num_classes):
266 | # get gt and det bboxes of this class
267 | cls_dets, cls_gts, cls_gt_ignore = get_cls_results(
268 | det_results, gt_bboxes, gt_labels, gt_ignore, i)
269 | # calculate tp and fp for each image
270 | tpfp_func = (
271 | tpfp_imagenet if dataset in ['det', 'vid'] else tpfp_default)
272 | tpfp = [
273 | tpfp_func(cls_dets[j], cls_gts[j], cls_gt_ignore[j], iou_thr,
274 | area_ranges) for j in range(len(cls_dets))
275 | ]
276 | tp, fp = tuple(zip(*tpfp))
277 | # calculate gt number of each scale, gts ignored or beyond scale
278 | # are not counted
279 | num_gts = np.zeros(num_scales, dtype=int)
280 | for j, bbox in enumerate(cls_gts):
281 | if area_ranges is None:
282 | num_gts[0] += np.sum(np.logical_not(cls_gt_ignore[j]))
283 | else:
284 | gt_areas = (bbox[:, 2] - bbox[:, 0] + 1) * (
285 | bbox[:, 3] - bbox[:, 1] + 1)
286 | for k, (min_area, max_area) in enumerate(area_ranges):
287 | num_gts[k] += np.sum(
288 | np.logical_not(cls_gt_ignore[j]) &
289 | (gt_areas >= min_area) & (gt_areas < max_area))
290 | # sort all det bboxes by score, also sort tp and fp
291 | cls_dets = np.vstack(cls_dets)
292 | num_dets = cls_dets.shape[0]
293 | sort_inds = np.argsort(-cls_dets[:, -1])
294 | tp = np.hstack(tp)[:, sort_inds]
295 | fp = np.hstack(fp)[:, sort_inds]
296 | # calculate recall and precision with tp and fp
297 | tp = np.cumsum(tp, axis=1)
298 | fp = np.cumsum(fp, axis=1)
299 | eps = np.finfo(np.float32).eps
300 | recalls = tp / np.maximum(num_gts[:, np.newaxis], eps)
301 | precisions = tp / np.maximum((tp + fp), eps)
302 | # calculate AP
303 |
304 |
305 | if scale_ranges is None:
306 | recalls = recalls[0, :]
307 | precisions = precisions[0, :]
308 | num_gts = num_gts.item()
309 | mode = 'area' if dataset != 'voc07' else '11points'
310 |
311 | ap = average_precision(recalls, precisions, mode)
312 |
313 |
314 | #------------collect data--------------#
315 | recall_list.append(recalls)
316 | precision_list.append(precisions)
317 | ap_list.append(ap)
318 |
319 |
320 |
321 | eval_results.append({
322 | 'num_gts': num_gts,
323 | 'num_dets': num_dets,
324 | 'recall': recalls,
325 | 'precision': precisions,
326 | 'ap': ap
327 | })
328 |
329 | #-----------F_measure calculation--------------#
330 |
331 | top = recalls * precisions
332 | down = recalls + precisions
333 |         f_measure = np.mean(2 * top / np.maximum(down, eps))
334 | f_measure_list.append(f_measure)
335 |
336 |     label_names = get_classes(dataset) if mmcv.is_str(dataset) else dataset
337 | #------------plot PR / F-measure-----------#
338 | plt.figure(figsize=(6, 10))
339 | #------------plot curve--------------------#
340 | for i in range(num_classes):
341 | plt.subplots_adjust(hspace=0.3)
342 | plt.subplot(211)
343 | plt.plot(recall_list[i], precision_list[i], linewidth=2, \
344 | label='({}, (AP = {:.4f}))'.format(label_names[i], ap_list[i]))
345 | plt.title('Precision-Recall')
346 | plt.xlabel('Recall')
347 | plt.ylabel('Precision')
348 | plt.axis([0, 1, 0, 1])
349 | plt.legend()
350 |
351 | plt.subplot(212)
352 | plt.title('F-measure')
353 | plt.bar(label_names[i], f_measure_list[i])
354 | for a, b in zip(label_names, f_measure_list):
355 | plt.text(a, b, '%.4f' % b, color='black', fontweight='bold')
356 |
357 |     plt.savefig('PR_curve_each_class.png')  # saved in the current working directory (the mmdetection root when run from there)
358 |
359 | if scale_ranges is not None:
360 | # shape (num_classes, num_scales)
361 | all_ap = np.vstack([cls_result['ap'] for cls_result in eval_results])
362 | all_num_gts = np.vstack(
363 | [cls_result['num_gts'] for cls_result in eval_results])
364 | mean_ap = [
365 | all_ap[all_num_gts[:, i] > 0, i].mean()
366 | if np.any(all_num_gts[:, i] > 0) else 0.0
367 | for i in range(num_scales)
368 | ]
369 | else:
370 | aps = []
371 | for cls_result in eval_results:
372 | if cls_result['num_gts'] > 0:
373 | aps.append(cls_result['ap'])
374 | mean_ap = np.array(aps).mean().item() if aps else 0.0
375 |     if print_summary:
376 |         # print per-class recall/precision/AP and the mAP as an ascii table
377 |         print_map_summary(mean_ap, eval_results, dataset)
378 |
379 | return mean_ap, eval_results
380 |
381 |
382 | def print_map_summary(mean_ap, results, dataset=None):
383 | """Print mAP and results of each class.
384 |
385 | Args:
386 | mean_ap(float): calculated from `eval_map`
387 | results(list): calculated from `eval_map`
388 | dataset(None or str or list): dataset name or dataset classes.
389 | """
390 | num_scales = len(results[0]['ap']) if isinstance(results[0]['ap'],
391 | np.ndarray) else 1
392 | num_classes = len(results)
393 |
394 | recalls = np.zeros((num_scales, num_classes), dtype=np.float32)
395 | precisions = np.zeros((num_scales, num_classes), dtype=np.float32)
396 | aps = np.zeros((num_scales, num_classes), dtype=np.float32)
397 | num_gts = np.zeros((num_scales, num_classes), dtype=int)
398 | for i, cls_result in enumerate(results):
399 | if cls_result['recall'].size > 0:
400 | recalls[:, i] = np.array(cls_result['recall'], ndmin=2)[:, -1]
401 | precisions[:, i] = np.array(
402 | cls_result['precision'], ndmin=2)[:, -1]
403 | aps[:, i] = cls_result['ap']
404 | num_gts[:, i] = cls_result['num_gts']
405 |
406 | if dataset is None:
407 | label_names = [str(i) for i in range(1, num_classes + 1)]
408 | elif mmcv.is_str(dataset):
409 | label_names = get_classes(dataset)
410 | else:
411 | label_names = dataset
412 |
413 |
414 |
415 | if not isinstance(mean_ap, list):
416 | mean_ap = [mean_ap]
417 | header = ['class', 'gts', 'dets', 'recall', 'precision', 'ap']
418 | for i in range(num_scales):
419 | table_data = [header]
420 | for j in range(num_classes):
421 | row_data = [
422 | label_names[j], num_gts[i, j], results[j]['num_dets'],
423 | '{:.3f}'.format(recalls[i, j]), '{:.3f}'.format(
424 | precisions[i, j]), '{:.3f}'.format(aps[i, j])
425 | ]
426 | table_data.append(row_data)
427 | table_data.append(['mAP', '', '', '', '', '{:.3f}'.format(mean_ap[i])])
428 | table = AsciiTable(table_data)
429 | table.inner_footing_row_border = True
430 | print(table.table)
431 |
--------------------------------------------------------------------------------
/json/20190808_041204.log.json:
--------------------------------------------------------------------------------
1 | {"mode": "train", "epoch": 1, "iter": 50, "lr": 0.00797, "time": 0.59679, "data_time": 0.0968, "memory": 5693, "loss_rpn_cls": 0.11835, "loss_rpn_bbox": 0.01221, "loss_cls": 0.49759, "acc": 94.61719, "loss_bbox": 0.08404, "loss": 0.71218}
2 | {"mode": "train", "epoch": 1, "iter": 100, "lr": 0.00931, "time": 0.57119, "data_time": 0.0656, "memory": 5693, "loss_rpn_cls": 0.05051, "loss_rpn_bbox": 0.00928, "loss_cls": 0.196, "acc": 96.15625, "loss_bbox": 0.09498, "loss": 0.35077}
3 | {"mode": "train", "epoch": 1, "iter": 150, "lr": 0.01064, "time": 0.59005, "data_time": 0.00892, "memory": 5693, "loss_rpn_cls": 0.03875, "loss_rpn_bbox": 0.00888, "loss_cls": 0.20359, "acc": 95.40625, "loss_bbox": 0.10541, "loss": 0.35663}
4 | {"mode": "train", "epoch": 1, "iter": 200, "lr": 0.01197, "time": 0.59531, "data_time": 0.08774, "memory": 5693, "loss_rpn_cls": 0.03968, "loss_rpn_bbox": 0.01054, "loss_cls": 0.21803, "acc": 95.37305, "loss_bbox": 0.12022, "loss": 0.38847}
5 | {"mode": "train", "epoch": 1, "iter": 250, "lr": 0.01331, "time": 0.61337, "data_time": 0.07735, "memory": 5693, "loss_rpn_cls": 0.03748, "loss_rpn_bbox": 0.00867, "loss_cls": 0.21153, "acc": 95.86914, "loss_bbox": 0.10046, "loss": 0.35815}
6 | {"mode": "train", "epoch": 1, "iter": 300, "lr": 0.01464, "time": 0.79358, "data_time": 0.24043, "memory": 5693, "loss_rpn_cls": 0.0348, "loss_rpn_bbox": 0.00874, "loss_cls": 0.24501, "acc": 95.40234, "loss_bbox": 0.12003, "loss": 0.40859}
7 | {"mode": "train", "epoch": 1, "iter": 350, "lr": 0.01597, "time": 0.54154, "data_time": 0.03431, "memory": 5693, "loss_rpn_cls": 0.02722, "loss_rpn_bbox": 0.00876, "loss_cls": 0.1903, "acc": 95.93555, "loss_bbox": 0.10312, "loss": 0.3294}
8 | {"mode": "train", "epoch": 1, "iter": 400, "lr": 0.01731, "time": 0.60817, "data_time": 0.04485, "memory": 5693, "loss_rpn_cls": 0.02287, "loss_rpn_bbox": 0.00667, "loss_cls": 0.16813, "acc": 96.03516, "loss_bbox": 0.1006, "loss": 0.29827}
9 | {"mode": "train", "epoch": 1, "iter": 450, "lr": 0.01864, "time": 0.52028, "data_time": 0.01367, "memory": 5696, "loss_rpn_cls": 0.03169, "loss_rpn_bbox": 0.00844, "loss_cls": 0.20744, "acc": 95.05664, "loss_bbox": 0.11935, "loss": 0.36693}
10 | {"mode": "train", "epoch": 1, "iter": 500, "lr": 0.01997, "time": 0.51426, "data_time": 0.00634, "memory": 5696, "loss_rpn_cls": 0.0309, "loss_rpn_bbox": 0.00783, "loss_cls": 0.17481, "acc": 96.0, "loss_bbox": 0.10127, "loss": 0.31481}
11 | {"mode": "train", "epoch": 1, "iter": 550, "lr": 0.02, "time": 0.63824, "data_time": 0.08104, "memory": 5696, "loss_rpn_cls": 0.03039, "loss_rpn_bbox": 0.00915, "loss_cls": 0.22103, "acc": 95.04492, "loss_bbox": 0.11644, "loss": 0.377}
12 | {"mode": "train", "epoch": 2, "iter": 50, "lr": 0.02, "time": 0.71032, "data_time": 0.08964, "memory": 5696, "loss_rpn_cls": 0.03301, "loss_rpn_bbox": 0.01058, "loss_cls": 0.17785, "acc": 95.66406, "loss_bbox": 0.10286, "loss": 0.3243}
13 | {"mode": "train", "epoch": 2, "iter": 100, "lr": 0.02, "time": 0.51818, "data_time": 0.00659, "memory": 5696, "loss_rpn_cls": 0.0208, "loss_rpn_bbox": 0.00904, "loss_cls": 0.19729, "acc": 95.32617, "loss_bbox": 0.11022, "loss": 0.33734}
14 | {"mode": "train", "epoch": 2, "iter": 150, "lr": 0.02, "time": 0.51592, "data_time": 0.00659, "memory": 5696, "loss_rpn_cls": 0.03901, "loss_rpn_bbox": 0.01021, "loss_cls": 0.23754, "acc": 94.82812, "loss_bbox": 0.11851, "loss": 0.40527}
15 | {"mode": "train", "epoch": 2, "iter": 200, "lr": 0.02, "time": 0.73885, "data_time": 0.14225, "memory": 5696, "loss_rpn_cls": 0.02451, "loss_rpn_bbox": 0.00861, "loss_cls": 0.19385, "acc": 95.19727, "loss_bbox": 0.10589, "loss": 0.33286}
16 | {"mode": "train", "epoch": 2, "iter": 250, "lr": 0.02, "time": 0.67018, "data_time": 0.05752, "memory": 5696, "loss_rpn_cls": 0.02127, "loss_rpn_bbox": 0.00676, "loss_cls": 0.21145, "acc": 95.10742, "loss_bbox": 0.11188, "loss": 0.35136}
17 | {"mode": "train", "epoch": 2, "iter": 300, "lr": 0.02, "time": 1.77583, "data_time": 1.21891, "memory": 5696, "loss_rpn_cls": 0.02736, "loss_rpn_bbox": 0.00849, "loss_cls": 0.22782, "acc": 95.00586, "loss_bbox": 0.11242, "loss": 0.37608}
18 | {"mode": "train", "epoch": 2, "iter": 350, "lr": 0.02, "time": 0.58246, "data_time": 0.01317, "memory": 5696, "loss_rpn_cls": 0.02791, "loss_rpn_bbox": 0.00913, "loss_cls": 0.15841, "acc": 95.70312, "loss_bbox": 0.09111, "loss": 0.28656}
19 | {"mode": "train", "epoch": 2, "iter": 400, "lr": 0.02, "time": 1.81113, "data_time": 1.26989, "memory": 5696, "loss_rpn_cls": 0.02869, "loss_rpn_bbox": 0.00838, "loss_cls": 0.22194, "acc": 94.375, "loss_bbox": 0.11855, "loss": 0.37758}
20 | {"mode": "train", "epoch": 2, "iter": 450, "lr": 0.02, "time": 0.55255, "data_time": 0.00809, "memory": 5696, "loss_rpn_cls": 0.02299, "loss_rpn_bbox": 0.00882, "loss_cls": 0.19402, "acc": 95.03125, "loss_bbox": 0.10782, "loss": 0.33365}
21 | {"mode": "train", "epoch": 2, "iter": 500, "lr": 0.02, "time": 0.52904, "data_time": 0.02711, "memory": 5696, "loss_rpn_cls": 0.02585, "loss_rpn_bbox": 0.00886, "loss_cls": 0.15981, "acc": 95.43945, "loss_bbox": 0.08673, "loss": 0.28125}
22 | {"mode": "train", "epoch": 2, "iter": 550, "lr": 0.02, "time": 0.50466, "data_time": 0.0066, "memory": 5696, "loss_rpn_cls": 0.01482, "loss_rpn_bbox": 0.00775, "loss_cls": 0.16066, "acc": 95.36328, "loss_bbox": 0.09243, "loss": 0.27565}
23 | {"mode": "train", "epoch": 3, "iter": 115, "lr": 0.02, "time": 0.4643, "data_time": 0.18293, "memory": 5696, "loss_rpn_cls": 0.015, "loss_rpn_bbox": 0.00872, "loss_cls": 0.16652, "acc": 95.33798, "loss_bbox": 0.08805, "loss": 0.2783}
24 | {"mode": "train", "epoch": 3, "iter": 50, "lr": 0.02, "time": 0.65571, "data_time": 0.042, "memory": 5696, "loss_rpn_cls": 0.01512, "loss_rpn_bbox": 0.00929, "loss_cls": 0.19279, "acc": 94.68555, "loss_bbox": 0.10084, "loss": 0.31803}
25 | {"mode": "train", "epoch": 3, "iter": 100, "lr": 0.02, "time": 0.62714, "data_time": 0.00941, "memory": 5696, "loss_rpn_cls": 0.01642, "loss_rpn_bbox": 0.00869, "loss_cls": 0.18565, "acc": 94.45898, "loss_bbox": 0.10781, "loss": 0.31857}
26 | {"mode": "train", "epoch": 3, "iter": 150, "lr": 0.02, "time": 0.53399, "data_time": 0.00724, "memory": 5696, "loss_rpn_cls": 0.0136, "loss_rpn_bbox": 0.00865, "loss_cls": 0.16028, "acc": 94.70508, "loss_bbox": 0.0894, "loss": 0.27193}
27 | {"mode": "train", "epoch": 3, "iter": 200, "lr": 0.02, "time": 0.51833, "data_time": 0.01012, "memory": 5696, "loss_rpn_cls": 0.01231, "loss_rpn_bbox": 0.00654, "loss_cls": 0.15808, "acc": 95.64258, "loss_bbox": 0.07831, "loss": 0.25524}
28 | {"mode": "train", "epoch": 3, "iter": 250, "lr": 0.02, "time": 0.51886, "data_time": 0.01523, "memory": 5696, "loss_rpn_cls": 0.01818, "loss_rpn_bbox": 0.00802, "loss_cls": 0.16348, "acc": 95.30078, "loss_bbox": 0.08176, "loss": 0.27143}
29 | {"mode": "train", "epoch": 3, "iter": 300, "lr": 0.02, "time": 0.52, "data_time": 0.01675, "memory": 5696, "loss_rpn_cls": 0.01862, "loss_rpn_bbox": 0.01024, "loss_cls": 0.19063, "acc": 94.18359, "loss_bbox": 0.10709, "loss": 0.32658}
30 | {"mode": "train", "epoch": 3, "iter": 350, "lr": 0.02, "time": 0.51337, "data_time": 0.01368, "memory": 5696, "loss_rpn_cls": 0.01528, "loss_rpn_bbox": 0.00834, "loss_cls": 0.17178, "acc": 94.70508, "loss_bbox": 0.09347, "loss": 0.28888}
31 | {"mode": "train", "epoch": 3, "iter": 400, "lr": 0.02, "time": 0.52096, "data_time": 0.01297, "memory": 5696, "loss_rpn_cls": 0.01524, "loss_rpn_bbox": 0.00654, "loss_cls": 0.13255, "acc": 95.80273, "loss_bbox": 0.07087, "loss": 0.22519}
32 | {"mode": "train", "epoch": 3, "iter": 450, "lr": 0.02, "time": 0.52238, "data_time": 0.01699, "memory": 5696, "loss_rpn_cls": 0.01728, "loss_rpn_bbox": 0.00808, "loss_cls": 0.1352, "acc": 96.00391, "loss_bbox": 0.07413, "loss": 0.23469}
33 | {"mode": "train", "epoch": 3, "iter": 500, "lr": 0.02, "time": 0.54049, "data_time": 0.03636, "memory": 5696, "loss_rpn_cls": 0.02972, "loss_rpn_bbox": 0.0097, "loss_cls": 0.17275, "acc": 95.46094, "loss_bbox": 0.08518, "loss": 0.29735}
34 | {"mode": "train", "epoch": 3, "iter": 550, "lr": 0.02, "time": 0.81261, "data_time": 0.31663, "memory": 5696, "loss_rpn_cls": 0.0269, "loss_rpn_bbox": 0.00791, "loss_cls": 0.15391, "acc": 95.45117, "loss_bbox": 0.08503, "loss": 0.27375}
35 | {"mode": "train", "epoch": 4, "iter": 50, "lr": 0.02, "time": 0.60127, "data_time": 0.09988, "memory": 5696, "loss_rpn_cls": 0.01311, "loss_rpn_bbox": 0.00816, "loss_cls": 0.14259, "acc": 95.31836, "loss_bbox": 0.07658, "loss": 0.24045}
36 | {"mode": "train", "epoch": 4, "iter": 100, "lr": 0.02, "time": 0.71163, "data_time": 0.17248, "memory": 5696, "loss_rpn_cls": 0.01104, "loss_rpn_bbox": 0.00651, "loss_cls": 0.15079, "acc": 95.00586, "loss_bbox": 0.07959, "loss": 0.24793}
37 | {"mode": "train", "epoch": 4, "iter": 150, "lr": 0.02, "time": 0.57527, "data_time": 0.03136, "memory": 5696, "loss_rpn_cls": 0.01315, "loss_rpn_bbox": 0.00905, "loss_cls": 0.15083, "acc": 95.10742, "loss_bbox": 0.07495, "loss": 0.24799}
38 | {"mode": "train", "epoch": 4, "iter": 200, "lr": 0.02, "time": 0.56739, "data_time": 0.03235, "memory": 5696, "loss_rpn_cls": 0.01052, "loss_rpn_bbox": 0.00567, "loss_cls": 0.11691, "acc": 95.96094, "loss_bbox": 0.05841, "loss": 0.19151}
39 | {"mode": "train", "epoch": 4, "iter": 250, "lr": 0.02, "time": 0.6406, "data_time": 0.11829, "memory": 5696, "loss_rpn_cls": 0.03531, "loss_rpn_bbox": 0.01054, "loss_cls": 0.25537, "acc": 94.49609, "loss_bbox": 0.107, "loss": 0.40822}
40 | {"mode": "train", "epoch": 4, "iter": 300, "lr": 0.02, "time": 0.51271, "data_time": 0.00934, "memory": 5696, "loss_rpn_cls": 0.03043, "loss_rpn_bbox": 0.00864, "loss_cls": 0.20869, "acc": 95.01367, "loss_bbox": 0.10187, "loss": 0.34963}
41 | {"mode": "train", "epoch": 4, "iter": 350, "lr": 0.02, "time": 0.5269, "data_time": 0.01887, "memory": 5696, "loss_rpn_cls": 0.01354, "loss_rpn_bbox": 0.00632, "loss_cls": 0.13872, "acc": 95.76758, "loss_bbox": 0.0718, "loss": 0.23038}
42 | {"mode": "train", "epoch": 4, "iter": 400, "lr": 0.02, "time": 0.51912, "data_time": 0.01523, "memory": 5696, "loss_rpn_cls": 0.0145, "loss_rpn_bbox": 0.00734, "loss_cls": 0.14204, "acc": 95.16992, "loss_bbox": 0.07278, "loss": 0.23666}
43 | {"mode": "train", "epoch": 4, "iter": 450, "lr": 0.02, "time": 1.12456, "data_time": 0.6056, "memory": 5696, "loss_rpn_cls": 0.01776, "loss_rpn_bbox": 0.00924, "loss_cls": 0.13795, "acc": 95.61914, "loss_bbox": 0.07037, "loss": 0.23531}
44 | {"mode": "train", "epoch": 4, "iter": 500, "lr": 0.02, "time": 0.51186, "data_time": 0.01212, "memory": 5696, "loss_rpn_cls": 0.01487, "loss_rpn_bbox": 0.00719, "loss_cls": 0.13872, "acc": 95.75977, "loss_bbox": 0.07414, "loss": 0.23492}
45 | {"mode": "train", "epoch": 4, "iter": 550, "lr": 0.02, "time": 0.50763, "data_time": 0.00727, "memory": 5696, "loss_rpn_cls": 0.011, "loss_rpn_bbox": 0.00708, "loss_cls": 0.12878, "acc": 95.68555, "loss_bbox": 0.06927, "loss": 0.21613}
46 | {"mode": "train", "epoch": 5, "iter": 115, "lr": 0.02, "time": 0.43991, "data_time": 0.21911, "memory": 5696, "loss_rpn_cls": 0.01058, "loss_rpn_bbox": 0.00791, "loss_cls": 0.15151, "acc": 94.80554, "loss_bbox": 0.07569, "loss": 0.24569}
47 | {"mode": "train", "epoch": 5, "iter": 50, "lr": 0.02, "time": 0.52744, "data_time": 0.02597, "memory": 5696, "loss_rpn_cls": 0.01129, "loss_rpn_bbox": 0.00606, "loss_cls": 0.13429, "acc": 95.52344, "loss_bbox": 0.065, "loss": 0.21663}
48 | {"mode": "train", "epoch": 5, "iter": 100, "lr": 0.02, "time": 0.50221, "data_time": 0.00637, "memory": 5696, "loss_rpn_cls": 0.01236, "loss_rpn_bbox": 0.0061, "loss_cls": 0.13439, "acc": 95.18945, "loss_bbox": 0.06321, "loss": 0.21608}
49 | {"mode": "train", "epoch": 5, "iter": 150, "lr": 0.02, "time": 0.50715, "data_time": 0.00642, "memory": 5696, "loss_rpn_cls": 0.01211, "loss_rpn_bbox": 0.00776, "loss_cls": 0.14989, "acc": 94.78711, "loss_bbox": 0.07005, "loss": 0.2398}
50 | {"mode": "train", "epoch": 5, "iter": 200, "lr": 0.02, "time": 0.50427, "data_time": 0.00638, "memory": 5696, "loss_rpn_cls": 0.00914, "loss_rpn_bbox": 0.00716, "loss_cls": 0.13213, "acc": 95.38672, "loss_bbox": 0.06545, "loss": 0.21388}
51 | {"mode": "train", "epoch": 5, "iter": 250, "lr": 0.02, "time": 0.53265, "data_time": 0.01709, "memory": 5696, "loss_rpn_cls": 0.01041, "loss_rpn_bbox": 0.00724, "loss_cls": 0.13253, "acc": 95.34375, "loss_bbox": 0.06543, "loss": 0.21561}
52 | {"mode": "train", "epoch": 5, "iter": 300, "lr": 0.02, "time": 0.50917, "data_time": 0.00716, "memory": 5696, "loss_rpn_cls": 0.01345, "loss_rpn_bbox": 0.00737, "loss_cls": 0.13211, "acc": 95.46484, "loss_bbox": 0.06506, "loss": 0.21799}
53 | {"mode": "train", "epoch": 5, "iter": 350, "lr": 0.02, "time": 0.52442, "data_time": 0.01546, "memory": 5696, "loss_rpn_cls": 0.01018, "loss_rpn_bbox": 0.00816, "loss_cls": 0.12542, "acc": 95.55273, "loss_bbox": 0.05815, "loss": 0.20191}
54 | {"mode": "train", "epoch": 5, "iter": 400, "lr": 0.02, "time": 0.54248, "data_time": 0.04472, "memory": 5696, "loss_rpn_cls": 0.01182, "loss_rpn_bbox": 0.00879, "loss_cls": 0.13112, "acc": 95.25977, "loss_bbox": 0.05756, "loss": 0.20928}
55 | {"mode": "train", "epoch": 5, "iter": 450, "lr": 0.02, "time": 0.54863, "data_time": 0.03904, "memory": 5696, "loss_rpn_cls": 0.01463, "loss_rpn_bbox": 0.00769, "loss_cls": 0.15927, "acc": 94.65625, "loss_bbox": 0.07346, "loss": 0.25505}
56 | {"mode": "train", "epoch": 5, "iter": 500, "lr": 0.02, "time": 0.61727, "data_time": 0.03714, "memory": 5696, "loss_rpn_cls": 0.01238, "loss_rpn_bbox": 0.00644, "loss_cls": 0.14063, "acc": 95.21875, "loss_bbox": 0.06167, "loss": 0.22113}
57 | {"mode": "train", "epoch": 5, "iter": 550, "lr": 0.02, "time": 0.55806, "data_time": 0.00825, "memory": 5696, "loss_rpn_cls": 0.01484, "loss_rpn_bbox": 0.00729, "loss_cls": 0.13206, "acc": 95.5957, "loss_bbox": 0.06198, "loss": 0.21617}
58 | {"mode": "train", "epoch": 6, "iter": 50, "lr": 0.02, "time": 0.54323, "data_time": 0.04421, "memory": 5696, "loss_rpn_cls": 0.00791, "loss_rpn_bbox": 0.00557, "loss_cls": 0.12604, "acc": 95.40039, "loss_bbox": 0.06172, "loss": 0.20124}
59 | {"mode": "train", "epoch": 6, "iter": 100, "lr": 0.02, "time": 0.69622, "data_time": 0.19796, "memory": 5696, "loss_rpn_cls": 0.00921, "loss_rpn_bbox": 0.00666, "loss_cls": 0.13228, "acc": 95.25, "loss_bbox": 0.0594, "loss": 0.20755}
60 | {"mode": "train", "epoch": 6, "iter": 150, "lr": 0.02, "time": 0.60573, "data_time": 0.04169, "memory": 5696, "loss_rpn_cls": 0.01445, "loss_rpn_bbox": 0.0085, "loss_cls": 0.15071, "acc": 94.96484, "loss_bbox": 0.06803, "loss": 0.2417}
61 | {"mode": "train", "epoch": 6, "iter": 200, "lr": 0.02, "time": 0.52496, "data_time": 0.0168, "memory": 5696, "loss_rpn_cls": 0.00737, "loss_rpn_bbox": 0.00577, "loss_cls": 0.12484, "acc": 95.45898, "loss_bbox": 0.06018, "loss": 0.19816}
62 | {"mode": "train", "epoch": 6, "iter": 250, "lr": 0.02, "time": 0.55447, "data_time": 0.04938, "memory": 5696, "loss_rpn_cls": 0.01032, "loss_rpn_bbox": 0.00713, "loss_cls": 0.12543, "acc": 95.29492, "loss_bbox": 0.0574, "loss": 0.20028}
63 | {"mode": "train", "epoch": 6, "iter": 300, "lr": 0.02, "time": 0.52515, "data_time": 0.02216, "memory": 5696, "loss_rpn_cls": 0.01372, "loss_rpn_bbox": 0.00662, "loss_cls": 0.12372, "acc": 95.66602, "loss_bbox": 0.06285, "loss": 0.20691}
64 | {"mode": "train", "epoch": 6, "iter": 350, "lr": 0.02, "time": 0.52318, "data_time": 0.02102, "memory": 5696, "loss_rpn_cls": 0.00815, "loss_rpn_bbox": 0.00604, "loss_cls": 0.14505, "acc": 94.71289, "loss_bbox": 0.06095, "loss": 0.22018}
65 | {"mode": "train", "epoch": 6, "iter": 400, "lr": 0.02, "time": 0.51061, "data_time": 0.00707, "memory": 5696, "loss_rpn_cls": 0.00853, "loss_rpn_bbox": 0.00593, "loss_cls": 0.11872, "acc": 95.63086, "loss_bbox": 0.05667, "loss": 0.18986}
66 | {"mode": "train", "epoch": 6, "iter": 450, "lr": 0.02, "time": 0.50287, "data_time": 0.00686, "memory": 5696, "loss_rpn_cls": 0.00889, "loss_rpn_bbox": 0.00609, "loss_cls": 0.13925, "acc": 95.15234, "loss_bbox": 0.06055, "loss": 0.21478}
67 | {"mode": "train", "epoch": 6, "iter": 500, "lr": 0.02, "time": 0.50468, "data_time": 0.00674, "memory": 5696, "loss_rpn_cls": 0.00943, "loss_rpn_bbox": 0.00637, "loss_cls": 0.133, "acc": 95.20898, "loss_bbox": 0.05548, "loss": 0.20428}
68 | {"mode": "train", "epoch": 6, "iter": 550, "lr": 0.02, "time": 0.5879, "data_time": 0.08472, "memory": 5696, "loss_rpn_cls": 0.01385, "loss_rpn_bbox": 0.00727, "loss_cls": 0.11971, "acc": 95.67578, "loss_bbox": 0.05216, "loss": 0.19299}
69 | {"mode": "train", "epoch": 7, "iter": 115, "lr": 0.02, "time": 0.51148, "data_time": 0.27775, "memory": 5696, "loss_rpn_cls": 0.00773, "loss_rpn_bbox": 0.00594, "loss_cls": 0.12132, "acc": 95.58169, "loss_bbox": 0.05579, "loss": 0.19078}
70 | {"mode": "train", "epoch": 7, "iter": 50, "lr": 0.02, "time": 0.60383, "data_time": 0.03955, "memory": 5696, "loss_rpn_cls": 0.0114, "loss_rpn_bbox": 0.00631, "loss_cls": 0.1394, "acc": 95.04297, "loss_bbox": 0.05741, "loss": 0.21452}
71 | {"mode": "train", "epoch": 7, "iter": 100, "lr": 0.02, "time": 0.50432, "data_time": 0.00607, "memory": 5696, "loss_rpn_cls": 0.01491, "loss_rpn_bbox": 0.00806, "loss_cls": 0.15647, "acc": 94.87305, "loss_bbox": 0.06381, "loss": 0.24324}
72 | {"mode": "train", "epoch": 7, "iter": 150, "lr": 0.02, "time": 0.71423, "data_time": 0.14432, "memory": 5696, "loss_rpn_cls": 0.03039, "loss_rpn_bbox": 0.01024, "loss_cls": 0.15473, "acc": 94.99414, "loss_bbox": 0.07707, "loss": 0.27241}
73 | {"mode": "train", "epoch": 7, "iter": 200, "lr": 0.02, "time": 0.72053, "data_time": 0.10828, "memory": 5696, "loss_rpn_cls": 0.01012, "loss_rpn_bbox": 0.00624, "loss_cls": 0.1124, "acc": 96.11914, "loss_bbox": 0.05746, "loss": 0.18621}
74 | {"mode": "train", "epoch": 7, "iter": 250, "lr": 0.02, "time": 0.6184, "data_time": 0.00939, "memory": 5696, "loss_rpn_cls": 0.00973, "loss_rpn_bbox": 0.00509, "loss_cls": 0.11366, "acc": 96.02734, "loss_bbox": 0.04682, "loss": 0.17529}
75 | {"mode": "train", "epoch": 7, "iter": 300, "lr": 0.02, "time": 0.61433, "data_time": 0.01004, "memory": 5696, "loss_rpn_cls": 0.01186, "loss_rpn_bbox": 0.00697, "loss_cls": 0.1158, "acc": 95.9668, "loss_bbox": 0.05012, "loss": 0.18476}
76 | {"mode": "train", "epoch": 7, "iter": 350, "lr": 0.02, "time": 0.62333, "data_time": 0.00981, "memory": 5696, "loss_rpn_cls": 0.00889, "loss_rpn_bbox": 0.00581, "loss_cls": 0.11227, "acc": 95.95898, "loss_bbox": 0.04713, "loss": 0.1741}
77 | {"mode": "train", "epoch": 7, "iter": 400, "lr": 0.02, "time": 0.63422, "data_time": 0.02249, "memory": 5696, "loss_rpn_cls": 0.00838, "loss_rpn_bbox": 0.0056, "loss_cls": 0.10986, "acc": 95.9375, "loss_bbox": 0.04764, "loss": 0.17148}
78 | {"mode": "train", "epoch": 7, "iter": 450, "lr": 0.02, "time": 0.62604, "data_time": 0.00966, "memory": 5696, "loss_rpn_cls": 0.00623, "loss_rpn_bbox": 0.0058, "loss_cls": 0.1114, "acc": 95.74609, "loss_bbox": 0.05365, "loss": 0.17709}
79 | {"mode": "train", "epoch": 7, "iter": 500, "lr": 0.02, "time": 0.66864, "data_time": 0.05844, "memory": 5696, "loss_rpn_cls": 0.00848, "loss_rpn_bbox": 0.00699, "loss_cls": 0.13194, "acc": 95.1543, "loss_bbox": 0.05894, "loss": 0.20636}
80 | {"mode": "train", "epoch": 7, "iter": 550, "lr": 0.02, "time": 0.61844, "data_time": 0.00958, "memory": 5696, "loss_rpn_cls": 0.00906, "loss_rpn_bbox": 0.00752, "loss_cls": 0.14079, "acc": 95.05859, "loss_bbox": 0.06675, "loss": 0.22413}
81 | {"mode": "train", "epoch": 8, "iter": 50, "lr": 0.02, "time": 0.53554, "data_time": 0.02993, "memory": 5696, "loss_rpn_cls": 0.01006, "loss_rpn_bbox": 0.00743, "loss_cls": 0.12461, "acc": 95.3457, "loss_bbox": 0.05612, "loss": 0.19822}
82 | {"mode": "train", "epoch": 8, "iter": 100, "lr": 0.02, "time": 0.50579, "data_time": 0.00687, "memory": 5696, "loss_rpn_cls": 0.00728, "loss_rpn_bbox": 0.00698, "loss_cls": 0.11898, "acc": 95.42578, "loss_bbox": 0.05315, "loss": 0.18638}
83 | {"mode": "train", "epoch": 8, "iter": 150, "lr": 0.02, "time": 0.50781, "data_time": 0.00655, "memory": 5696, "loss_rpn_cls": 0.00811, "loss_rpn_bbox": 0.0063, "loss_cls": 0.11761, "acc": 95.57422, "loss_bbox": 0.05272, "loss": 0.18474}
84 | {"mode": "train", "epoch": 8, "iter": 200, "lr": 0.02, "time": 0.50765, "data_time": 0.00667, "memory": 5696, "loss_rpn_cls": 0.00802, "loss_rpn_bbox": 0.00556, "loss_cls": 0.12714, "acc": 95.42773, "loss_bbox": 0.05261, "loss": 0.19334}
85 | {"mode": "train", "epoch": 8, "iter": 250, "lr": 0.02, "time": 0.52218, "data_time": 0.01573, "memory": 5696, "loss_rpn_cls": 0.00745, "loss_rpn_bbox": 0.00507, "loss_cls": 0.10948, "acc": 96.04492, "loss_bbox": 0.04383, "loss": 0.16583}
86 | {"mode": "train", "epoch": 8, "iter": 300, "lr": 0.02, "time": 0.544, "data_time": 0.03903, "memory": 5696, "loss_rpn_cls": 0.0073, "loss_rpn_bbox": 0.00615, "loss_cls": 0.11153, "acc": 95.77148, "loss_bbox": 0.04881, "loss": 0.17379}
87 | {"mode": "train", "epoch": 8, "iter": 350, "lr": 0.02, "time": 1.09267, "data_time": 0.59414, "memory": 5696, "loss_rpn_cls": 0.00766, "loss_rpn_bbox": 0.00582, "loss_cls": 0.11566, "acc": 95.80273, "loss_bbox": 0.05777, "loss": 0.18691}
88 | {"mode": "train", "epoch": 8, "iter": 400, "lr": 0.02, "time": 0.50323, "data_time": 0.00671, "memory": 5696, "loss_rpn_cls": 0.01412, "loss_rpn_bbox": 0.00728, "loss_cls": 0.11361, "acc": 96.06641, "loss_bbox": 0.05286, "loss": 0.18787}
89 | {"mode": "train", "epoch": 8, "iter": 450, "lr": 0.02, "time": 0.51021, "data_time": 0.00673, "memory": 5696, "loss_rpn_cls": 0.01137, "loss_rpn_bbox": 0.00646, "loss_cls": 0.11739, "acc": 95.77734, "loss_bbox": 0.05152, "loss": 0.18675}
90 | {"mode": "train", "epoch": 8, "iter": 500, "lr": 0.02, "time": 0.51532, "data_time": 0.01624, "memory": 5696, "loss_rpn_cls": 0.00803, "loss_rpn_bbox": 0.00589, "loss_cls": 0.09256, "acc": 96.40625, "loss_bbox": 0.04921, "loss": 0.15568}
91 | {"mode": "train", "epoch": 8, "iter": 550, "lr": 0.02, "time": 0.50599, "data_time": 0.00654, "memory": 5696, "loss_rpn_cls": 0.00736, "loss_rpn_bbox": 0.00455, "loss_cls": 0.10456, "acc": 95.81836, "loss_bbox": 0.04204, "loss": 0.15851}
92 | {"mode": "train", "epoch": 9, "iter": 115, "lr": 0.02, "time": 0.47793, "data_time": 0.21516, "memory": 5696, "loss_rpn_cls": 0.00941, "loss_rpn_bbox": 0.00556, "loss_cls": 0.10351, "acc": 96.1464, "loss_bbox": 0.05045, "loss": 0.16893}
93 | {"mode": "train", "epoch": 9, "iter": 50, "lr": 0.002, "time": 0.66657, "data_time": 0.0535, "memory": 5696, "loss_rpn_cls": 0.00783, "loss_rpn_bbox": 0.00554, "loss_cls": 0.09764, "acc": 96.47461, "loss_bbox": 0.04826, "loss": 0.15928}
94 | {"mode": "train", "epoch": 9, "iter": 100, "lr": 0.002, "time": 0.62643, "data_time": 0.01115, "memory": 5696, "loss_rpn_cls": 0.0112, "loss_rpn_bbox": 0.00533, "loss_cls": 0.10305, "acc": 96.04688, "loss_bbox": 0.04397, "loss": 0.16356}
95 | {"mode": "train", "epoch": 9, "iter": 150, "lr": 0.002, "time": 0.62425, "data_time": 0.00944, "memory": 5696, "loss_rpn_cls": 0.00628, "loss_rpn_bbox": 0.00489, "loss_cls": 0.08919, "acc": 96.45508, "loss_bbox": 0.03748, "loss": 0.13783}
96 | {"mode": "train", "epoch": 9, "iter": 200, "lr": 0.002, "time": 0.62269, "data_time": 0.00947, "memory": 5696, "loss_rpn_cls": 0.00727, "loss_rpn_bbox": 0.00399, "loss_cls": 0.09384, "acc": 96.51758, "loss_bbox": 0.03587, "loss": 0.14097}
97 | {"mode": "train", "epoch": 9, "iter": 250, "lr": 0.002, "time": 0.62472, "data_time": 0.0096, "memory": 5696, "loss_rpn_cls": 0.00641, "loss_rpn_bbox": 0.00699, "loss_cls": 0.10836, "acc": 95.74609, "loss_bbox": 0.05005, "loss": 0.1718}
98 | {"mode": "train", "epoch": 9, "iter": 300, "lr": 0.002, "time": 0.61771, "data_time": 0.00952, "memory": 5696, "loss_rpn_cls": 0.00587, "loss_rpn_bbox": 0.00415, "loss_cls": 0.08779, "acc": 96.65039, "loss_bbox": 0.03464, "loss": 0.13246}
99 | {"mode": "train", "epoch": 9, "iter": 350, "lr": 0.002, "time": 0.61732, "data_time": 0.00954, "memory": 5696, "loss_rpn_cls": 0.00727, "loss_rpn_bbox": 0.00493, "loss_cls": 0.09233, "acc": 96.36133, "loss_bbox": 0.03873, "loss": 0.14327}
100 | {"mode": "train", "epoch": 9, "iter": 400, "lr": 0.002, "time": 0.74154, "data_time": 0.13262, "memory": 5696, "loss_rpn_cls": 0.00466, "loss_rpn_bbox": 0.00375, "loss_cls": 0.09513, "acc": 96.31641, "loss_bbox": 0.0429, "loss": 0.14644}
101 | {"mode": "train", "epoch": 9, "iter": 450, "lr": 0.002, "time": 0.62911, "data_time": 0.01007, "memory": 5696, "loss_rpn_cls": 0.00534, "loss_rpn_bbox": 0.00411, "loss_cls": 0.08636, "acc": 96.52539, "loss_bbox": 0.03979, "loss": 0.13559}
102 | {"mode": "train", "epoch": 9, "iter": 500, "lr": 0.002, "time": 0.57786, "data_time": 0.00847, "memory": 5696, "loss_rpn_cls": 0.00463, "loss_rpn_bbox": 0.00363, "loss_cls": 0.08264, "acc": 96.76953, "loss_bbox": 0.03445, "loss": 0.12536}
103 | {"mode": "train", "epoch": 9, "iter": 550, "lr": 0.002, "time": 0.50238, "data_time": 0.00635, "memory": 5696, "loss_rpn_cls": 0.00399, "loss_rpn_bbox": 0.00363, "loss_cls": 0.07193, "acc": 97.25781, "loss_bbox": 0.02702, "loss": 0.10657}
104 | {"mode": "train", "epoch": 10, "iter": 50, "lr": 0.002, "time": 0.65076, "data_time": 0.03772, "memory": 5696, "loss_rpn_cls": 0.00544, "loss_rpn_bbox": 0.0048, "loss_cls": 0.088, "acc": 96.51953, "loss_bbox": 0.03588, "loss": 0.13412}
105 | {"mode": "train", "epoch": 10, "iter": 100, "lr": 0.002, "time": 0.62092, "data_time": 0.00971, "memory": 5696, "loss_rpn_cls": 0.00474, "loss_rpn_bbox": 0.00415, "loss_cls": 0.07196, "acc": 97.14062, "loss_bbox": 0.02935, "loss": 0.1102}
106 | {"mode": "train", "epoch": 10, "iter": 150, "lr": 0.002, "time": 0.61863, "data_time": 0.00968, "memory": 5696, "loss_rpn_cls": 0.00706, "loss_rpn_bbox": 0.0052, "loss_cls": 0.10249, "acc": 96.06055, "loss_bbox": 0.03915, "loss": 0.1539}
107 | {"mode": "train", "epoch": 10, "iter": 200, "lr": 0.002, "time": 0.61971, "data_time": 0.00964, "memory": 5696, "loss_rpn_cls": 0.00707, "loss_rpn_bbox": 0.00481, "loss_cls": 0.08109, "acc": 97.04102, "loss_bbox": 0.03655, "loss": 0.12951}
108 | {"mode": "train", "epoch": 10, "iter": 250, "lr": 0.002, "time": 0.65088, "data_time": 0.04016, "memory": 5696, "loss_rpn_cls": 0.00403, "loss_rpn_bbox": 0.00388, "loss_cls": 0.06913, "acc": 97.375, "loss_bbox": 0.03145, "loss": 0.10849}
109 | {"mode": "train", "epoch": 10, "iter": 300, "lr": 0.002, "time": 0.62276, "data_time": 0.00966, "memory": 5696, "loss_rpn_cls": 0.00469, "loss_rpn_bbox": 0.00388, "loss_cls": 0.08334, "acc": 96.84961, "loss_bbox": 0.03638, "loss": 0.12829}
110 | {"mode": "train", "epoch": 10, "iter": 350, "lr": 0.002, "time": 0.62188, "data_time": 0.01001, "memory": 5696, "loss_rpn_cls": 0.00551, "loss_rpn_bbox": 0.00414, "loss_cls": 0.07683, "acc": 96.92969, "loss_bbox": 0.0334, "loss": 0.11988}
111 | {"mode": "train", "epoch": 10, "iter": 400, "lr": 0.002, "time": 0.62571, "data_time": 0.00958, "memory": 5696, "loss_rpn_cls": 0.00509, "loss_rpn_bbox": 0.00395, "loss_cls": 0.08428, "acc": 96.5293, "loss_bbox": 0.0345, "loss": 0.12781}
112 | {"mode": "train", "epoch": 10, "iter": 450, "lr": 0.002, "time": 0.62705, "data_time": 0.00964, "memory": 5696, "loss_rpn_cls": 0.00391, "loss_rpn_bbox": 0.00393, "loss_cls": 0.06881, "acc": 97.16602, "loss_bbox": 0.03043, "loss": 0.10708}
113 | {"mode": "train", "epoch": 10, "iter": 500, "lr": 0.002, "time": 0.62267, "data_time": 0.00957, "memory": 5696, "loss_rpn_cls": 0.0062, "loss_rpn_bbox": 0.00457, "loss_cls": 0.09365, "acc": 96.18359, "loss_bbox": 0.03698, "loss": 0.14141}
114 | {"mode": "train", "epoch": 10, "iter": 550, "lr": 0.002, "time": 0.62301, "data_time": 0.00966, "memory": 5696, "loss_rpn_cls": 0.00408, "loss_rpn_bbox": 0.00397, "loss_cls": 0.08208, "acc": 96.70312, "loss_bbox": 0.03618, "loss": 0.12632}
115 | {"mode": "train", "epoch": 11, "iter": 115, "lr": 0.002, "time": 0.46758, "data_time": 0.18141, "memory": 5696, "loss_rpn_cls": 0.00526, "loss_rpn_bbox": 0.00422, "loss_cls": 0.07642, "acc": 97.09834, "loss_bbox": 0.03318, "loss": 0.11907}
116 | {"mode": "train", "epoch": 11, "iter": 50, "lr": 0.002, "time": 0.63706, "data_time": 0.0285, "memory": 5696, "loss_rpn_cls": 0.00585, "loss_rpn_bbox": 0.00429, "loss_cls": 0.07606, "acc": 96.89453, "loss_bbox": 0.03286, "loss": 0.11906}
117 | {"mode": "train", "epoch": 11, "iter": 100, "lr": 0.002, "time": 0.54277, "data_time": 0.00764, "memory": 5696, "loss_rpn_cls": 0.0049, "loss_rpn_bbox": 0.00448, "loss_cls": 0.08597, "acc": 96.70117, "loss_bbox": 0.03729, "loss": 0.13264}
118 | {"mode": "train", "epoch": 11, "iter": 150, "lr": 0.002, "time": 0.51208, "data_time": 0.00926, "memory": 5696, "loss_rpn_cls": 0.00422, "loss_rpn_bbox": 0.00381, "loss_cls": 0.07464, "acc": 97.12109, "loss_bbox": 0.03192, "loss": 0.11459}
119 | {"mode": "train", "epoch": 11, "iter": 200, "lr": 0.002, "time": 0.50722, "data_time": 0.00935, "memory": 5696, "loss_rpn_cls": 0.00586, "loss_rpn_bbox": 0.00424, "loss_cls": 0.07833, "acc": 97.02344, "loss_bbox": 0.03441, "loss": 0.12285}
120 | {"mode": "train", "epoch": 11, "iter": 250, "lr": 0.002, "time": 0.56258, "data_time": 0.06363, "memory": 5696, "loss_rpn_cls": 0.00562, "loss_rpn_bbox": 0.00477, "loss_cls": 0.08335, "acc": 96.95898, "loss_bbox": 0.03518, "loss": 0.12891}
121 | {"mode": "train", "epoch": 11, "iter": 300, "lr": 0.002, "time": 0.51535, "data_time": 0.01336, "memory": 5696, "loss_rpn_cls": 0.0047, "loss_rpn_bbox": 0.00403, "loss_cls": 0.07576, "acc": 97.0918, "loss_bbox": 0.0347, "loss": 0.1192}
122 | {"mode": "train", "epoch": 11, "iter": 350, "lr": 0.002, "time": 0.70907, "data_time": 0.1187, "memory": 5696, "loss_rpn_cls": 0.00535, "loss_rpn_bbox": 0.00338, "loss_cls": 0.0737, "acc": 97.31836, "loss_bbox": 0.03076, "loss": 0.11319}
123 | {"mode": "train", "epoch": 11, "iter": 400, "lr": 0.002, "time": 1.27861, "data_time": 0.67146, "memory": 5696, "loss_rpn_cls": 0.0037, "loss_rpn_bbox": 0.00346, "loss_cls": 0.07627, "acc": 96.89453, "loss_bbox": 0.02839, "loss": 0.11182}
124 | {"mode": "train", "epoch": 11, "iter": 450, "lr": 0.002, "time": 0.5671, "data_time": 0.00835, "memory": 5696, "loss_rpn_cls": 0.00387, "loss_rpn_bbox": 0.004, "loss_cls": 0.06613, "acc": 97.42969, "loss_bbox": 0.03056, "loss": 0.10455}
125 | {"mode": "train", "epoch": 11, "iter": 500, "lr": 0.002, "time": 0.60135, "data_time": 0.09074, "memory": 5696, "loss_rpn_cls": 0.00402, "loss_rpn_bbox": 0.0038, "loss_cls": 0.07879, "acc": 96.77734, "loss_bbox": 0.032, "loss": 0.11861}
126 | {"mode": "train", "epoch": 11, "iter": 550, "lr": 0.002, "time": 0.71062, "data_time": 0.16395, "memory": 5696, "loss_rpn_cls": 0.00457, "loss_rpn_bbox": 0.00421, "loss_cls": 0.07552, "acc": 97.07227, "loss_bbox": 0.03425, "loss": 0.11855}
127 | {"mode": "train", "epoch": 12, "iter": 50, "lr": 0.0002, "time": 0.68538, "data_time": 0.07949, "memory": 5696, "loss_rpn_cls": 0.00303, "loss_rpn_bbox": 0.00349, "loss_cls": 0.06376, "acc": 97.4668, "loss_bbox": 0.02717, "loss": 0.09745}
128 | {"mode": "train", "epoch": 12, "iter": 100, "lr": 0.0002, "time": 0.52017, "data_time": 0.0141, "memory": 5696, "loss_rpn_cls": 0.00689, "loss_rpn_bbox": 0.0041, "loss_cls": 0.06425, "acc": 97.46484, "loss_bbox": 0.026, "loss": 0.10124}
129 | {"mode": "train", "epoch": 12, "iter": 150, "lr": 0.0002, "time": 0.52325, "data_time": 0.02226, "memory": 5696, "loss_rpn_cls": 0.00297, "loss_rpn_bbox": 0.00397, "loss_cls": 0.07261, "acc": 97.19531, "loss_bbox": 0.0298, "loss": 0.10935}
130 | {"mode": "train", "epoch": 12, "iter": 200, "lr": 0.0002, "time": 0.51168, "data_time": 0.00675, "memory": 5696, "loss_rpn_cls": 0.00506, "loss_rpn_bbox": 0.00398, "loss_cls": 0.07436, "acc": 97.12109, "loss_bbox": 0.03109, "loss": 0.11448}
131 | {"mode": "train", "epoch": 12, "iter": 250, "lr": 0.0002, "time": 0.56226, "data_time": 0.05819, "memory": 5696, "loss_rpn_cls": 0.00368, "loss_rpn_bbox": 0.00398, "loss_cls": 0.07391, "acc": 97.19336, "loss_bbox": 0.03348, "loss": 0.11504}
132 | {"mode": "train", "epoch": 12, "iter": 300, "lr": 0.0002, "time": 0.50746, "data_time": 0.00649, "memory": 5696, "loss_rpn_cls": 0.00489, "loss_rpn_bbox": 0.00389, "loss_cls": 0.07403, "acc": 97.19141, "loss_bbox": 0.03107, "loss": 0.11388}
133 | {"mode": "train", "epoch": 12, "iter": 350, "lr": 0.0002, "time": 0.68346, "data_time": 0.17951, "memory": 5696, "loss_rpn_cls": 0.00464, "loss_rpn_bbox": 0.00429, "loss_cls": 0.07448, "acc": 97.33398, "loss_bbox": 0.03567, "loss": 0.11909}
134 | {"mode": "train", "epoch": 12, "iter": 400, "lr": 0.0002, "time": 0.57773, "data_time": 0.04374, "memory": 5696, "loss_rpn_cls": 0.00353, "loss_rpn_bbox": 0.00412, "loss_cls": 0.07164, "acc": 97.17383, "loss_bbox": 0.03015, "loss": 0.10944}
135 | {"mode": "train", "epoch": 12, "iter": 450, "lr": 0.0002, "time": 0.52545, "data_time": 0.0215, "memory": 5696, "loss_rpn_cls": 0.00539, "loss_rpn_bbox": 0.00419, "loss_cls": 0.07324, "acc": 97.15234, "loss_bbox": 0.03333, "loss": 0.11615}
136 | {"mode": "train", "epoch": 12, "iter": 500, "lr": 0.0002, "time": 0.53737, "data_time": 0.03108, "memory": 5696, "loss_rpn_cls": 0.0038, "loss_rpn_bbox": 0.00354, "loss_cls": 0.06582, "acc": 97.54492, "loss_bbox": 0.03, "loss": 0.10316}
137 | {"mode": "train", "epoch": 12, "iter": 550, "lr": 0.0002, "time": 0.51086, "data_time": 0.01335, "memory": 5696, "loss_rpn_cls": 0.0042, "loss_rpn_bbox": 0.00334, "loss_cls": 0.06598, "acc": 97.62109, "loss_bbox": 0.02652, "loss": 0.10004}
138 | {"mode": "train", "epoch": 13, "iter": 115, "lr": 0.0002, "time": 0.48316, "data_time": 0.26549, "memory": 5696, "loss_rpn_cls": 0.00433, "loss_rpn_bbox": 0.00392, "loss_cls": 0.0703, "acc": 97.23336, "loss_bbox": 0.03131, "loss": 0.10985}
--------------------------------------------------------------------------------