├── .github
│   ├── FUNDING.yml
│   └── workflows
│       └── stale.yml
├── .gitignore
├── LICENSE
├── README.md
├── _init_paths.py
├── aux_images
│   ├── 11-pointInterpolation.png
│   ├── interpolated_precision-AUC_v2.png
│   ├── interpolated_precision_v2.png
│   ├── iou.png
│   ├── precision_recall_example_1_v2.png
│   ├── samples_1_v2.png
│   ├── table_1_v2.png
│   └── table_2_v2.png
├── detections
│   ├── 00001.txt
│   ├── 00002.txt
│   ├── 00003.txt
│   ├── 00004.txt
│   ├── 00005.txt
│   ├── 00006.txt
│   └── 00007.txt
├── detections_rel
│   ├── 00001.txt
│   ├── 00002.txt
│   ├── 00003.txt
│   ├── 00004.txt
│   ├── 00005.txt
│   ├── 00006.txt
│   └── 00007.txt
├── groundtruths
│   ├── 00001.txt
│   ├── 00002.txt
│   ├── 00003.txt
│   ├── 00004.txt
│   ├── 00005.txt
│   ├── 00006.txt
│   └── 00007.txt
├── groundtruths_rel
│   ├── 00001.txt
│   ├── 00002.txt
│   ├── 00003.txt
│   ├── 00004.txt
│   ├── 00005.txt
│   ├── 00006.txt
│   └── 00007.txt
├── lib
│   ├── BoundingBox.py
│   ├── BoundingBoxes.py
│   ├── Evaluator.py
│   ├── __init__.py
│   └── utils.py
├── message.txt
├── paper_survey_on_performance_metrics_for_object_detection_algorithms.pdf
├── pascalvoc.py
├── requirements.txt
├── results
│   ├── person.png
│   ├── person_11-pointInterpolation.png
│   └── results.txt
└── samples
    ├── sample_1
    │   ├── README.md
    │   ├── _init_paths.py
    │   ├── images
    │   │   ├── 000001.jpg
    │   │   ├── 000002.jpg
    │   │   ├── 000003.jpg
    │   │   ├── detections
    │   │   │   ├── 000001.png
    │   │   │   ├── 000002.png
    │   │   │   └── 000003.png
    │   │   └── groundtruths
    │   │       ├── 000001.jpg
    │   │       ├── 000002.jpg
    │   │       └── 000003.jpg
    │   └── sample_1.py
    └── sample_2
        ├── README.md
        ├── _init_paths.py
        ├── detections
        │   ├── 00001.txt
        │   ├── 00002.txt
        │   ├── 00003.txt
        │   ├── 00004.txt
        │   ├── 00005.txt
        │   ├── 00006.txt
        │   └── 00007.txt
        ├── groundtruths
        │   ├── 00001.txt
        │   ├── 00002.txt
        │   ├── 00003.txt
        │   ├── 00004.txt
        │   ├── 00005.txt
        │   ├── 00006.txt
        │   └── 00007.txt
        └── sample_2.py
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | # These are supported funding model platforms
2 |
3 | github: [rafaelpadilla]
4 | patreon: # Replace with a single Patreon username
5 | open_collective: # Replace with a single Open Collective username
6 | ko_fi: # Replace with a single Ko-fi username
7 | tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
8 | community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
9 | liberapay: # Replace with a single Liberapay username
10 | issuehunt: # Replace with a single IssueHunt username
11 | otechie: # Replace with a single Otechie username
12 | lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
13 | custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
14 |
--------------------------------------------------------------------------------
/.github/workflows/stale.yml:
--------------------------------------------------------------------------------
1 | name: Close stale issues
2 |
3 | on:
4 | schedule:
5 | - cron: "30 1 * * *"
6 |
7 | jobs:
8 | stale:
9 |
10 | runs-on: ubuntu-latest
11 |
12 | steps:
13 | - uses: actions/stale@v3
14 | with:
15 | repo-token: ${{ secrets.GITHUB_TOKEN }}
16 | stale-issue-message: 'This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.'
17 | stale-pr-message: 'This pull request has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.'
18 | days-before-stale: 30
19 | days-before-close: 5
20 | stale-issue-label: 'no-issue-activity'
21 | stale-pr-label: 'no-pr-activity'
22 | operations-per-run: 100
23 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | ### Python ###
5 | # Byte-compiled / optimized / DLL files
6 | __pycache__/
7 | *.py[cod]
8 | *$py.class
9 |
10 | # C extensions
11 | *.so
12 |
13 | # Distribution / packaging
14 | .Python
15 | build/
16 | develop-eggs/
17 | dist/
18 | downloads/
19 | eggs/
20 | .eggs/
21 | lib64/
22 | parts/
23 | sdist/
24 | var/
25 | wheels/
26 | *.egg-info/
27 | .installed.cfg
28 | *.egg
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | .pytest_cache/
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | .hypothesis/
51 |
52 | # Translations
53 | *.mo
54 | *.pot
55 |
56 | # Flask stuff:
57 | instance/
58 | .webassets-cache
59 |
60 | # Scrapy stuff:
61 | .scrapy
62 |
63 | # Sphinx documentation
64 | docs/_build/
65 |
66 | # PyBuilder
67 | target/
68 |
69 | # Jupyter Notebook
70 | .ipynb_checkpoints
71 |
72 | # pyenv
73 | .python-version
74 |
75 | # celery beat schedule file
76 | celerybeat-schedule.*
77 |
78 | # SageMath parsed files
79 | *.sage.py
80 |
81 | # Environments
82 | .env
83 | .venv
84 | env/
85 | venv/
86 | ENV/
87 | env.bak/
88 | venv.bak/
89 |
90 | # Spyder project settings
91 | .spyderproject
92 | .spyproject
93 |
94 | # Rope project settings
95 | .ropeproject
96 |
97 | # mkdocs documentation
98 | /site
99 |
100 | # mypy
101 | .mypy_cache/
102 |
103 | ### VisualStudioCode ###
104 | .vscode/
105 | .vscode/*
106 | !.vscode/settings.json
107 | !.vscode/tasks.json
108 | !.vscode/launch.json
109 | !.vscode/extensions.json
110 | .history
111 |
112 | # My stuff
113 | ToDo.txt
114 | test.py
115 | references/
116 | aux_images/older_version/
117 | 3rd_party/
118 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2018 Rafael Padilla
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 | ## Citation
14 |
15 | If you use this code for your research, please consider citing:
16 |
17 | ```
18 | @Article{electronics10030279,
19 | AUTHOR = {Padilla, Rafael and Passos, Wesley L. and Dias, Thadeu L. B. and Netto, Sergio L. and da Silva, Eduardo A. B.},
20 | TITLE = {A Comparative Analysis of Object Detection Metrics with a Companion Open-Source Toolkit},
21 | JOURNAL = {Electronics},
22 | VOLUME = {10},
23 | YEAR = {2021},
24 | NUMBER = {3},
25 | ARTICLE-NUMBER = {279},
26 | URL = {https://www.mdpi.com/2079-9292/10/3/279},
27 | ISSN = {2079-9292},
28 | DOI = {10.3390/electronics10030279}
29 | }
30 | ```
31 | Download the paper [here](https://www.mdpi.com/2079-9292/10/3/279/pdf) or [here](https://github.com/rafaelpadilla/review_object_detection_metrics/blob/main/published_paper.pdf).
32 |
33 | ```
34 | @INPROCEEDINGS {padillaCITE2020,
35 | author = {R. {Padilla} and S. L. {Netto} and E. A. B. {da Silva}},
36 | title = {A Survey on Performance Metrics for Object-Detection Algorithms},
37 | booktitle = {2020 International Conference on Systems, Signals and Image Processing (IWSSIP)},
38 | year = {2020},
39 | pages = {237-242},}
40 | ```
41 | Download the paper [here](https://github.com/rafaelpadilla/Object-Detection-Metrics/raw/master/paper_survey_on_performance_metrics_for_object_detection_algorithms.pdf)
42 |
43 | -----------------
44 |
45 | Attention! A new version of this tool is available [here](https://github.com/rafaelpadilla/review_object_detection_metrics)
46 | =======
47 |
48 | The new version includes **all COCO metrics**, supports **other file formats**, provides a **User Interface (UI)** to guide the evaluation process, and presents the **STT-AP metric** to evaluate object detection in videos.
49 |
50 | -----------------
51 |
52 | # Metrics for object detection
53 |
54 | The motivation of this project is the lack of consensus in the **evaluation metrics for the object detection problem** across different works and implementations. Although online competitions use their own metrics to evaluate the object detection task, only some of them offer reference code snippets to calculate the accuracy of the detected objects.
55 | Researchers who want to evaluate their work using datasets other than those offered by the competitions need to implement their own version of the metrics, and a wrong or divergent implementation can produce different, biased results. Ideally, in order to have trustworthy benchmarking among different approaches, it is necessary to have a flexible implementation that can be used by everyone, regardless of the dataset.
56 |
57 | **This project provides easy-to-use functions implementing the same metrics used by the most popular object detection competitions**. Our implementation does not require you to convert your detections to complicated input formats, avoiding conversions to XML or JSON files. We simplified the input data (ground truth bounding boxes and detected bounding boxes) and gathered in a single project the main metrics used by academia and challenges. Our implementation was carefully compared against the official implementations, and our results are exactly the same.
58 |
59 | In the topics below you can find an overview of the most popular metrics used in different competitions and works, as well as samples showing how to use our code.
60 |
61 | ## Table of contents
62 |
63 | - [Motivation](#metrics-for-object-detection)
64 | - [Different competitions, different metrics](#different-competitions-different-metrics)
65 | - [Important definitions](#important-definitions)
66 | - [Metrics](#metrics)
67 | - [Precision x Recall curve](#precision-x-recall-curve)
68 | - [Average Precision](#average-precision)
69 | - [11-point interpolation](#11-point-interpolation)
70 | - [Interpolating all points](#interpolating-all-points)
71 | - [**How to use this project**](#how-to-use-this-project)
72 | - [References](#references)
73 |
74 |
75 | ## Different competitions, different metrics
76 |
77 | * **[PASCAL VOC Challenge](http://host.robots.ox.ac.uk/pascal/VOC/)** offers a Matlab script to evaluate the quality of the detected objects. Participants of the competition can use the provided Matlab script to measure the accuracy of their detections before submitting their results. The official documentation explaining their criteria for object detection metrics can be accessed [here](http://host.robots.ox.ac.uk/pascal/VOC/voc2012/htmldoc/devkit_doc.html#SECTION00050000000000000000). The metrics currently used by the PASCAL VOC object detection challenge are the **Precision x Recall curve** and **Average Precision**.
78 | The PASCAL VOC Matlab evaluation code reads the ground truth bounding boxes from XML files, requiring changes in the code if you want to apply it to other datasets or to your specific cases. Even though projects such as [Faster-RCNN](https://github.com/rbgirshick/py-faster-rcnn) implement PASCAL VOC evaluation metrics, it is also necessary to convert the detected bounding boxes into their specific format. The [Tensorflow](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/evaluation_protocols.md) framework also has its own PASCAL VOC metrics implementation.
79 |
80 | * **[COCO Detection Challenge](https://competitions.codalab.org/competitions/5181)** uses different metrics to evaluate the accuracy of object detection of different algorithms. [Here](http://cocodataset.org/#detection-eval) you can find a documentation explaining the 12 metrics used for characterizing the performance of an object detector on COCO. This competition offers Python and Matlab codes so users can verify their scores before submitting the results. It is also necessary to convert the results to a [format](http://cocodataset.org/#format-results) required by the competition.
81 |
82 | * **[Google Open Images Dataset V4 Competition](https://storage.googleapis.com/openimages/web/challenge.html)** also uses mean Average Precision (mAP) over the 500 classes to evaluate the object detection task.
83 |
84 | * **[ImageNet Object Localization Challenge](https://www.kaggle.com/c/imagenet-object-detection-challenge)** defines an error for each image considering the class and the overlapping region between ground truth and detected boxes. The total error is computed as the average of all min errors among all test dataset images. [Here](https://www.kaggle.com/c/imagenet-object-localization-challenge#evaluation) are more details about their evaluation method.
85 |
86 | ## Important definitions
87 |
88 | ### Intersection Over Union (IOU)
89 |
90 |
91 | Intersection Over Union (IOU) is a measure based on the Jaccard Index that evaluates the overlap between two bounding boxes. It requires a ground truth bounding box $B_{gt}$ and a predicted bounding box $B_p$. By applying the IOU we can tell if a detection is valid (True Positive) or not (False Positive).
92 |
93 | IOU is given by the overlapping area between the predicted bounding box and the ground truth bounding box divided by the area of union between them:
94 |
95 |
96 | $$\text{IOU} = \frac{\text{area}(B_p \cap B_{gt})}{\text{area}(B_p \cup B_{gt})}$$
97 |
103 | The image below illustrates the IOU between a ground truth bounding box (in green) and a detected bounding box (in red).
104 |
105 |
106 |
107 | 
108 |
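As a reference, here is a minimal sketch of this computation for boxes given as absolute `(x1, y1, x2, y2)` corners (illustrative only; the project's own implementation is `Evaluator.iou` in `lib/Evaluator.py`):

```
def iou(boxA, boxB):
    # Boxes as (x1, y1, x2, y2) absolute corner coordinates.
    xA, yA = max(boxA[0], boxB[0]), max(boxA[1], boxB[1])  # intersection top-left
    xB, yB = min(boxA[2], boxB[2]), min(boxA[3], boxB[3])  # intersection bottom-right
    if xB <= xA or yB <= yA:
        return 0.0  # boxes do not overlap
    interArea = (xB - xA) * (yB - yA)
    areaA = (boxA[2] - boxA[0]) * (boxA[3] - boxA[1])
    areaB = (boxB[2] - boxB[0]) * (boxB[3] - boxB[1])
    return interArea / (areaA + areaB - interArea)
```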
109 | ### True Positive, False Positive, False Negative and True Negative
110 |
111 | Some basic concepts used by the metrics:
112 |
113 | * **True Positive (TP)**: A correct detection. Detection with IOU ≥ _threshold_
114 | * **False Positive (FP)**: A wrong detection. Detection with IOU < _threshold_
115 | * **False Negative (FN)**: A ground truth not detected
116 | * **True Negative (TN)**: Does not apply. It would represent a correctly rejected detection. In the object detection task there are many possible bounding boxes that should not be detected within an image. Thus, TN would be all the possible bounding boxes that were correctly not detected (there are too many possible boxes within an image). That's why it is not used by the metrics.
117 |
118 | _threshold_: depending on the metric, it is usually set to 50%, 75% or 95%.
119 |
120 | ### Precision
121 |
122 | Precision is the ability of a model to identify **only** the relevant objects. It is the percentage of correct positive predictions and is given by:
123 |
124 |
125 | $$\text{Precision} = \frac{TP}{TP + FP} = \frac{TP}{\text{all detections}}$$
126 |
132 | ### Recall
133 |
134 | Recall is the ability of a model to find all the relevant cases (all ground truth bounding boxes). It is the percentage of true positive detected among all relevant ground truths and is given by:
135 |
136 |
137 | $$\text{Recall} = \frac{TP}{TP + FN} = \frac{TP}{\text{all ground truths}}$$
138 |
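A minimal sketch tying the two formulas together (illustrative only; the function name and example numbers are ours):

```
def precision_recall(tp, fp, fn):
    # precision: fraction of the detections that are correct
    # recall: fraction of the ground-truth objects that were found
    precision = tp / (tp + fp) if (tp + fp) > 0 else 0.0
    recall = tp / (tp + fn) if (tp + fn) > 0 else 0.0
    return precision, recall

# E.g. 10 TPs, 5 FPs and 5 missed ground truths give
# precision = 10/15 ~ 0.67 and recall = 10/15 ~ 0.67.
```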
143 | ## Metrics
144 |
145 | In the topics below there are some comments on the most popular metrics used for object detection.
146 |
147 | ### Precision x Recall curve
148 |
149 | The Precision x Recall curve is a good way to evaluate the performance of an object detector as the confidence threshold is changed, by plotting a curve for each object class. An object detector of a particular class is considered good if its precision stays high as recall increases, which means that if you vary the confidence threshold, the precision and recall will still be high. Another way to identify a good object detector is to look for a detector that identifies only relevant objects (0 False Positives = high precision) while finding all ground truth objects (0 False Negatives = high recall).
150 |
151 | A poor object detector needs to increase the number of detected objects (increasing False Positives = lower precision) in order to retrieve all ground truth objects (high recall). That's why the Precision x Recall curve usually starts with high precision values, decreasing as recall increases. You can see an example of the Precision x Recall curve in the next topic (Average Precision). This kind of curve is used by the PASCAL VOC 2012 challenge and is available in our implementation.
152 |
153 | ### Average Precision
154 |
155 | Another way to compare the performance of object detectors is to calculate the area under the curve (AUC) of the Precision x Recall curve. As Precision x Recall curves are often zigzag curves going up and down, comparing different curves (different detectors) in the same plot is usually not an easy task, because the curves tend to cross each other frequently. That's why Average Precision (AP), a numerical metric, can also help us compare different detectors. In practice, AP is the precision averaged across all recall values between 0 and 1.
156 |
157 | From 2010 on, the method of computing AP by the PASCAL VOC challenge has changed. Currently, **the interpolation performed by PASCAL VOC challenge uses all data points, rather than interpolating only 11 equally spaced points as stated in their [paper](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.157.5766&rep=rep1&type=pdf)**. As we want to reproduce their default implementation, our default code (as seen further) follows their most recent application (interpolating all data points). However, we also offer the 11-point interpolation approach.
158 |
159 | #### 11-point interpolation
160 |
161 | The 11-point interpolation tries to summarize the shape of the Precision x Recall curve by averaging the precision at a set of eleven equally spaced recall levels [0, 0.1, 0.2, ... , 1]:
162 |
163 |
164 | $$AP = \frac{1}{11} \sum_{r \in \{0,\, 0.1,\, \ldots,\, 1\}} \rho_{\text{interp}}(r)$$
165 |
170 | with
171 |
172 |
173 | $$\rho_{\text{interp}}(r) = \max_{\tilde{r}:\, \tilde{r} \geq r} \rho(\tilde{r})$$
174 |
179 | where $\rho(\tilde{r})$ is the measured precision at recall $\tilde{r}$.
180 |
181 | Instead of using the precision observed at each point, the AP is obtained by interpolating the precision only at the 11 levels $r$, taking the **maximum precision whose recall value is greater than or equal to $r$**.
182 |
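A minimal sketch of this averaging, assuming `recall` and `precision` are the arrays of accumulated values sorted by decreasing confidence (the library's own routine is `Evaluator.ElevenPointInterpolatedAP` in `lib/Evaluator.py`):

```
import numpy as np

def eleven_point_ap(recall, precision):
    recall, precision = np.asarray(recall), np.asarray(precision)
    ap = 0.0
    for r in np.linspace(0, 1, 11):  # recall levels 0, 0.1, ..., 1
        mask = recall >= r
        # maximum precision among points whose recall is at least r (0 if none)
        ap += (precision[mask].max() if mask.any() else 0.0) / 11.0
    return ap
```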
183 | #### Interpolating all points
184 |
185 | Instead of interpolating only at the 11 equally spaced points, you could interpolate through all points in such a way that:
186 |
187 |
188 | $$AP = \sum_{n} (r_{n+1} - r_n)\, \rho_{\text{interp}}(r_{n+1})$$
189 |
194 | with
195 |
196 |
197 | $$\rho_{\text{interp}}(r_{n+1}) = \max_{\tilde{r}:\, \tilde{r} \geq r_{n+1}} \rho(\tilde{r})$$
198 |
204 |
205 | where $\rho(\tilde{r})$ is the measured precision at recall $\tilde{r}$.
206 |
207 | In this case, instead of using the precision observed at only a few points, the AP is now obtained by interpolating the precision at **each level** $r_{n+1}$, taking the **maximum precision whose recall value is greater than or equal to $r_{n+1}$**. This way we estimate the area under the curve.
208 |
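A minimal sketch of the all-point computation above (the library's own routine is `Evaluator.CalculateAveragePrecision` in `lib/Evaluator.py`):

```
import numpy as np

def every_point_ap(recall, precision):
    # Sentinel points at recall 0 and 1, as in the PASCAL VOC implementation.
    mrec = np.concatenate(([0.0], recall, [1.0]))
    mpre = np.concatenate(([0.0], precision, [0.0]))
    # Make the precision envelope monotonically non-increasing (right to left).
    for i in range(len(mpre) - 1, 0, -1):
        mpre[i - 1] = max(mpre[i - 1], mpre[i])
    # Sum the rectangle areas wherever the recall changes.
    idx = np.where(mrec[1:] != mrec[:-1])[0]
    return float(np.sum((mrec[idx + 1] - mrec[idx]) * mpre[idx + 1]))
```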
209 | To make things clearer, we provide an example comparing both interpolations.
210 |
211 |
212 | #### An illustrated example
213 |
214 | An example helps us better understand the concept of the interpolated average precision. Consider the detections below:
215 |
216 |
217 |
218 | 
219 |
220 | There are 7 images with 15 ground truth objects represented by the green bounding boxes and 24 detected objects represented by the red bounding boxes. Each detected object has a confidence level and is identified by a letter (A,B,...,Y).
221 |
222 | The following table shows the bounding boxes with their corresponding confidences. The last column identifies the detections as TP or FP. In this example a detection is considered a TP if IOU ≥ 30%, otherwise it is a FP. By looking at the images above we can roughly tell whether the detections are TP or FP.
223 |
224 |
225 |
226 | 
227 |
228 |
256 |
257 | In some images there is more than one detection overlapping a ground truth (images 2, 3, 4, 5, 6 and 7). For those cases, the predicted box with the highest IOU is considered TP (e.g. in image 2 "E" is TP while "D" is FP, because the IOU between E and the ground truth is greater than the IOU between D and the ground truth). This rule is applied by the PASCAL VOC 2012 metric: "e.g. 5 detections (TP) of a single object is counted as 1 correct detection and 4 false detections".
258 |
259 | The Precision x Recall curve is plotted by calculating the precision and recall values of the accumulated TP and FP detections. For this, first we order the detections by their confidences, then we calculate the precision and recall for each accumulated detection as shown in the table below (note that for the recall computation, the denominator ("Acc TP + Acc FN" or "All ground truths") is constant at 15, since the number of ground truth boxes does not depend on the detections):
260 |
261 |
262 |
263 | 
264 |
265 |
293 |
294 | Example computation for the 2nd row (Image 7): Precision = TP/(TP+FP) = 1/2 = 0.5 and Recall = TP/(TP+FN) = 1/15 = 0.066
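A minimal sketch of this accumulation for the first two rows of the table (illustrative only; the full table has 24 detections):

```
import numpy as np

num_gts = 15                 # total ground-truth boxes in the example
tp = np.array([1, 0])        # first two detections by confidence: one TP, one FP
acc_tp = np.cumsum(tp)       # accumulated TP
acc_fp = np.cumsum(1 - tp)   # accumulated FP
precision = acc_tp / (acc_tp + acc_fp)
recall = acc_tp / num_gts
print(precision[-1], recall[-1])  # 0.5 and 1/15 ~ 0.0666, matching the 2nd row
```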
295 |
296 | Plotting the precision and recall values we have the following *Precision x Recall curve*:
297 |
298 |
299 | 
300 |
302 |
303 | As mentioned before, there are two different ways to measure the interpolated average precision: **11-point interpolation** and **interpolating all points**. Below we make a comparison between them:
304 |
305 | #### Calculating the 11-point interpolation
306 |
307 | The idea of the 11-point interpolated average precision is to average the precisions at a set of 11 recall levels (0,0.1,...,1). The interpolated precision values are obtained by taking the maximum precision whose recall value is greater than its current recall value as follows:
308 |
309 |
310 | 
311 |
313 |
314 | By applying the 11-point interpolation, we have:
315 |
316 | 
317 | 
318 | 
319 |
320 |
321 | #### Calculating the interpolation performed in all points
322 |
323 | By interpolating all points, the Average Precision (AP) can be interpreted as an approximated AUC of the Precision x Recall curve. The intention is to reduce the impact of the wiggles in the curve. By applying the equations presented before, we can obtain the areas as demonstrated below. We could also visually obtain the interpolated precision points by looking at the recalls starting from the highest (0.4666) down to 0 (looking at the plot from right to left): as we decrease the recall, we collect the precision values that are the highest, as shown in the image below:
324 |
325 |
326 |
327 | 
328 |
329 |
330 | Looking at the plot above, we can divide the AUC into 4 areas (A1, A2, A3 and A4):
331 |
332 |
333 | 
334 |
335 |
336 |
337 | Calculating the total area, we have the AP:
338 |
339 | 
340 |
341 | 
342 | 
343 | 
344 | 
345 | 
346 |
347 | 
348 | 
349 | 
350 |
351 | The results of the two interpolation methods are slightly different: 24.56% with the every-point interpolation and 26.84% with the 11-point interpolation.
352 |
353 | Our default implementation is the same as PASCAL VOC's: every-point interpolation. If you want to use the 11-point interpolation, change the functions that use the argument ```method=MethodAveragePrecision.EveryPointInterpolation``` to ```method=MethodAveragePrecision.ElevenPointInterpolation```.
354 |
355 | If you want to reproduce these results, see the **[Sample 2](https://github.com/rafaelpadilla/Object-Detection-Metrics/tree/master/samples/sample_2/)**.
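For reference, a minimal sketch of calling the library directly with one box pair taken from the sample files (run it from the repository root so that `import _init_paths` puts `lib/` on the path; the enum names come from `lib/utils.py`, as the `from utils import *` imports in `lib/` suggest):

```
import _init_paths  # adds lib/ to sys.path
from BoundingBox import BoundingBox
from BoundingBoxes import BoundingBoxes
from Evaluator import Evaluator
from utils import BBType, MethodAveragePrecision

allBoundingBoxes = BoundingBoxes()
# Ground truth and detection of image 00001, absolute XYWH (values from the sample files)
allBoundingBoxes.addBoundingBox(
    BoundingBox('00001', 'person', 25, 16, 38, 56, bbType=BBType.GroundTruth))
allBoundingBoxes.addBoundingBox(
    BoundingBox('00001', 'person', 5, 67, 31, 48,
                bbType=BBType.Detected, classConfidence=0.88))

evaluator = Evaluator()
metricsPerClass = evaluator.GetPascalVOCMetrics(
    allBoundingBoxes, IOUThreshold=0.3,
    method=MethodAveragePrecision.ElevenPointInterpolation)
for mc in metricsPerClass:
    print('%s AP: %s' % (mc['class'], mc['AP']))
```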
356 |
357 |
358 | ## How to use this project
359 |
360 | This project was created to evaluate your detections in a very easy way. If you want to evaluate your algorithm with the most used object detection metrics, you are in the right place.
361 |
362 | [Sample_1](https://github.com/rafaelpadilla/Object-Detection-Metrics/tree/master/samples/sample_1) and [sample_2](https://github.com/rafaelpadilla/Object-Detection-Metrics/tree/master/samples/sample_2) are practical examples demonstrating how to directly access the core functions of this project, giving you more flexibility in using the metrics. But if you don't want to spend your time understanding our code, see the instructions below to easily evaluate your detections:
363 |
364 | Follow the steps below to start evaluating your detections:
365 |
366 | 1. [Create the ground truth files](#create-the-ground-truth-files)
367 | 2. [Create your detection files](#create-your-detection-files)
368 | 3. For **Pascal VOC metrics**, run the command: `python pascalvoc.py`
369 | If you want to reproduce the example above, run the command: `python pascalvoc.py -t 0.3`
370 | 4. (Optional) [You can use arguments to control the IOU threshold, bounding boxes format, etc.](#optional-arguments)
371 |
372 | ### Create the ground truth files
373 |
374 | - Create a separate ground truth text file for each image in the folder **groundtruths/**.
375 | - In these files each line should be in the format: `<class_name> <left> <top> <right> <bottom>`.
376 | - E.g. The ground truth bounding boxes of the image "2008_000034.jpg" are represented in the file "2008_000034.txt":
377 | ```
378 | bottle 6 234 45 362
379 | person 1 156 103 336
380 | person 36 111 198 416
381 | person 91 42 338 500
382 | ```
383 |
384 | If you prefer, you can also have your bounding boxes in the format: `<class_name> <left> <top> <width> <height>` (see here [**\***](#asterisk) how to use it). In this case, your "2008_000034.txt" would be represented as:
385 | ```
386 | bottle 6 234 39 128
387 | person 1 156 102 180
388 | person 36 111 162 305
389 | person 91 42 247 458
390 | ```
391 |
392 | ### Create your detection files
393 |
394 | - Create a separate detection text file for each image in the folder **detections/**.
395 | - The names of the detection files must match their corresponding ground truth files (e.g. "detections/2008_000182.txt" contains the detections for the ground truth "groundtruths/2008_000182.txt").
396 | - In these files each line should be in the following format: `<class_name> <confidence> <left> <top> <right> <bottom>` (see here [**\***](#asterisk) how to use it).
397 | - E.g. "2008_000034.txt":
398 | ```
399 | bottle 0.14981 80 1 295 500
400 | bus 0.12601 36 13 404 316
401 | horse 0.12526 430 117 500 307
402 | pottedplant 0.14585 212 78 292 118
403 | tvmonitor 0.070565 388 89 500 196
404 | ```
405 |
406 | Also, if you prefer, you could have your bounding boxes in the format: `<class_name> <confidence> <left> <top> <width> <height>`.
407 |
408 | ### Optional arguments
409 |
410 | Optional arguments:
411 |
412 | | Argument | Description | Example | Default |
413 | |:-------------:|:-----------:|:-----------:|:-----------:|
414 | | `-h`, `--help` | show help message | `python pascalvoc.py -h` | |
415 | | `-v`, `--version` | check version | `python pascalvoc.py -v` | |
416 | | `-gt`, `--gtfolder` | folder that contains the ground truth bounding box files | `python pascalvoc.py -gt /home/whatever/my_groundtruths/` | `/Object-Detection-Metrics/groundtruths` |
417 | | `-det`, `--detfolder` | folder that contains your detected bounding box files | `python pascalvoc.py -det /home/whatever/my_detections/` | `/Object-Detection-Metrics/detections/` |
418 | | `-t`, `--threshold` | IOU threshold that tells if a detection is TP or FP | `python pascalvoc.py -t 0.75` | `0.50` |
419 | | `-gtformat` | format of the coordinates of the ground truth bounding boxes [**\***](#asterisk) | `python pascalvoc.py -gtformat xyrb` | `xywh` |
420 | | `-detformat` | format of the coordinates of the detected bounding boxes [**\***](#asterisk) | `python pascalvoc.py -detformat xyrb` | `xywh` |
421 | | `-gtcoords` | reference of the ground truth bounding box coordinates. If the annotated coordinates are relative to the image size (as used in YOLO), set it to `rel`. If the coordinates are absolute values, not depending on the image size, set it to `abs` | `python pascalvoc.py -gtcoords rel` | `abs` |
422 | | `-detcoords` | reference of the detected bounding box coordinates. If the coordinates are relative to the image size (as used in YOLO), set it to `rel`. If the coordinates are absolute values, not depending on the image size, set it to `abs` | `python pascalvoc.py -detcoords rel` | `abs` |
423 | | `-imgsize` | image size in the format `width,height`. Required if `-gtcoords` or `-detcoords` is set to `rel` | `python pascalvoc.py -imgsize 600,400` | |
424 | | `-sp`, `--savepath` | folder where the plots are saved | `python pascalvoc.py -sp /home/whatever/my_results/` | `Object-Detection-Metrics/results/` |
425 | | `-np`, `--noplot` | if present, no plot is shown during execution | `python pascalvoc.py -np` | not present; plots are shown |
426 |
427 |
428 | (**\***) set `-gtformat xywh` and/or `-detformat xywh` if the format is `<left> <top> <width> <height>`. Set `-gtformat xyrb` and/or `-detformat xyrb` if the format is `<left> <top> <right> <bottom>`.
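For example, the relative-coordinate samples shipped in `groundtruths_rel/` and `detections_rel/` correspond to 200x200-pixel images, so they could be evaluated with a command along these lines (a sketch; the exact folder handling depends on `pascalvoc.py`, which is not reproduced here):

```
python pascalvoc.py -t 0.3 -gt groundtruths_rel -det detections_rel -gtcoords rel -detcoords rel -imgsize 200,200
```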
429 |
430 | ## References
431 |
432 | * The Relationship Between Precision-Recall and ROC Curves (Jesse Davis and Mark Goadrich)
433 | Department of Computer Sciences and Department of Biostatistics and Medical Informatics, University of
434 | Wisconsin
435 | http://pages.cs.wisc.edu/~jdavis/davisgoadrichcamera2.pdf
436 |
437 | * The PASCAL Visual Object Classes (VOC) Challenge
438 | http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.157.5766&rep=rep1&type=pdf
439 |
440 | * Evaluation of ranked retrieval results (Salton and Mcgill 1986)
441 | https://www.amazon.com/Introduction-Information-Retrieval-COMPUTER-SCIENCE/dp/0070544840
442 | https://nlp.stanford.edu/IR-book/html/htmledition/evaluation-of-ranked-retrieval-results-1.html
443 |
--------------------------------------------------------------------------------
/_init_paths.py:
--------------------------------------------------------------------------------
1 | ###########################################################################################
2 | # #
3 | # Set up paths for the Object Detection Metrics #
4 | # #
5 | # Developed by: Rafael Padilla (rafael.padilla@smt.ufrj.br) #
6 | # SMT - Signal Multimedia and Telecommunications Lab #
7 | # COPPE - Universidade Federal do Rio de Janeiro #
8 | # Last modification: May 24th 2018 #
9 | ###########################################################################################
10 |
11 | import sys
12 | import os
13 |
14 |
15 | def add_path(path):
16 | if path not in sys.path:
17 | sys.path.insert(0, path)
18 |
19 |
20 | currentPath = os.path.dirname(os.path.realpath(__file__))
21 |
22 | # Add lib to PYTHONPATH
23 | libPath = os.path.join(currentPath, 'lib')
24 | add_path(libPath)
25 |
--------------------------------------------------------------------------------
/aux_images/11-pointInterpolation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rafaelpadilla/Object-Detection-Metrics/dcb285e7dea7e73d9480937d58de0e9bdfc20051/aux_images/11-pointInterpolation.png
--------------------------------------------------------------------------------
/aux_images/interpolated_precision-AUC_v2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rafaelpadilla/Object-Detection-Metrics/dcb285e7dea7e73d9480937d58de0e9bdfc20051/aux_images/interpolated_precision-AUC_v2.png
--------------------------------------------------------------------------------
/aux_images/interpolated_precision_v2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rafaelpadilla/Object-Detection-Metrics/dcb285e7dea7e73d9480937d58de0e9bdfc20051/aux_images/interpolated_precision_v2.png
--------------------------------------------------------------------------------
/aux_images/iou.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rafaelpadilla/Object-Detection-Metrics/dcb285e7dea7e73d9480937d58de0e9bdfc20051/aux_images/iou.png
--------------------------------------------------------------------------------
/aux_images/precision_recall_example_1_v2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rafaelpadilla/Object-Detection-Metrics/dcb285e7dea7e73d9480937d58de0e9bdfc20051/aux_images/precision_recall_example_1_v2.png
--------------------------------------------------------------------------------
/aux_images/samples_1_v2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rafaelpadilla/Object-Detection-Metrics/dcb285e7dea7e73d9480937d58de0e9bdfc20051/aux_images/samples_1_v2.png
--------------------------------------------------------------------------------
/aux_images/table_1_v2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rafaelpadilla/Object-Detection-Metrics/dcb285e7dea7e73d9480937d58de0e9bdfc20051/aux_images/table_1_v2.png
--------------------------------------------------------------------------------
/aux_images/table_2_v2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rafaelpadilla/Object-Detection-Metrics/dcb285e7dea7e73d9480937d58de0e9bdfc20051/aux_images/table_2_v2.png
--------------------------------------------------------------------------------
/detections/00001.txt:
--------------------------------------------------------------------------------
1 | person .88 5 67 31 48
2 | person .70 119 111 40 67
3 | person .80 124 9 49 67
4 |
--------------------------------------------------------------------------------
/detections/00002.txt:
--------------------------------------------------------------------------------
1 | person .71 64 111 64 58
2 | person .54 26 140 60 47
3 | person .74 19 18 43 35
4 |
--------------------------------------------------------------------------------
/detections/00003.txt:
--------------------------------------------------------------------------------
1 | person .18 109 15 77 39
2 | person .67 86 63 46 45
3 | person .38 160 62 36 53
4 | person .91 105 131 47 47
5 | person .44 18 148 40 44
6 |
--------------------------------------------------------------------------------
/detections/00004.txt:
--------------------------------------------------------------------------------
1 | person .35 83 28 28 26
2 | person .78 28 68 42 67
3 | person .45 87 89 25 39
4 | person .14 10 155 60 26
5 |
--------------------------------------------------------------------------------
/detections/00005.txt:
--------------------------------------------------------------------------------
1 | person .62 50 38 28 46
2 | person .44 95 11 53 28
3 | person .95 29 131 72 29
4 | person .23 29 163 72 29
5 |
--------------------------------------------------------------------------------
/detections/00006.txt:
--------------------------------------------------------------------------------
1 | person .45 43 48 74 38
2 | person .84 17 155 29 35
3 | person .43 95 110 25 42
4 |
--------------------------------------------------------------------------------
/detections/00007.txt:
--------------------------------------------------------------------------------
1 | person .48 16 20 101 88
2 | person .95 33 116 37 49
3 |
--------------------------------------------------------------------------------
/detections_rel/00001.txt:
--------------------------------------------------------------------------------
1 | person .88 0.10250000000000001 0.455 0.155 0.24
2 | person .70 0.6950000000000001 0.7225 0.2 0.335
3 | person .80 0.7425 0.2125 0.245 0.335
4 |
--------------------------------------------------------------------------------
/detections_rel/00002.txt:
--------------------------------------------------------------------------------
1 | person .71 0.48 0.7000000000000001 0.32 0.29
2 | person .54 0.28 0.8175 0.3 0.23500000000000001
3 | person .74 0.2025 0.1775 0.215 0.17500000000000002
4 |
--------------------------------------------------------------------------------
/detections_rel/00003.txt:
--------------------------------------------------------------------------------
1 | person .18 0.7375 0.17250000000000001 0.385 0.195
2 | person .67 0.545 0.4275 0.23 0.225
3 | person .38 0.89 0.4425 0.18 0.265
4 | person .91 0.6425 0.7725 0.23500000000000001 0.23500000000000001
5 | person .44 0.19 0.85 0.2 0.22
6 |
--------------------------------------------------------------------------------
/detections_rel/00004.txt:
--------------------------------------------------------------------------------
1 | person .35 0.485 0.20500000000000002 0.14 0.13
2 | person .78 0.245 0.5075000000000001 0.21 0.335
3 | person .45 0.4975 0.5425 0.125 0.195
4 | person .14 0.2 0.84 0.3 0.13
5 |
--------------------------------------------------------------------------------
/detections_rel/00005.txt:
--------------------------------------------------------------------------------
1 | person .62 0.32 0.305 0.14 0.23
2 | person .44 0.6075 0.125 0.265 0.14
3 | person .95 0.325 0.7275 0.36 0.145
4 | person .23 0.325 0.8875000000000001 0.36 0.145
5 |
--------------------------------------------------------------------------------
/detections_rel/00006.txt:
--------------------------------------------------------------------------------
1 | person .45 0.4 0.335 0.37 0.19
2 | person .84 0.1575 0.8625 0.145 0.17500000000000002
3 | person .43 0.5375 0.655 0.125 0.21
4 |
--------------------------------------------------------------------------------
/detections_rel/00007.txt:
--------------------------------------------------------------------------------
1 | person .48 0.3325 0.32 0.505 0.44
2 | person .95 0.2575 0.7025 0.185 0.245
3 |
--------------------------------------------------------------------------------
/groundtruths/00001.txt:
--------------------------------------------------------------------------------
1 | person 25 16 38 56
2 | person 129 123 41 62
3 |
--------------------------------------------------------------------------------
/groundtruths/00002.txt:
--------------------------------------------------------------------------------
1 | person 123 11 43 55
2 | person 38 132 59 45
3 |
--------------------------------------------------------------------------------
/groundtruths/00003.txt:
--------------------------------------------------------------------------------
1 | person 16 14 35 48
2 | person 123 30 49 44
3 | person 99 139 47 47
4 |
--------------------------------------------------------------------------------
/groundtruths/00004.txt:
--------------------------------------------------------------------------------
1 | person 53 42 40 52
2 | person 154 43 31 34
3 |
--------------------------------------------------------------------------------
/groundtruths/00005.txt:
--------------------------------------------------------------------------------
1 | person 59 31 44 51
2 | person 48 128 34 52
3 |
--------------------------------------------------------------------------------
/groundtruths/00006.txt:
--------------------------------------------------------------------------------
1 | person 36 89 52 76
2 | person 62 58 44 67
3 |
--------------------------------------------------------------------------------
/groundtruths/00007.txt:
--------------------------------------------------------------------------------
1 | person 28 31 55 63
2 | person 58 67 50 58
3 |
--------------------------------------------------------------------------------
/groundtruths_rel/00001.txt:
--------------------------------------------------------------------------------
1 | person 0.22 0.22 0.19 0.28
2 | person 0.7475 0.77 0.20500000000000002 0.31
3 |
--------------------------------------------------------------------------------
/groundtruths_rel/00002.txt:
--------------------------------------------------------------------------------
1 | person 0.7225 0.1925 0.215 0.275
2 | person 0.3375 0.7725 0.295 0.225
3 |
--------------------------------------------------------------------------------
/groundtruths_rel/00003.txt:
--------------------------------------------------------------------------------
1 | person 0.1675 0.19 0.17500000000000002 0.24
2 | person 0.7375 0.26 0.245 0.22
3 | person 0.6125 0.8125 0.23500000000000001 0.23500000000000001
4 |
--------------------------------------------------------------------------------
/groundtruths_rel/00004.txt:
--------------------------------------------------------------------------------
1 | person 0.365 0.34 0.2 0.26
2 | person 0.8475 0.3 0.155 0.17
3 |
--------------------------------------------------------------------------------
/groundtruths_rel/00005.txt:
--------------------------------------------------------------------------------
1 | person 0.405 0.28250000000000003 0.22 0.255
2 | person 0.325 0.77 0.17 0.26
3 |
--------------------------------------------------------------------------------
/groundtruths_rel/00006.txt:
--------------------------------------------------------------------------------
1 | person 0.31 0.635 0.26 0.38
2 | person 0.42 0.4575 0.22 0.335
3 |
--------------------------------------------------------------------------------
/groundtruths_rel/00007.txt:
--------------------------------------------------------------------------------
1 | person 0.2775 0.3125 0.275 0.315
2 | person 0.41500000000000004 0.48 0.25 0.29
3 |
--------------------------------------------------------------------------------
/lib/BoundingBox.py:
--------------------------------------------------------------------------------
1 | from utils import *
2 |
3 |
4 | class BoundingBox:
5 | def __init__(self,
6 | imageName,
7 | classId,
8 | x,
9 | y,
10 | w,
11 | h,
12 | typeCoordinates=CoordinatesType.Absolute,
13 | imgSize=None,
14 | bbType=BBType.GroundTruth,
15 | classConfidence=None,
16 | format=BBFormat.XYWH):
17 | """Constructor.
18 | Args:
19 | imageName: String representing the image name.
20 | classId: String value representing class id.
21 | x: Float value representing the X upper-left coordinate of the bounding box.
22 | y: Float value representing the Y upper-left coordinate of the bounding box.
23 | w: Float value representing the width of the bounding box.
24 | h: Float value representing the height of the bounding box.
25 | typeCoordinates: (optional) Enum (Relative or Absolute) represents if the bounding box
26 | coordinates (x,y,w,h) are absolute or relative to size of the image. Default:'Absolute'.
27 | imgSize: (optional) 2D vector (width, height)=>(int, int) represents the size of the
28 | image of the bounding box. If typeCoordinates is 'Relative', imgSize is required.
29 | bbType: (optional) Enum (Groundtruth or Detection) identifies if the bounding box
30 | represents a ground truth or a detection. If it is a detection, the classConfidence has
31 | to be informed.
32 | classConfidence: (optional) Float value representing the confidence of the detected
33 | class. If detectionType is Detection, classConfidence needs to be informed.
34 | format: (optional) Enum (BBFormat.XYWH or BBFormat.XYX2Y2) indicating the format of the
35 | coordinates of the bounding boxes. BBFormat.XYWH: <left> <top> <width> <height>.
36 | BBFormat.XYX2Y2: <left> <top> <right> <bottom>.
37 | """
38 | self._imageName = imageName
39 | self._typeCoordinates = typeCoordinates
40 | if typeCoordinates == CoordinatesType.Relative and imgSize is None:
41 | raise IOError(
42 | 'Parameter \'imgSize\' is required. It is necessary to inform the image size.')
43 | if bbType == BBType.Detected and classConfidence is None:
44 | raise IOError(
45 | 'For bbType=\'Detection\', it is necessary to inform the classConfidence value.')
46 | # if classConfidence != None and (classConfidence < 0 or classConfidence > 1):
47 | # raise IOError('classConfidence value must be a real value between 0 and 1. Value: %f' %
48 | # classConfidence)
49 |
50 | self._classConfidence = classConfidence
51 | self._bbType = bbType
52 | self._classId = classId
53 | self._format = format
54 |
55 | # If relative coordinates, convert to absolute values
56 | # For relative coords: (x,y,w,h)=(X_center/img_width, Y_center/img_height, width/img_width, height/img_height)
57 | if (typeCoordinates == CoordinatesType.Relative):
58 | (self._x, self._y, self._w, self._h) = convertToAbsoluteValues(imgSize, (x, y, w, h))
59 | self._width_img = imgSize[0]
60 | self._height_img = imgSize[1]
61 | if format == BBFormat.XYWH:
62 | self._x2 = self._w
63 | self._y2 = self._h
64 | self._w = self._x2 - self._x
65 | self._h = self._y2 - self._y
66 | else:
67 | raise IOError(
68 | 'For relative coordinates, the format must be XYWH (x,y,width,height)')
69 | # For absolute coords: (x,y,w,h)=real bb coords
70 | else:
71 | self._x = x
72 | self._y = y
73 | if format == BBFormat.XYWH:
74 | self._w = w
75 | self._h = h
76 | self._x2 = self._x + self._w
77 | self._y2 = self._y + self._h
78 | else: # format == BBFormat.XYX2Y2: <left> <top> <right> <bottom>
79 | self._x2 = w
80 | self._y2 = h
81 | self._w = self._x2 - self._x
82 | self._h = self._y2 - self._y
83 | if imgSize is None:
84 | self._width_img = None
85 | self._height_img = None
86 | else:
87 | self._width_img = imgSize[0]
88 | self._height_img = imgSize[1]
89 |
90 | def getAbsoluteBoundingBox(self, format=BBFormat.XYWH):
91 | if format == BBFormat.XYWH:
92 | return (self._x, self._y, self._w, self._h)
93 | elif format == BBFormat.XYX2Y2:
94 | return (self._x, self._y, self._x2, self._y2)
95 |
96 | def getRelativeBoundingBox(self, imgSize=None):
97 | if imgSize is None and self._width_img is None and self._height_img is None:
98 | raise IOError(
99 | 'Parameter \'imgSize\' is required. It is necessary to inform the image size.')
100 | if imgSize is not None:
101 | return convertToRelativeValues((imgSize[0], imgSize[1]),
102 | (self._x, self._x2, self._y, self._y2))
103 | else:
104 | return convertToRelativeValues((self._width_img, self._height_img),
105 | (self._x, self._x2, self._y, self._y2))
106 |
107 | def getImageName(self):
108 | return self._imageName
109 |
110 | def getConfidence(self):
111 | return self._classConfidence
112 |
113 | def getFormat(self):
114 | return self._format
115 |
116 | def getClassId(self):
117 | return self._classId
118 |
119 | def getImageSize(self):
120 | return (self._width_img, self._height_img)
121 |
122 | def getCoordinatesType(self):
123 | return self._typeCoordinates
124 |
125 | def getBBType(self):
126 | return self._bbType
127 |
128 | @staticmethod
129 | def compare(det1, det2):
130 | det1BB = det1.getAbsoluteBoundingBox()
131 | det1ImgSize = det1.getImageSize()
132 | det2BB = det2.getAbsoluteBoundingBox()
133 | det2ImgSize = det2.getImageSize()
134 |
135 | if det1.getClassId() == det2.getClassId() and \
136 | det1.getConfidence() == det2.getConfidence() and \
137 | det1BB[0] == det2BB[0] and \
138 | det1BB[1] == det2BB[1] and \
139 | det1BB[2] == det2BB[2] and \
140 | det1BB[3] == det2BB[3] and \
141 | det1ImgSize[0] == det2ImgSize[0] and \
142 | det1ImgSize[1] == det2ImgSize[1]:
143 | return True
144 | return False
145 |
146 | @staticmethod
147 | def clone(boundingBox):
148 | absBB = boundingBox.getAbsoluteBoundingBox(format=BBFormat.XYWH)
149 | # return (self._x,self._y,self._x2,self._y2)
150 | newBoundingBox = BoundingBox(boundingBox.getImageName(),
151 | boundingBox.getClassId(),
152 | absBB[0],
153 | absBB[1],
154 | absBB[2],
155 | absBB[3],
156 | typeCoordinates=boundingBox.getCoordinatesType(),
157 | imgSize=boundingBox.getImageSize(),
158 | bbType=boundingBox.getBBType(),
159 | classConfidence=boundingBox.getConfidence(),
160 | format=BBFormat.XYWH)
161 | return newBoundingBox
162 |
--------------------------------------------------------------------------------
/lib/BoundingBoxes.py:
--------------------------------------------------------------------------------
1 | from BoundingBox import *
2 | from utils import *
3 |
4 |
5 | class BoundingBoxes:
6 | def __init__(self):
7 | self._boundingBoxes = []
8 |
9 | def addBoundingBox(self, bb):
10 | self._boundingBoxes.append(bb)
11 |
12 | def removeBoundingBox(self, _boundingBox):
13 | for d in self._boundingBoxes:
14 | if BoundingBox.compare(d, _boundingBox):
15 | self._boundingBoxes.remove(d)
16 | return
17 |
18 | def removeAllBoundingBoxes(self):
19 | self._boundingBoxes = []
20 |
21 | def getBoundingBoxes(self):
22 | return self._boundingBoxes
23 |
24 | def getBoundingBoxByClass(self, classId):
25 | boundingBoxes = []
26 | for d in self._boundingBoxes:
27 | if d.getClassId() == classId: # get only bounding boxes of the specified class
28 | boundingBoxes.append(d)
29 | return boundingBoxes
30 |
31 | def getClasses(self):
32 | classes = []
33 | for d in self._boundingBoxes:
34 | c = d.getClassId()
35 | if c not in classes:
36 | classes.append(c)
37 | return classes
38 |
39 | def getBoundingBoxesByType(self, bbType):
40 | # get only specified bb type
41 | return [d for d in self._boundingBoxes if d.getBBType() == bbType]
42 |
43 | def getBoundingBoxesByImageName(self, imageName):
44 | # get only bounding boxes of the specified image
45 | return [d for d in self._boundingBoxes if d.getImageName() == imageName]
46 |
47 | def count(self, bbType=None):
48 | if bbType is None: # Return all bounding boxes
49 | return len(self._boundingBoxes)
50 | count = 0
51 | for d in self._boundingBoxes:
52 | if d.getBBType() == bbType: # get only specified bb type
53 | count += 1
54 | return count
55 |
56 | def clone(self):
57 | newBoundingBoxes = BoundingBoxes()
58 | for d in self._boundingBoxes:
59 | det = BoundingBox.clone(d)
60 | newBoundingBoxes.addBoundingBox(det)
61 | return newBoundingBoxes
62 |
63 | def drawAllBoundingBoxes(self, image, imageName):
64 | bbxes = self.getBoundingBoxesByImageName(imageName)
65 | for bb in bbxes:
66 | if bb.getBBType() == BBType.GroundTruth: # if ground truth
67 | image = add_bb_into_image(image, bb, color=(0, 255, 0)) # green
68 | else: # if detection
69 | image = add_bb_into_image(image, bb, color=(255, 0, 0)) # red
70 | return image
71 |
72 | # def drawAllBoundingBoxes(self, image):
73 | # for gt in self.getBoundingBoxesByType(BBType.GroundTruth):
74 | # image = add_bb_into_image(image, gt ,color=(0,255,0))
75 | # for det in self.getBoundingBoxesByType(BBType.Detected):
76 | # image = add_bb_into_image(image, det ,color=(255,0,0))
77 | # return image
78 |
--------------------------------------------------------------------------------
/lib/Evaluator.py:
--------------------------------------------------------------------------------
1 | ###########################################################################################
2 | # #
3 | # Evaluator class: Implements the most popular metrics for object detection #
4 | # #
5 | # Developed by: Rafael Padilla (rafael.padilla@smt.ufrj.br) #
6 | # SMT - Signal Multimedia and Telecommunications Lab #
7 | # COPPE - Universidade Federal do Rio de Janeiro #
8 | # Last modification: Oct 9th 2018 #
9 | ###########################################################################################
10 |
11 | import os
12 | import sys
13 | from collections import Counter
14 |
15 | import matplotlib.pyplot as plt
16 | import numpy as np
17 |
18 | from BoundingBox import *
19 | from BoundingBoxes import *
20 | from utils import *
21 |
22 |
23 | class Evaluator:
24 | def GetPascalVOCMetrics(self,
25 | boundingboxes,
26 | IOUThreshold=0.5,
27 | method=MethodAveragePrecision.EveryPointInterpolation):
28 | """Get the metrics used by the VOC Pascal 2012 challenge.
30 | Args:
31 | boundingboxes: Object of the class BoundingBoxes representing ground truth and detected
32 | bounding boxes;
33 | IOUThreshold: IOU threshold indicating which detections will be considered TP or FP
34 | (default value = 0.5);
35 | method (default = EveryPointInterpolation): It can be calculated as the implementation
36 | in the official PASCAL VOC toolkit (EveryPointInterpolation), or applying the 11-point
37 | interpolation as described in the paper "The PASCAL Visual Object Classes (VOC) Challenge"
38 | (ElevenPointInterpolation);
39 | Returns:
40 | A list of dictionaries. Each dictionary contains information and metrics of each class.
41 | The keys of each dictionary are:
42 | dict['class']: class representing the current dictionary;
43 | dict['precision']: array with the precision values;
44 | dict['recall']: array with the recall values;
45 | dict['AP']: average precision;
46 | dict['interpolated precision']: interpolated precision values;
47 | dict['interpolated recall']: interpolated recall values;
48 | dict['total positives']: total number of ground truth positives;
49 | dict['total TP']: total number of True Positive detections;
50 | dict['total FP']: total number of False Positive detections;
51 | """
52 | ret = [] # list containing metrics (precision, recall, average precision) of each class
53 | # List with all ground truths (Ex: [imageName,class,confidence=1, (bb coordinates XYX2Y2)])
54 | groundTruths = []
55 | # List with all detections (Ex: [imageName,class,confidence,(bb coordinates XYX2Y2)])
56 | detections = []
57 | # Get all classes
58 | classes = []
59 | # Loop through all bounding boxes and separate them into GTs and detections
60 | for bb in boundingboxes.getBoundingBoxes():
61 | # [imageName, class, confidence, (bb coordinates XYX2Y2)]
62 | if bb.getBBType() == BBType.GroundTruth:
63 | groundTruths.append([
64 | bb.getImageName(),
65 | bb.getClassId(), 1,
66 | bb.getAbsoluteBoundingBox(BBFormat.XYX2Y2)
67 | ])
68 | else:
69 | detections.append([
70 | bb.getImageName(),
71 | bb.getClassId(),
72 | bb.getConfidence(),
73 | bb.getAbsoluteBoundingBox(BBFormat.XYX2Y2)
74 | ])
75 | # get class
76 | if bb.getClassId() not in classes:
77 | classes.append(bb.getClassId())
78 | classes = sorted(classes)
79 | # Precision x Recall is obtained individually by each class
80 | # Loop through by classes
81 | for c in classes:
82 | # Get only detection of class c
83 | dects = [d for d in detections if d[1] == c]
85 | # Get only ground truths of class c, use filename as key
86 | gts = {}
87 | npos = 0
88 | for g in groundTruths:
89 | if g[1] == c:
90 | npos += 1
91 | gts[g[0]] = gts.get(g[0], []) + [g]
92 |
93 | # sort detections by decreasing confidence
94 | dects = sorted(dects, key=lambda conf: conf[2], reverse=True)
95 | TP = np.zeros(len(dects))
96 | FP = np.zeros(len(dects))
97 | # create dictionary with amount of gts for each image
98 | det = {key: np.zeros(len(gts[key])) for key in gts}
99 |
100 | # print("Evaluating class: %s (%d detections)" % (str(c), len(dects)))
101 | # Loop through detections
102 | for d in range(len(dects)):
103 | # print('dect %s => %s' % (dects[d][0], dects[d][3],))
104 | # Find ground truth image
105 | gt = gts[dects[d][0]] if dects[d][0] in gts else []
106 | iouMax = sys.float_info.min
107 | for j in range(len(gt)):
108 | # print('Ground truth gt => %s' % (gt[j][3],))
109 | iou = Evaluator.iou(dects[d][3], gt[j][3])
110 | if iou > iouMax:
111 | iouMax = iou
112 | jmax = j
113 | # Assign detection as true positive/don't care/false positive
114 | if iouMax >= IOUThreshold:
115 | if det[dects[d][0]][jmax] == 0:
116 | TP[d] = 1 # count as true positive
117 | det[dects[d][0]][jmax] = 1 # flag as already 'seen'
118 | # print("TP")
119 | else:
120 | FP[d] = 1 # count as false positive
121 | # print("FP")
122 | # - A detected "cat" is overlapped with a GT "cat" with IOU >= IOUThreshold.
123 | else:
124 | FP[d] = 1 # count as false positive
125 | # print("FP")
126 | # compute precision, recall and average precision
127 | acc_FP = np.cumsum(FP)
128 | acc_TP = np.cumsum(TP)
129 | rec = acc_TP / npos
130 | prec = np.divide(acc_TP, (acc_FP + acc_TP))
131 | # Depending on the method, call the right implementation
132 | if method == MethodAveragePrecision.EveryPointInterpolation:
133 | [ap, mpre, mrec, ii] = Evaluator.CalculateAveragePrecision(rec, prec)
134 | else:
135 | [ap, mpre, mrec, _] = Evaluator.ElevenPointInterpolatedAP(rec, prec)
136 | # add class result in the dictionary to be returned
137 | r = {
138 | 'class': c,
139 | 'precision': prec,
140 | 'recall': rec,
141 | 'AP': ap,
142 | 'interpolated precision': mpre,
143 | 'interpolated recall': mrec,
144 | 'total positives': npos,
145 | 'total TP': np.sum(TP),
146 | 'total FP': np.sum(FP)
147 | }
148 | ret.append(r)
149 | return ret
150 |
151 | def PlotPrecisionRecallCurve(self,
152 | boundingBoxes,
153 | IOUThreshold=0.5,
154 | method=MethodAveragePrecision.EveryPointInterpolation,
155 | showAP=False,
156 | showInterpolatedPrecision=False,
157 | savePath=None,
158 | showGraphic=True):
159 | """PlotPrecisionRecallCurve
160 | Plot the Precision x Recall curve for each class.
161 | Args:
162 | boundingBoxes: Object of the class BoundingBoxes representing ground truth and detected
163 | bounding boxes;
164 | IOUThreshold (optional): IOU threshold indicating which detections will be considered
165 | TP or FP (default value = 0.5);
166 |             method (default = EveryPointInterpolation): It can be calculated as the implementation
167 |             in the official PASCAL VOC toolkit (EveryPointInterpolation), or applying the 11-point
168 |             interpolation as described in the paper "The PASCAL Visual Object Classes (VOC)
169 |             Challenge" (ElevenPointInterpolation).
170 | showAP (optional): if True, the average precision value will be shown in the title of
171 | the graph (default = False);
172 | showInterpolatedPrecision (optional): if True, it will show in the plot the interpolated
173 | precision (default = False);
174 |             savePath (optional): if informed, the plot of each class will be saved as an image
175 |             in this folder (ex: /home/mywork/) (default = None);
176 | showGraphic (optional): if True, the plot will be shown (default = True)
177 | Returns:
178 | A list of dictionaries. Each dictionary contains information and metrics of each class.
179 | The keys of each dictionary are:
180 | dict['class']: class representing the current dictionary;
181 | dict['precision']: array with the precision values;
182 | dict['recall']: array with the recall values;
183 | dict['AP']: average precision;
184 | dict['interpolated precision']: interpolated precision values;
185 | dict['interpolated recall']: interpolated recall values;
186 | dict['total positives']: total number of ground truth positives;
187 | dict['total TP']: total number of True Positive detections;
188 |             dict['total FP']: total number of False Positive detections;
189 | """
190 | results = self.GetPascalVOCMetrics(boundingBoxes, IOUThreshold, method)
191 | result = None
192 |         # Each result represents a class
193 |         for result in results:
194 |             if result is None:
195 |                 raise IOError('Error: no class could be found in the results.')
196 |
197 | classId = result['class']
198 | precision = result['precision']
199 | recall = result['recall']
200 | average_precision = result['AP']
201 | mpre = result['interpolated precision']
202 | mrec = result['interpolated recall']
203 | npos = result['total positives']
204 | total_tp = result['total TP']
205 | total_fp = result['total FP']
206 |
207 | plt.close()
208 | if showInterpolatedPrecision:
209 | if method == MethodAveragePrecision.EveryPointInterpolation:
210 | plt.plot(mrec, mpre, '--r', label='Interpolated precision (every point)')
211 | elif method == MethodAveragePrecision.ElevenPointInterpolation:
212 | # Uncomment the line below if you want to plot the area
213 | # plt.plot(mrec, mpre, 'or', label='11-point interpolated precision')
214 | # Remove duplicates, getting only the highest precision of each recall value
215 | nrec = []
216 | nprec = []
217 |                 for idx in range(len(mrec)):
218 |                     r = mrec[idx]
219 |                     if r not in nrec:
220 |                         idxEq = np.argwhere(np.array(mrec) == r)
221 |                         nrec.append(r)
222 |                         nprec.append(max([mpre[int(i)] for i in idxEq]))
223 | plt.plot(nrec, nprec, 'or', label='11-point interpolated precision')
224 | plt.plot(recall, precision, label='Precision')
225 | plt.xlabel('recall')
226 | plt.ylabel('precision')
227 | if showAP:
228 | ap_str = "{0:.2f}%".format(average_precision * 100)
229 | # ap_str = "{0:.4f}%".format(average_precision * 100)
230 | plt.title('Precision x Recall curve \nClass: %s, AP: %s' % (str(classId), ap_str))
231 | else:
232 | plt.title('Precision x Recall curve \nClass: %s' % str(classId))
233 | plt.legend(shadow=True)
234 | plt.grid()
235 | ############################################################
236 | # Uncomment the following block to create plot with points #
237 | ############################################################
238 | # plt.plot(recall, precision, 'bo')
239 | # labels = ['R', 'Y', 'J', 'A', 'U', 'C', 'M', 'F', 'D', 'B', 'H', 'P', 'E', 'X', 'N', 'T',
240 | # 'K', 'Q', 'V', 'I', 'L', 'S', 'G', 'O']
241 | # dicPosition = {}
242 | # dicPosition['left_zero'] = (-30,0)
243 | # dicPosition['left_zero_slight'] = (-30,-10)
244 | # dicPosition['right_zero'] = (30,0)
245 | # dicPosition['left_up'] = (-30,20)
246 | # dicPosition['left_down'] = (-30,-25)
247 | # dicPosition['right_up'] = (20,20)
248 | # dicPosition['right_down'] = (20,-20)
249 | # dicPosition['up_zero'] = (0,30)
250 | # dicPosition['up_right'] = (0,30)
251 | # dicPosition['left_zero_long'] = (-60,-2)
252 | # dicPosition['down_zero'] = (-2,-30)
253 | # vecPositions = [
254 | # dicPosition['left_down'],
255 | # dicPosition['left_zero'],
256 | # dicPosition['right_zero'],
257 | # dicPosition['right_zero'], #'R', 'Y', 'J', 'A',
258 | # dicPosition['left_up'],
259 | # dicPosition['left_up'],
260 | # dicPosition['right_up'],
261 | # dicPosition['left_up'], # 'U', 'C', 'M', 'F',
262 | # dicPosition['left_zero'],
263 | # dicPosition['right_up'],
264 | # dicPosition['right_down'],
265 | # dicPosition['down_zero'], #'D', 'B', 'H', 'P'
266 | # dicPosition['left_up'],
267 | # dicPosition['up_zero'],
268 | # dicPosition['right_up'],
269 | # dicPosition['left_up'], # 'E', 'X', 'N', 'T',
270 | # dicPosition['left_zero'],
271 | # dicPosition['right_zero'],
272 | # dicPosition['left_zero_long'],
273 | # dicPosition['left_zero_slight'], # 'K', 'Q', 'V', 'I',
274 | # dicPosition['right_down'],
275 | # dicPosition['left_down'],
276 | # dicPosition['right_up'],
277 | # dicPosition['down_zero']
278 | # ] # 'L', 'S', 'G', 'O'
279 | # for idx in range(len(labels)):
280 | # box = dict(boxstyle='round,pad=.5',facecolor='yellow',alpha=0.5)
281 | # plt.annotate(labels[idx],
282 | # xy=(recall[idx],precision[idx]), xycoords='data',
283 | # xytext=vecPositions[idx], textcoords='offset points',
284 | # arrowprops=dict(arrowstyle="->", connectionstyle="arc3"),
285 | # bbox=box)
286 | if savePath is not None:
287 | plt.savefig(os.path.join(savePath, str(classId) + '.png'))
288 | if showGraphic is True:
289 | plt.show()
290 | # plt.waitforbuttonpress()
291 | plt.pause(0.05)
292 | return results
293 |
294 | @staticmethod
295 | def CalculateAveragePrecision(rec, prec):
296 |         mrec = [0] + [e for e in rec] + [1]
297 |         mpre = [0] + [e for e in prec] + [0]
304 | for i in range(len(mpre) - 1, 0, -1):
305 | mpre[i - 1] = max(mpre[i - 1], mpre[i])
306 | ii = []
307 | for i in range(len(mrec) - 1):
308 | if mrec[1+i] != mrec[i]:
309 | ii.append(i + 1)
310 | ap = 0
311 | for i in ii:
312 | ap = ap + np.sum((mrec[i] - mrec[i - 1]) * mpre[i])
313 | # return [ap, mpre[1:len(mpre)-1], mrec[1:len(mpre)-1], ii]
314 | return [ap, mpre[0:len(mpre) - 1], mrec[0:len(mpre) - 1], ii]
315 |
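    # A minimal worked example of the every-point AP above (illustrative, not from the
    # repository's test data): rec = [0.5], prec = [1.0] gives mrec = [0, 0.5, 1] and,
    # after the backward max sweep, mpre = [1, 1, 0], so
    # AP = (0.5 - 0) * 1 + (1 - 0.5) * 0 = 0.5.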
316 | @staticmethod
317 | # 11-point interpolated average precision
318 | def ElevenPointInterpolatedAP(rec, prec):
319 | # def CalculateAveragePrecision2(rec, prec):
320 |         mrec = [e for e in rec]
321 |         mpre = [e for e in prec]
328 | recallValues = np.linspace(0, 1, 11)
329 | recallValues = list(recallValues[::-1])
330 | rhoInterp = []
331 | recallValid = []
332 | # For each recallValues (0, 0.1, 0.2, ... , 1)
333 | for r in recallValues:
334 | # Obtain all recall values higher or equal than r
335 | argGreaterRecalls = np.argwhere(mrec[:] >= r)
336 | pmax = 0
337 | # If there are recalls above r
338 | if argGreaterRecalls.size != 0:
339 | pmax = max(mpre[argGreaterRecalls.min():])
340 | recallValid.append(r)
341 | rhoInterp.append(pmax)
342 | # By definition AP = sum(max(precision whose recall is above r))/11
343 | ap = sum(rhoInterp) / 11
344 | # Generating values for the plot
345 |         rvals = [recallValid[0]] + [e for e in recallValid] + [0]
346 |         pvals = [0] + [e for e in rhoInterp] + [0]
353 | # rhoInterp = rhoInterp[::-1]
354 | cc = []
355 | for i in range(len(rvals)):
356 | p = (rvals[i], pvals[i - 1])
357 | if p not in cc:
358 | cc.append(p)
359 | p = (rvals[i], pvals[i])
360 | if p not in cc:
361 | cc.append(p)
362 | recallValues = [i[0] for i in cc]
363 | rhoInterp = [i[1] for i in cc]
364 | return [ap, rhoInterp, recallValues, None]
365 |
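    # A minimal worked example of the 11-point AP above (illustrative): with rec = [0.5]
    # and prec = [1.0], only the 6 recall points r in {0, 0.1, ..., 0.5} have some
    # recall >= r, each contributing an interpolated precision of 1.0, so AP = 6/11 ~ 0.545.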
366 | # For each detections, calculate IOU with reference
367 | @staticmethod
368 | def _getAllIOUs(reference, detections):
369 | ret = []
370 | bbReference = reference.getAbsoluteBoundingBox(BBFormat.XYX2Y2)
371 | # img = np.zeros((200,200,3), np.uint8)
372 | for d in detections:
373 | bb = d.getAbsoluteBoundingBox(BBFormat.XYX2Y2)
374 | iou = Evaluator.iou(bbReference, bb)
375 | # Show blank image with the bounding boxes
376 | # img = add_bb_into_image(img, d, color=(255,0,0), thickness=2, label=None)
377 | # img = add_bb_into_image(img, reference, color=(0,255,0), thickness=2, label=None)
378 | ret.append((iou, reference, d)) # iou, reference, detection
379 | # cv2.imshow("comparing",img)
380 | # cv2.waitKey(0)
381 | # cv2.destroyWindow("comparing")
382 | return sorted(ret, key=lambda i: i[0], reverse=True) # sort by iou (from highest to lowest)
383 |
384 | @staticmethod
385 | def iou(boxA, boxB):
386 | # if boxes dont intersect
387 | if Evaluator._boxesIntersect(boxA, boxB) is False:
388 | return 0
389 | interArea = Evaluator._getIntersectionArea(boxA, boxB)
390 | union = Evaluator._getUnionAreas(boxA, boxB, interArea=interArea)
391 | # intersection over union
392 | iou = interArea / union
393 | assert iou >= 0
394 | return iou
395 |
396 | # boxA = (Ax1,Ay1,Ax2,Ay2)
397 | # boxB = (Bx1,By1,Bx2,By2)
398 | @staticmethod
399 | def _boxesIntersect(boxA, boxB):
400 | if boxA[0] > boxB[2]:
401 | return False # boxA is right of boxB
402 | if boxB[0] > boxA[2]:
403 | return False # boxA is left of boxB
404 | if boxA[3] < boxB[1]:
405 | return False # boxA is above boxB
406 | if boxA[1] > boxB[3]:
407 | return False # boxA is below boxB
408 | return True
409 |
410 | @staticmethod
411 | def _getIntersectionArea(boxA, boxB):
412 | xA = max(boxA[0], boxB[0])
413 | yA = max(boxA[1], boxB[1])
414 | xB = min(boxA[2], boxB[2])
415 | yB = min(boxA[3], boxB[3])
416 | # intersection area
417 | return (xB - xA + 1) * (yB - yA + 1)
418 |
419 | @staticmethod
420 | def _getUnionAreas(boxA, boxB, interArea=None):
421 | area_A = Evaluator._getArea(boxA)
422 | area_B = Evaluator._getArea(boxB)
423 | if interArea is None:
424 | interArea = Evaluator._getIntersectionArea(boxA, boxB)
425 | return float(area_A + area_B - interArea)
426 |
427 | @staticmethod
428 | def _getArea(box):
429 | return (box[2] - box[0] + 1) * (box[3] - box[1] + 1)
430 |
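    # A worked IOU example under the +1 pixel-area convention used above (illustrative):
    # boxA = (0, 0, 10, 10) and boxB = (5, 5, 15, 15) intersect in a 6x6 = 36 pixel
    # region, each box has area 11 * 11 = 121, so iou = 36 / (121 + 121 - 36) ~ 0.175.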
--------------------------------------------------------------------------------
/lib/__init__.py:
--------------------------------------------------------------------------------
1 | ###########################################################################################
2 | # Developed by: Rafael Padilla #
3 | # SMT - Signal Multimedia and Telecommunications Lab #
4 | # COPPE - Universidade Federal do Rio de Janeiro #
5 | # Last modification: May 24th 2018 #
6 | ###########################################################################################
7 |
--------------------------------------------------------------------------------
/lib/utils.py:
--------------------------------------------------------------------------------
1 | from enum import Enum
2 |
3 | import cv2
4 |
5 |
6 | class MethodAveragePrecision(Enum):
7 | """
8 |     Class representing the method used to compute the Average Precision:
9 |     every-point interpolation or 11-point interpolation.
10 |
11 | Developed by: Rafael Padilla
12 | Last modification: Apr 28 2018
13 | """
14 | EveryPointInterpolation = 1
15 | ElevenPointInterpolation = 2
16 |
17 |
18 | class CoordinatesType(Enum):
19 | """
20 |     Class representing whether the coordinates are relative to the
21 |     image size or are absolute values.
22 |
23 | Developed by: Rafael Padilla
24 | Last modification: Apr 28 2018
25 | """
26 | Relative = 1
27 | Absolute = 2
28 |
29 |
30 | class BBType(Enum):
31 | """
32 |     Class representing whether the bounding box is a ground truth or a detection.
33 |
34 | Developed by: Rafael Padilla
35 | Last modification: May 24 2018
36 | """
37 | GroundTruth = 1
38 | Detected = 2
39 |
40 |
41 | class BBFormat(Enum):
42 | """
43 | Class representing the format of a bounding box.
44 | It can be (X,Y,width,height) => XYWH
45 | or (X1,Y1,X2,Y2) => XYX2Y2
46 |
47 | Developed by: Rafael Padilla
48 | Last modification: May 24 2018
49 | """
50 | XYWH = 1
51 | XYX2Y2 = 2
52 |
53 |
54 | # size => (width, height) of the image
55 | # box => (X1, X2, Y1, Y2) of the bounding box
56 | def convertToRelativeValues(size, box):
57 | dw = 1. / (size[0])
58 | dh = 1. / (size[1])
59 | cx = (box[1] + box[0]) / 2.0
60 | cy = (box[3] + box[2]) / 2.0
61 | w = box[1] - box[0]
62 | h = box[3] - box[2]
63 | x = cx * dw
64 | y = cy * dh
65 | w = w * dw
66 | h = h * dh
67 | # x,y => (bounding_box_center)/width_of_the_image
68 | # w => bounding_box_width / width_of_the_image
69 | # h => bounding_box_height / height_of_the_image
70 | return (x, y, w, h)
71 |
72 |
73 | # size => (width, height) of the image
74 | # box => (centerX, centerY, w, h) of the bounding box relative to the image
75 | def convertToAbsoluteValues(size, box):
76 | # w_box = round(size[0] * box[2])
77 | # h_box = round(size[1] * box[3])
78 | xIn = round(((2 * float(box[0]) - float(box[2])) * size[0] / 2))
79 | yIn = round(((2 * float(box[1]) - float(box[3])) * size[1] / 2))
80 | xEnd = xIn + round(float(box[2]) * size[0])
81 | yEnd = yIn + round(float(box[3]) * size[1])
82 | if xIn < 0:
83 | xIn = 0
84 | if yIn < 0:
85 | yIn = 0
86 | if xEnd >= size[0]:
87 | xEnd = size[0] - 1
88 | if yEnd >= size[1]:
89 | yEnd = size[1] - 1
90 | return (xIn, yIn, xEnd, yEnd)
91 |
92 |
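# A round-trip example for the two converters above (illustrative values): on a
# 100x100 image, the relative YOLO-style box (cx, cy, w, h) = (0.5, 0.5, 0.2, 0.4)
# maps to absolute corners and back:
#   convertToAbsoluteValues((100, 100), (0.5, 0.5, 0.2, 0.4))  # -> (40, 30, 60, 70)
#   convertToRelativeValues((100, 100), (40, 60, 30, 70))      # -> (0.5, 0.5, 0.2, 0.4)
# Note that convertToRelativeValues expects the box as (X1, X2, Y1, Y2).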
93 | def add_bb_into_image(image, bb, color=(255, 0, 0), thickness=2, label=None):
94 | r = int(color[0])
95 | g = int(color[1])
96 | b = int(color[2])
97 |
98 | font = cv2.FONT_HERSHEY_SIMPLEX
99 | fontScale = 0.5
100 | fontThickness = 1
101 |
102 | x1, y1, x2, y2 = bb.getAbsoluteBoundingBox(BBFormat.XYX2Y2)
103 | x1 = int(x1)
104 | y1 = int(y1)
105 | x2 = int(x2)
106 | y2 = int(y2)
107 | cv2.rectangle(image, (x1, y1), (x2, y2), (b, g, r), thickness)
108 | # Add label
109 | if label is not None:
110 | # Get size of the text box
111 | (tw, th) = cv2.getTextSize(label, font, fontScale, fontThickness)[0]
112 | # Top-left coord of the textbox
113 | (xin_bb, yin_bb) = (x1 + thickness, y1 - th + int(12.5 * fontScale))
114 | # Checking position of the text top-left (outside or inside the bb)
115 | if yin_bb - th <= 0: # if outside the image
116 | yin_bb = y1 + th # put it inside the bb
117 | r_Xin = x1 - int(thickness / 2)
118 | r_Yin = y1 - th - int(thickness / 2)
119 | # Draw filled rectangle to put the text in it
120 | cv2.rectangle(image, (r_Xin, r_Yin - thickness),
121 | (r_Xin + tw + thickness * 3, r_Yin + th + int(12.5 * fontScale)), (b, g, r),
122 | -1)
123 | cv2.putText(image, label, (xin_bb, yin_bb), font, fontScale, (0, 0, 0), fontThickness,
124 | cv2.LINE_AA)
125 | return image
126 |
--------------------------------------------------------------------------------
/message.txt:
--------------------------------------------------------------------------------
1 | ####################################################################################################
2 | # #
3 | # THE CURRENT VERSION WAS UPDATED WITH A VISUAL INTERFACE, INCLUDING MORE METRICS AND SUPPORTING #
4 | # OTHER FILE FORMATS. #
5 | # #
6 | # PLEASE ACCESS IT AT: #
7 | # https://github.com/rafaelpadilla/review_object_detection_metrics #
8 | # #
9 | # @Article{electronics10030279, #
10 | # author = {Padilla, Rafael and Passos, Wesley L. and Dias, Thadeu L. B. and Netto, #
11 | # Sergio L. and da Silva, Eduardo A. B.}, #
12 | # title = {A Comparative Analysis of Object Detection Metrics with a Companion #
13 | # Open-Source Toolkit}, #
14 | # journal = {Electronics}, #
15 | # volume = {10}, #
16 | # year = {2021}, #
17 | # number = {3}, #
18 | # article-number = {279}, #
19 | # url = {https://www.mdpi.com/2079-9292/10/3/279}, #
20 | # issn = {2079-9292}, #
21 | # doi = {10.3390/electronics10030279}, } #
22 | # #
23 | ####################################################################################################
--------------------------------------------------------------------------------
/paper_survey_on_performance_metrics_for_object_detection_algorithms.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rafaelpadilla/Object-Detection-Metrics/dcb285e7dea7e73d9480937d58de0e9bdfc20051/paper_survey_on_performance_metrics_for_object_detection_algorithms.pdf
--------------------------------------------------------------------------------
/pascalvoc.py:
--------------------------------------------------------------------------------
1 | ###########################################################################################
2 | # #
3 | # This sample shows how to evaluate object detections applying the following metrics: #
4 | # * Precision x Recall curve ----> (used by VOC PASCAL 2012) #
5 | # * Average Precision (AP) ----> (used by VOC PASCAL 2012) #
6 | # #
7 | # Developed by: Rafael Padilla (rafael.padilla@smt.ufrj.br) #
8 | # SMT - Signal Multimedia and Telecommunications Lab #
9 | # COPPE - Universidade Federal do Rio de Janeiro #
10 | # Last modification: Feb 12th 2021 #
11 | ###########################################################################################
12 |
13 | ####################################################################################################
14 | # #
15 | # THE CURRENT VERSION WAS UPDATED WITH A VISUAL INTERFACE, INCLUDING MORE METRICS AND SUPPORTING #
16 | # OTHER FILE FORMATS. PLEASE ACCESS IT AT: #
17 | # #
18 | # https://github.com/rafaelpadilla/review_object_detection_metrics #
19 | # #
20 | # @Article{electronics10030279, #
21 | # author = {Padilla, Rafael and Passos, Wesley L. and Dias, Thadeu L. B. and Netto, #
22 | # Sergio L. and da Silva, Eduardo A. B.}, #
23 | # title = {A Comparative Analysis of Object Detection Metrics with a Companion #
24 | # Open-Source Toolkit}, #
25 | # journal = {Electronics}, #
26 | # volume = {10}, #
27 | # year = {2021}, #
28 | # number = {3}, #
29 | # article-number = {279}, #
30 | # url = {https://www.mdpi.com/2079-9292/10/3/279}, #
31 | # issn = {2079-9292}, #
32 | # doi = {10.3390/electronics10030279}, } #
33 | ####################################################################################################
34 |
35 | ####################################################################################################
36 | # If you use this project, please consider citing: #
37 | # #
38 | # @INPROCEEDINGS {padillaCITE2020, #
39 | # author = {R. {Padilla} and S. L. {Netto} and E. A. B. {da Silva}}, #
40 | # title = {A Survey on Performance Metrics for Object-Detection Algorithms}, #
41 | # booktitle = {2020 International Conference on Systems, Signals and Image Processing (IWSSIP)},#
42 | # year = {2020}, #
43 | # pages = {237-242},} #
44 | # #
45 | # This work is published at: https://github.com/rafaelpadilla/Object-Detection-Metrics #
46 | ####################################################################################################
47 |
48 | import argparse
49 | import glob
50 | import os
51 | import shutil
52 | import sys
53 |
54 | import _init_paths
55 | from BoundingBox import BoundingBox
56 | from BoundingBoxes import BoundingBoxes
57 | from Evaluator import *
58 | from utils import BBFormat
59 |
60 |
61 | # Validate formats
62 | def ValidateFormats(argFormat, argName, errors):
63 | if argFormat == 'xywh':
64 | return BBFormat.XYWH
65 | elif argFormat == 'xyrb':
66 | return BBFormat.XYX2Y2
67 | elif argFormat is None:
68 | return BBFormat.XYWH # default when nothing is passed
69 | else:
70 | errors.append('argument %s: invalid value. It must be either \'xywh\' or \'xyrb\'' %
71 | argName)
72 |
73 |
74 | # Validate mandatory args
75 | def ValidateMandatoryArgs(arg, argName, errors):
76 | if arg is None:
77 | errors.append('argument %s: required argument' % argName)
78 | else:
79 | return True
80 |
81 |
82 | def ValidateImageSize(arg, argName, argInformed, errors):
83 | errorMsg = 'argument %s: required argument if %s is relative' % (argName, argInformed)
84 | ret = None
85 | if arg is None:
86 | errors.append(errorMsg)
87 | else:
88 | arg = arg.replace('(', '').replace(')', '')
89 | args = arg.split(',')
90 | if len(args) != 2:
91 | errors.append('%s. It must be in the format \'width,height\' (e.g. \'600,400\')' %
92 | errorMsg)
93 | else:
94 | if not args[0].isdigit() or not args[1].isdigit():
95 | errors.append(
96 |                     '%s. It must be in the format \'width,height\' with INTEGER values (e.g. \'600,400\')' %
97 | errorMsg)
98 | else:
99 | ret = (int(args[0]), int(args[1]))
100 | return ret
101 |
102 |
103 | # Validate coordinate types
104 | def ValidateCoordinatesTypes(arg, argName, errors):
105 | if arg == 'abs':
106 | return CoordinatesType.Absolute
107 | elif arg == 'rel':
108 | return CoordinatesType.Relative
109 | elif arg is None:
110 | return CoordinatesType.Absolute # default when nothing is passed
111 | errors.append('argument %s: invalid value. It must be either \'rel\' or \'abs\'' % argName)
112 |
113 |
114 | def ValidatePaths(arg, nameArg, errors):
115 | if arg is None:
116 | errors.append('argument %s: invalid directory' % nameArg)
117 | elif os.path.isdir(arg) is False and os.path.isdir(os.path.join(currentPath, arg)) is False:
118 | errors.append('argument %s: directory does not exist \'%s\'' % (nameArg, arg))
119 | # elif os.path.isdir(os.path.join(currentPath, arg)) is True:
120 | # arg = os.path.join(currentPath, arg)
121 | else:
122 | arg = os.path.join(currentPath, arg)
123 | return arg
124 |
125 |
126 | def getBoundingBoxes(directory,
127 | isGT,
128 | bbFormat,
129 | coordType,
130 | allBoundingBoxes=None,
131 | allClasses=None,
132 | imgSize=(0, 0)):
133 | """Read txt files containing bounding boxes (ground truth and detections)."""
134 | if allBoundingBoxes is None:
135 | allBoundingBoxes = BoundingBoxes()
136 | if allClasses is None:
137 | allClasses = []
138 | # Read ground truths
139 | os.chdir(directory)
140 | files = glob.glob("*.txt")
141 | files.sort()
142 | # Read GT detections from txt file
143 | # Each line of the files in the groundtruths folder represents a ground truth bounding box
144 | # (bounding boxes that a detector should detect)
145 | # Each value of each line is "class_id, x, y, width, height" respectively
146 | # Class_id represents the class of the bounding box
147 | # x, y represents the most top-left coordinates of the bounding box
148 | # width, height represents the width and the height of the bounding box
149 | for f in files:
150 | nameOfImage = f.replace(".txt", "")
151 | fh1 = open(f, "r")
152 | for line in fh1:
153 | line = line.replace("\n", "")
154 | if line.replace(' ', '') == '':
155 | continue
156 | splitLine = line.split(" ")
157 | if isGT:
158 | # idClass = int(splitLine[0]) #class
159 | idClass = (splitLine[0]) # class
160 | x = float(splitLine[1])
161 | y = float(splitLine[2])
162 | w = float(splitLine[3])
163 | h = float(splitLine[4])
164 | bb = BoundingBox(nameOfImage,
165 | idClass,
166 | x,
167 | y,
168 | w,
169 | h,
170 | coordType,
171 | imgSize,
172 | BBType.GroundTruth,
173 | format=bbFormat)
174 | else:
175 | # idClass = int(splitLine[0]) #class
176 | idClass = (splitLine[0]) # class
177 | confidence = float(splitLine[1])
178 | x = float(splitLine[2])
179 | y = float(splitLine[3])
180 | w = float(splitLine[4])
181 | h = float(splitLine[5])
182 | bb = BoundingBox(nameOfImage,
183 | idClass,
184 | x,
185 | y,
186 | w,
187 | h,
188 | coordType,
189 | imgSize,
190 | BBType.Detected,
191 | confidence,
192 | format=bbFormat)
193 | allBoundingBoxes.addBoundingBox(bb)
194 | if idClass not in allClasses:
195 | allClasses.append(idClass)
196 | fh1.close()
197 | return allBoundingBoxes, allClasses
198 |
199 |
200 | # Get current path to set default folders
201 | currentPath = os.path.dirname(os.path.abspath(__file__))
202 |
203 | VERSION = '0.2 (beta)'
204 |
205 | with open('message.txt', 'r') as f:
206 | message = f'\n\n{f.read()}\n\n'
207 |
208 | print(message)
209 |
210 | parser = argparse.ArgumentParser(
211 | prog='Object Detection Metrics - Pascal VOC',
212 | description=
213 | f'{message}\nThis project applies the most popular metrics used to evaluate object detection '
214 | 'algorithms.\nThe current implementation runs the Pascal VOC metrics.\nFor further references, '
215 | 'please check:\nhttps://github.com/rafaelpadilla/Object-Detection-Metrics',
216 | epilog="Developed by: Rafael Padilla (rafael.padilla@smt.ufrj.br)")
217 | parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + VERSION)
218 | # Positional arguments
219 | # Mandatory
220 | parser.add_argument('-gt',
221 | '--gtfolder',
222 | dest='gtFolder',
223 | default=os.path.join(currentPath, 'groundtruths'),
224 | metavar='',
225 | help='folder containing your ground truth bounding boxes')
226 | parser.add_argument('-det',
227 | '--detfolder',
228 | dest='detFolder',
229 | default=os.path.join(currentPath, 'detections'),
230 | metavar='',
231 | help='folder containing your detected bounding boxes')
232 | # Optional
233 | parser.add_argument('-t',
234 | '--threshold',
235 | dest='iouThreshold',
236 | type=float,
237 | default=0.5,
238 | metavar='',
239 | help='IOU threshold. Default 0.5')
240 | parser.add_argument('-gtformat',
241 | dest='gtFormat',
242 | metavar='',
243 | default='xywh',
244 |                     help='format of the coordinates of the ground truth bounding boxes: '
245 |                     '(\'xywh\': <left> <top> <width> <height>)'
246 |                     ' or (\'xyrb\': <left> <top> <right> <bottom>)')
247 | parser.add_argument('-detformat',
248 | dest='detFormat',
249 | metavar='',
250 | default='xywh',
251 |                     help='format of the coordinates of the detected bounding boxes '
252 |                     '(\'xywh\': <left> <top> <width> <height>) '
253 |                     'or (\'xyrb\': <left> <top> <right> <bottom>)')
254 | parser.add_argument('-gtcoords',
255 | dest='gtCoordinates',
256 | default='abs',
257 | metavar='',
258 | help='reference of the ground truth bounding box coordinates: absolute '
259 | 'values (\'abs\') or relative to its image size (\'rel\')')
260 | parser.add_argument('-detcoords',
261 | default='abs',
262 | dest='detCoordinates',
263 | metavar='',
264 |                     help='reference of the detected bounding box coordinates: '
265 | 'absolute values (\'abs\') or relative to its image size (\'rel\')')
266 | parser.add_argument('-imgsize',
267 | dest='imgSize',
268 | metavar='',
269 | help='image size. Required if -gtcoords or -detcoords are \'rel\'')
270 | parser.add_argument('-sp',
271 | '--savepath',
272 | dest='savePath',
273 | metavar='',
274 | help='folder where the plots are saved')
275 | parser.add_argument('-np',
276 | '--noplot',
277 | dest='showPlot',
278 | action='store_false',
279 | help='no plot is shown during execution')
280 | args = parser.parse_args()
281 |
282 | iouThreshold = args.iouThreshold
283 |
284 | # Arguments validation
285 | errors = []
286 | # Validate formats
287 | gtFormat = ValidateFormats(args.gtFormat, '-gtformat', errors)
288 | detFormat = ValidateFormats(args.detFormat, '-detformat', errors)
289 | # Groundtruth folder
290 | if ValidateMandatoryArgs(args.gtFolder, '-gt/--gtfolder', errors):
291 | gtFolder = ValidatePaths(args.gtFolder, '-gt/--gtfolder', errors)
292 | else:
293 | # errors.pop()
294 | gtFolder = os.path.join(currentPath, 'groundtruths')
295 | if os.path.isdir(gtFolder) is False:
296 | errors.append('folder %s not found' % gtFolder)
297 | # Coordinates types
298 | gtCoordType = ValidateCoordinatesTypes(args.gtCoordinates, '-gtCoordinates', errors)
299 | detCoordType = ValidateCoordinatesTypes(args.detCoordinates, '-detCoordinates', errors)
300 | imgSize = (0, 0)
301 | if gtCoordType == CoordinatesType.Relative: # Image size is required
302 | imgSize = ValidateImageSize(args.imgSize, '-imgsize', '-gtCoordinates', errors)
303 | if detCoordType == CoordinatesType.Relative: # Image size is required
304 | imgSize = ValidateImageSize(args.imgSize, '-imgsize', '-detCoordinates', errors)
305 | # Detection folder
306 | if ValidateMandatoryArgs(args.detFolder, '-det/--detfolder', errors):
307 | detFolder = ValidatePaths(args.detFolder, '-det/--detfolder', errors)
308 | else:
309 | # errors.pop()
310 | detFolder = os.path.join(currentPath, 'detections')
311 | if os.path.isdir(detFolder) is False:
312 | errors.append('folder %s not found' % detFolder)
313 | if args.savePath is not None:
314 | savePath = ValidatePaths(args.savePath, '-sp/--savepath', errors)
315 | else:
316 | savePath = os.path.join(currentPath, 'results')
317 | # Validate savePath
318 | # If error, show error messages
319 | if len(errors) != 0:
320 |     print("""usage: Object Detection Metrics [-h] [-v] [-gt] [-det] [-t] [-gtformat]
321 |     [-detformat] [-sp] [-np]""")
322 | print('Object Detection Metrics: error(s): ')
323 | [print(e) for e in errors]
324 | sys.exit()
325 |
326 | # Check if path to save results already exists and is not empty
327 | if os.path.isdir(savePath) and os.listdir(savePath):
328 | key_pressed = ''
329 | while key_pressed.upper() not in ['Y', 'N']:
330 | print(f'Folder {savePath} already exists and may contain important results.\n')
331 | print(f'Enter \'Y\' to continue. WARNING: THIS WILL REMOVE ALL THE CONTENTS OF THE FOLDER!')
332 | print(f'Or enter \'N\' to abort and choose another folder to save the results.')
333 | key_pressed = input('')
334 |
335 | if key_pressed.upper() == 'N':
336 | print('Process canceled')
337 | sys.exit()
338 |
339 | # Clear folder and save results
340 | shutil.rmtree(savePath, ignore_errors=True)
341 | os.makedirs(savePath)
342 | # Show plot during execution
343 | showPlot = args.showPlot
344 |
345 | # print('iouThreshold= %f' % iouThreshold)
346 | # print('savePath = %s' % savePath)
347 | # print('gtFormat = %s' % gtFormat)
348 | # print('detFormat = %s' % detFormat)
349 | # print('gtFolder = %s' % gtFolder)
350 | # print('detFolder = %s' % detFolder)
351 | # print('gtCoordType = %s' % gtCoordType)
352 | # print('detCoordType = %s' % detCoordType)
353 | # print('showPlot %s' % showPlot)
354 |
355 | # Get groundtruth boxes
356 | allBoundingBoxes, allClasses = getBoundingBoxes(gtFolder,
357 | True,
358 | gtFormat,
359 | gtCoordType,
360 | imgSize=imgSize)
361 | # Get detected boxes
362 | allBoundingBoxes, allClasses = getBoundingBoxes(detFolder,
363 | False,
364 | detFormat,
365 | detCoordType,
366 | allBoundingBoxes,
367 | allClasses,
368 | imgSize=imgSize)
369 | allClasses.sort()
370 |
371 | evaluator = Evaluator()
372 | acc_AP = 0
373 | validClasses = 0
374 |
375 | # Plot Precision x Recall curve
376 | detections = evaluator.PlotPrecisionRecallCurve(
377 | allBoundingBoxes, # Object containing all bounding boxes (ground truths and detections)
378 | IOUThreshold=iouThreshold, # IOU threshold
379 | method=MethodAveragePrecision.EveryPointInterpolation,
380 | showAP=True, # Show Average Precision in the title of the plot
381 | showInterpolatedPrecision=False, # Don't plot the interpolated precision curve
382 | savePath=savePath,
383 | showGraphic=showPlot)
384 |
385 | f = open(os.path.join(savePath, 'results.txt'), 'w')
386 | f.write('Object Detection Metrics\n')
387 | f.write('https://github.com/rafaelpadilla/Object-Detection-Metrics\n\n\n')
388 | f.write('Average Precision (AP), Precision and Recall per class:')
389 |
390 | # each detection is a class
391 | for metricsPerClass in detections:
392 |
393 | # Get metric values per each class
394 | cl = metricsPerClass['class']
395 | ap = metricsPerClass['AP']
396 | precision = metricsPerClass['precision']
397 | recall = metricsPerClass['recall']
398 | totalPositives = metricsPerClass['total positives']
399 | total_TP = metricsPerClass['total TP']
400 | total_FP = metricsPerClass['total FP']
401 |
402 | if totalPositives > 0:
403 | validClasses = validClasses + 1
404 | acc_AP = acc_AP + ap
405 | prec = ['%.2f' % p for p in precision]
406 | rec = ['%.2f' % r for r in recall]
407 | ap_str = "{0:.2f}%".format(ap * 100)
408 | # ap_str = "{0:.4f}%".format(ap * 100)
409 | print('AP: %s (%s)' % (ap_str, cl))
410 | f.write('\n\nClass: %s' % cl)
411 | f.write('\nAP: %s' % ap_str)
412 | f.write('\nPrecision: %s' % prec)
413 | f.write('\nRecall: %s' % rec)
414 |
415 | mAP = acc_AP / validClasses
416 | mAP_str = "{0:.2f}%".format(mAP * 100)
417 | print('mAP: %s' % mAP_str)
418 | f.write('\n\n\nmAP: %s' % mAP_str)
419 |
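# Example invocations (assumption: run from the repository root, where the default
# 'groundtruths' and 'detections' folders live):
#   python pascalvoc.py                                        # defaults, IOU = 0.5
#   python pascalvoc.py -gt groundtruths -det detections -t 0.3 -np -sp results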
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | certifi==2019.11.28
2 | cycler==0.10.0
3 | kiwisolver==1.1.0
4 | matplotlib==3.1.3
5 | numpy==1.22.0
6 | pyparsing==2.4.6
7 | PyQt5==5.12.3
8 | PyQt5-sip==4.19.18
9 | PyQtWebEngine==5.12.1
10 | python-dateutil==2.8.1
11 | six==1.14.0
12 | tornado==6.0.3
13 | opencv-python==4.2.0.32
14 |
--------------------------------------------------------------------------------
/results/person.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rafaelpadilla/Object-Detection-Metrics/dcb285e7dea7e73d9480937d58de0e9bdfc20051/results/person.png
--------------------------------------------------------------------------------
/results/person_11-pointInterpolation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rafaelpadilla/Object-Detection-Metrics/dcb285e7dea7e73d9480937d58de0e9bdfc20051/results/person_11-pointInterpolation.png
--------------------------------------------------------------------------------
/results/results.txt:
--------------------------------------------------------------------------------
1 | Object Detection Metrics
2 | https://github.com/rafaelpadilla/Object-Detection-Metrics
3 |
4 |
5 | Average Precision (AP), Precision and Recall per class:
6 |
7 | Class: person
8 | AP: 24.57%
9 | Precision: ['1.00', '0.50', '0.67', '0.50', '0.40', '0.33', '0.29', '0.25', '0.22', '0.30', '0.27', '0.33', '0.38', '0.43', '0.40', '0.38', '0.35', '0.33', '0.32', '0.30', '0.29', '0.27', '0.30', '0.29']
10 | Recall: ['0.07', '0.07', '0.13', '0.13', '0.13', '0.13', '0.13', '0.13', '0.13', '0.20', '0.20', '0.27', '0.33', '0.40', '0.40', '0.40', '0.40', '0.40', '0.40', '0.40', '0.40', '0.40', '0.47', '0.47']
11 |
12 |
13 | mAP: 24.57%
--------------------------------------------------------------------------------
/samples/sample_1/README.md:
--------------------------------------------------------------------------------
1 | # Sample 1
2 |
3 | This sample was created to illustrate the usage of the classes **BoundingBox** and **BoundingBoxes**. Objects of the class `BoundingBox` are an abstraction of the detections or the ground truth boxes. The object of the class `BoundingBoxes` is used by evaluation methods and represents a collection of bounding boxes.
4 |
5 | The full code can be accessed [here](https://github.com/rafaelpadilla/Object-Detection-Metrics/blob/master/samples/sample_1/sample_1.py).
6 |
7 | If you just want to evaluate your detections dealing with a high level interface, just check the instructions [here](https://github.com/rafaelpadilla/Object-Detection-Metrics/blob/master/README.md#how-to-use-this-project).
8 |
9 | ### The code
10 |
11 | The classes `BoundingBox` and `BoundingBoxes` are in the `lib/` folder. The file `_init_paths.py` imports these contents into our example. The file `utils.py` contains enumerators and useful functions. The code below shows how to import them:
12 |
13 | ```python
14 | import _init_paths
15 | from utils import *
16 | from BoundingBox import BoundingBox
17 | from BoundingBoxes import BoundingBoxes
18 | ```
19 | Don't forget to put the contents of the folder `lib/` in the same folder as your code.
20 |
21 | All bounding boxes (detected and ground truth) are represented by objects of the class `BoundingBox`. Each bounding box is created using the constructor. Use the parameter `bbType` to identify if the box is a ground truth or a detected one. The parameter `imageName` determines the image that the box belongs to. All the parameters used to create the object are:
22 |
23 | * `imageName`: String representing the image name.
24 | * `classId`: String value representing class id (e.g. 'house', 'dog', 'person')
25 | * `x`: Float value representing the X upper-left coordinate of the bounding box.
26 | * `y`: Float value representing the Y upper-left coordinate of the bounding box.
27 | * `w`: Float value representing the width of the bounding box. It can also be used to represent the X lower-right coordinate of the bounding box. For that, use the parameter `format=BBFormat.XYX2Y2`.
28 | * `h`: Float value representing the height of the bounding box. It can also be used to represent the Y lower-right coordinate of the bounding box. For that, use the parameter `format=BBFormat.XYX2Y2`.
29 | * `typeCoordinates`: (optional) Enum (`CoordinatesType.Relative` or `CoordinatesType.Absolute`) representing if the bounding box coordinates (x,y,w,h) are absolute or relative to the size of the image. Default: 'Absolute'. Some projects like YOLO identify the detected bounding boxes as relative to the image size, so this option may be useful in such cases. Note that if the coordinate type is relative, the `imgSize` parameter is required.
30 | * `imgSize`: (optional) 2D vector (width, height)=>(int, int) representing the size of the image of the bounding box. If `typeCoordinates=CoordinatesType.Relative`, the parameter `imgSize` is required.
31 | * `bbType`: (optional) Enum (`bbType=BBType.GroundTruth` or `bbType=BBType.Detected`) identifying if the bounding box represents a ground truth or a detection. Note that if it is a detection, the `classConfidence` has to be informed.
32 | * `classConfidence`: (optional) Float value representing the confidence of the detected class. If `bbType` is `BBType.Detected`, `classConfidence` needs to be informed.
33 | * `format`: (optional) Enum (`BBFormat.XYWH` or `BBFormat.XYX2Y2`) indicating the format of the coordinates of the bounding boxes. If `format=BBFormat.XYWH`, the parameters `x`,`y`,`w` and `h` are: \<left\>, \<top\>, \<width\> and \<height\> respectively. If `format=BBFormat.XYX2Y2`, the parameters `x`,`y`,`w` and `h` are: \<left\>, \<top\>, \<right\> and \<bottom\> respectively.
34 |
35 | **Attention**: The bounding boxes of the same image (detections or ground truth) must have the same `imageName`.
36 |
37 | The snippet below shows the creation of bounding boxes of 3 different images (000001.jpg, 000002.jpg and 000003.jpg) containing 2, 1 and 1 ground truth objects to be detected respectively. There are 3 detected bounding boxes, one in each image. The images are available in the folders [sample_1/images/detections/](https://github.com/rafaelpadilla/Object-Detection-Metrics/tree/master/samples/sample_1/images/detections) and [sample_1/images/groundtruths/](https://github.com/rafaelpadilla/Object-Detection-Metrics/tree/master/samples/sample_1/images/groundtruths).
38 |
39 | ```python
40 | # Ground truth bounding boxes of 000001.jpg
41 | gt_boundingBox_1 = BoundingBox(imageName='000001', classId='dog', x=0.34419263456090654, y=0.611,
42 | w=0.4164305949008499, h=0.262, typeCoordinates=CoordinatesType.Relative,
43 | bbType=BBType.GroundTruth, format=BBFormat.XYWH, imgSize=(353,500))
44 | gt_boundingBox_2 = BoundingBox(imageName='000001', classId='person', x=0.509915014164306, y=0.51,
45 | w=0.9745042492917847, h=0.972, typeCoordinates=CoordinatesType.Relative,
46 | bbType=BBType.GroundTruth, format=BBFormat.XYWH, imgSize=(353,500))
47 | # Ground truth bounding boxes of 000002.jpg
48 | gt_boundingBox_3 = BoundingBox(imageName='000002', classId='train', x=0.5164179104477612, y=0.501,
49 | w=0.20298507462686569, h=0.202, typeCoordinates=CoordinatesType.Relative,
50 | bbType=BBType.GroundTruth, format=BBFormat.XYWH, imgSize=(335,500))
51 | # Ground truth bounding boxes of 000003.jpg
52 | gt_boundingBox_4 = BoundingBox(imageName='000003', classId='bench', x=0.338, y=0.4666666666666667,
53 | w=0.184, h=0.10666666666666666, typeCoordinates=CoordinatesType.Relative,
54 | bbType=BBType.GroundTruth, format=BBFormat.XYWH, imgSize=(500,375))
55 | gt_boundingBox_5 = BoundingBox(imageName='000003', classId='bench', x=0.546, y=0.48133333333333334,
56 | w=0.136, h=0.13066666666666665, typeCoordinates=CoordinatesType.Relative,
57 | bbType=BBType.GroundTruth, format=BBFormat.XYWH, imgSize=(500,375))
58 | # Detected bounding boxes of 000001.jpg
59 | detected_boundingBox_1 = BoundingBox(imageName='000001', classId='person', classConfidence= 0.893202,
60 | x=52, y=4, w=352, h=442, typeCoordinates=CoordinatesType.Absolute,
61 | bbType=BBType.Detected, format=BBFormat.XYX2Y2, imgSize=(353,500))
62 | # Detected bounding boxes of 000002.jpg
63 | detected_boundingBox_2 = BoundingBox(imageName='000002', classId='train', classConfidence=0.863700,
64 | x=140, y=195, w=209, h=293, typeCoordinates=CoordinatesType.Absolute,
65 | bbType=BBType.Detected, format=BBFormat.XYX2Y2, imgSize=(335,500))
66 | # Detected bounding boxes of 000003.jpg
67 | detected_boundingBox_3 = BoundingBox(imageName='000003', classId='bench', classConfidence=0.278000,
68 | x=388, y=288, w=493, h=331, typeCoordinates=CoordinatesType.Absolute,
69 | bbType=BBType.Detected, format=BBFormat.XYX2Y2, imgSize=(500,375))
70 | ```
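
Once created, a box can be read back in either coordinate format through `getAbsoluteBoundingBox`, the same accessor used internally by the `Evaluator`. A minimal sketch (illustrative values; the expected outputs assume the usual `w = x2 - x1` convention):

```python
bb = BoundingBox(imageName='000001', classId='person', classConfidence=0.9,
                 x=52, y=4, w=352, h=442, typeCoordinates=CoordinatesType.Absolute,
                 bbType=BBType.Detected, format=BBFormat.XYX2Y2)
print(bb.getAbsoluteBoundingBox(BBFormat.XYX2Y2))  # expected: (52, 4, 352, 442)
print(bb.getAbsoluteBoundingBox(BBFormat.XYWH))    # expected: (52, 4, 300, 438)
```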
71 |
72 | The object `BoundingBoxes` represents a collection of the bounding boxes (ground truth and detected). Evaluation methods of the class `Evaluator` use the `BoundingBoxes` object to apply the metrics. The following code shows how to add the bounding boxes to the collection:
73 |
74 | ```python
75 | # Creating the object of the class BoundingBoxes
76 | myBoundingBoxes = BoundingBoxes()
77 | # Add all bounding boxes to the BoundingBoxes object:
78 | myBoundingBoxes.addBoundingBox(gt_boundingBox_1)
79 | myBoundingBoxes.addBoundingBox(gt_boundingBox_2)
80 | myBoundingBoxes.addBoundingBox(gt_boundingBox_3)
81 | myBoundingBoxes.addBoundingBox(gt_boundingBox_4)
82 | myBoundingBoxes.addBoundingBox(gt_boundingBox_5)
83 | myBoundingBoxes.addBoundingBox(detected_boundingBox_1)
84 | myBoundingBoxes.addBoundingBox(detected_boundingBox_2)
85 | myBoundingBoxes.addBoundingBox(detected_boundingBox_3)
86 | ```
87 |
88 | You can use the method `drawAllBoundingBoxes(image, imageName)` to add ground truth bounding boxes (in green) and detected bounding boxes (in red) into your images:
89 |
90 | ```python
91 | import cv2
92 | import numpy as np
93 | import os
94 | currentPath = os.path.dirname(os.path.realpath(__file__))
95 | gtImages = ['000001', '000002', '000003']
96 | for imageName in gtImages:
97 | im = cv2.imread(os.path.join(currentPath,'images','groundtruths',imageName)+'.jpg')
98 | # Add bounding boxes
99 | im = myBoundingBoxes.drawAllBoundingBoxes(im, imageName)
100 | # Uncomment the lines below if you want to show the images
101 | #cv2.imshow(imageName+'.jpg', im)
102 | #cv2.waitKey(0)
103 | cv2.imwrite(os.path.join(currentPath,'images',imageName+'.jpg'),im)
104 | print('Image %s created successfully!' % imageName)
105 | ```
106 |
107 | Results:
108 |
109 |
110 |
111 | <img src="https://raw.githubusercontent.com/rafaelpadilla/Object-Detection-Metrics/dcb285e7dea7e73d9480937d58de0e9bdfc20051/samples/sample_1/images/000001.jpg" />
112 | <img src="https://raw.githubusercontent.com/rafaelpadilla/Object-Detection-Metrics/dcb285e7dea7e73d9480937d58de0e9bdfc20051/samples/sample_1/images/000002.jpg" />
113 | <img src="https://raw.githubusercontent.com/rafaelpadilla/Object-Detection-Metrics/dcb285e7dea7e73d9480937d58de0e9bdfc20051/samples/sample_1/images/000003.jpg" />
114 |
115 |
116 | **Of course you won't build your bounding boxes one by one as done in this example.** You should read your detections within a loop and create your bounding boxes inside of it. [Sample_2](https://github.com/rafaelpadilla/Object-Detection-Metrics/tree/master/samples/sample_2) demonstrates how to read detections from folders containing .txt files.
117 |
--------------------------------------------------------------------------------
/samples/sample_1/_init_paths.py:
--------------------------------------------------------------------------------
1 | ###########################################################################################
2 | # #
3 | # Set up paths for the Object Detection Metrics #
4 | # #
5 | # Developed by: Rafael Padilla (rafael.padilla@smt.ufrj.br) #
6 | # SMT - Signal Multimedia and Telecommunications Lab #
7 | # COPPE - Universidade Federal do Rio de Janeiro #
8 | # Last modification: May 24th 2018 #
9 | ###########################################################################################
10 |
11 | import sys
12 | import os
13 |
14 |
15 | def add_path(path):
16 | if path not in sys.path:
17 | sys.path.insert(0, path)
18 |
19 |
20 | currentPath = os.path.dirname(os.path.realpath(__file__))
21 |
22 | # Add lib to PYTHONPATH
23 | libPath = os.path.join(currentPath, '..', '..', 'lib')
24 | add_path(libPath)
25 |
--------------------------------------------------------------------------------
/samples/sample_1/images/000001.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rafaelpadilla/Object-Detection-Metrics/dcb285e7dea7e73d9480937d58de0e9bdfc20051/samples/sample_1/images/000001.jpg
--------------------------------------------------------------------------------
/samples/sample_1/images/000002.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rafaelpadilla/Object-Detection-Metrics/dcb285e7dea7e73d9480937d58de0e9bdfc20051/samples/sample_1/images/000002.jpg
--------------------------------------------------------------------------------
/samples/sample_1/images/000003.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rafaelpadilla/Object-Detection-Metrics/dcb285e7dea7e73d9480937d58de0e9bdfc20051/samples/sample_1/images/000003.jpg
--------------------------------------------------------------------------------
/samples/sample_1/images/detections/000001.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rafaelpadilla/Object-Detection-Metrics/dcb285e7dea7e73d9480937d58de0e9bdfc20051/samples/sample_1/images/detections/000001.png
--------------------------------------------------------------------------------
/samples/sample_1/images/detections/000002.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rafaelpadilla/Object-Detection-Metrics/dcb285e7dea7e73d9480937d58de0e9bdfc20051/samples/sample_1/images/detections/000002.png
--------------------------------------------------------------------------------
/samples/sample_1/images/detections/000003.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rafaelpadilla/Object-Detection-Metrics/dcb285e7dea7e73d9480937d58de0e9bdfc20051/samples/sample_1/images/detections/000003.png
--------------------------------------------------------------------------------
/samples/sample_1/images/groundtruths/000001.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rafaelpadilla/Object-Detection-Metrics/dcb285e7dea7e73d9480937d58de0e9bdfc20051/samples/sample_1/images/groundtruths/000001.jpg
--------------------------------------------------------------------------------
/samples/sample_1/images/groundtruths/000002.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rafaelpadilla/Object-Detection-Metrics/dcb285e7dea7e73d9480937d58de0e9bdfc20051/samples/sample_1/images/groundtruths/000002.jpg
--------------------------------------------------------------------------------
/samples/sample_1/images/groundtruths/000003.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rafaelpadilla/Object-Detection-Metrics/dcb285e7dea7e73d9480937d58de0e9bdfc20051/samples/sample_1/images/groundtruths/000003.jpg
--------------------------------------------------------------------------------
/samples/sample_1/sample_1.py:
--------------------------------------------------------------------------------
1 | ###########################################################################################
2 | # #
3 | # This sample demonstrates: #
4 | # * How to create your own bounding boxes (detections and ground truth) manually; #
5 | # * Ground truth bounding boxes are drawn in green and detected boxes are drawn in red; #
6 | # * Create objects of the class BoundingBoxes with your bounding boxes; #
7 | # * Create images with detections and ground truth; #
8 | # #
9 | # Developed by: Rafael Padilla (rafael.padilla@smt.ufrj.br) #
10 | # SMT - Signal Multimedia and Telecommunications Lab #
11 | # COPPE - Universidade Federal do Rio de Janeiro #
12 | # Last modification: May 24th 2018 #
13 | ###########################################################################################
14 |
15 | import os
16 |
17 | import _init_paths
18 | import cv2
19 | from BoundingBox import BoundingBox
20 | from BoundingBoxes import BoundingBoxes
21 | from utils import *
22 |
23 | ###########################
24 | # Defining bounding boxes #
25 | ###########################
26 | # Ground truth bounding boxes of 000001.jpg
27 | gt_boundingBox_1 = BoundingBox(
28 | imageName='000001',
29 | classId='dog',
30 | x=0.34419263456090654,
31 | y=0.611,
32 | w=0.4164305949008499,
33 | h=0.262,
34 | typeCoordinates=CoordinatesType.Relative,
35 | bbType=BBType.GroundTruth,
36 | format=BBFormat.XYWH,
37 | imgSize=(353, 500))
38 | gt_boundingBox_2 = BoundingBox(
39 | imageName='000001',
40 | classId='person',
41 | x=0.509915014164306,
42 | y=0.51,
43 | w=0.9745042492917847,
44 | h=0.972,
45 | typeCoordinates=CoordinatesType.Relative,
46 | bbType=BBType.GroundTruth,
47 | format=BBFormat.XYWH,
48 | imgSize=(353, 500))
49 | # Ground truth bounding boxes of 000002.jpg
50 | gt_boundingBox_3 = BoundingBox(
51 | imageName='000002',
52 | classId='train',
53 | x=0.5164179104477612,
54 | y=0.501,
55 | w=0.20298507462686569,
56 | h=0.202,
57 | typeCoordinates=CoordinatesType.Relative,
58 | bbType=BBType.GroundTruth,
59 | format=BBFormat.XYWH,
60 | imgSize=(335, 500))
61 | # Ground truth bounding boxes of 000003.jpg
62 | gt_boundingBox_4 = BoundingBox(
63 | imageName='000003',
64 | classId='bench',
65 | x=0.338,
66 | y=0.4666666666666667,
67 | w=0.184,
68 | h=0.10666666666666666,
69 | typeCoordinates=CoordinatesType.Relative,
70 | bbType=BBType.GroundTruth,
71 | format=BBFormat.XYWH,
72 | imgSize=(500, 375))
73 | gt_boundingBox_5 = BoundingBox(
74 | imageName='000003',
75 | classId='bench',
76 | x=0.546,
77 | y=0.48133333333333334,
78 | w=0.136,
79 | h=0.13066666666666665,
80 | typeCoordinates=CoordinatesType.Relative,
81 | bbType=BBType.GroundTruth,
82 | format=BBFormat.XYWH,
83 | imgSize=(500, 375))
84 | # Detected bounding boxes of 000001.jpg
85 | detected_boundingBox_1 = BoundingBox(
86 | imageName='000001',
87 | classId='person',
88 | classConfidence=0.893202,
89 | x=52,
90 | y=4,
91 | w=352,
92 | h=442,
93 | typeCoordinates=CoordinatesType.Absolute,
94 | bbType=BBType.Detected,
95 | format=BBFormat.XYX2Y2,
96 | imgSize=(353, 500))
97 | # Detected bounding boxes of 000002.jpg
98 | detected_boundingBox_2 = BoundingBox(
99 | imageName='000002',
100 | classId='train',
101 | classConfidence=0.863700,
102 | x=140,
103 | y=195,
104 | w=209,
105 | h=293,
106 | typeCoordinates=CoordinatesType.Absolute,
107 | bbType=BBType.Detected,
108 | format=BBFormat.XYX2Y2,
109 | imgSize=(335, 500))
110 | # Detected bounding boxes of 000003.jpg
111 | detected_boundingBox_3 = BoundingBox(
112 | imageName='000003',
113 | classId='bench',
114 | classConfidence=0.278000,
115 | x=388,
116 | y=288,
117 | w=493,
118 | h=331,
119 | typeCoordinates=CoordinatesType.Absolute,
120 | bbType=BBType.Detected,
121 | format=BBFormat.XYX2Y2,
122 | imgSize=(500, 375))
123 | # Creating the object of the class BoundingBoxes
124 | myBoundingBoxes = BoundingBoxes()
125 | # Add all bounding boxes to the BoundingBoxes object:
126 | myBoundingBoxes.addBoundingBox(gt_boundingBox_1)
127 | myBoundingBoxes.addBoundingBox(gt_boundingBox_2)
128 | myBoundingBoxes.addBoundingBox(gt_boundingBox_3)
129 | myBoundingBoxes.addBoundingBox(gt_boundingBox_4)
130 | myBoundingBoxes.addBoundingBox(gt_boundingBox_5)
131 | myBoundingBoxes.addBoundingBox(detected_boundingBox_1)
132 | myBoundingBoxes.addBoundingBox(detected_boundingBox_2)
133 | myBoundingBoxes.addBoundingBox(detected_boundingBox_3)
134 |
135 | ###################
136 | # Creating images #
137 | ###################
138 | currentPath = os.path.dirname(os.path.realpath(__file__))
139 | gtImages = ['000001', '000002', '000003']
140 | for imageName in gtImages:
141 | im = cv2.imread(os.path.join(currentPath, 'images', 'groundtruths', imageName) + '.jpg')
142 | # Add bounding boxes
143 | im = myBoundingBoxes.drawAllBoundingBoxes(im, imageName)
144 | # cv2.imshow(imageName+'.jpg', im)
145 | # cv2.waitKey(0)
146 | cv2.imwrite(os.path.join(currentPath, 'images', imageName + '.jpg'), im)
147 | print('Image %s created successfully!' % imageName)
148 |
--------------------------------------------------------------------------------
/samples/sample_2/README.md:
--------------------------------------------------------------------------------
1 | # Sample 2
2 |
3 | This sample was created for those who want to understand more about the metric functions of this project. If you just want to evaluate your detections dealing with a high level interface, just check the instructions [here](https://github.com/rafaelpadilla/Object-Detection-Metrics/blob/master/README.md#how-to-use-this-project).
4 |
5 | In order to reproduce the results of this code **using the high level interface**, just navigate to the folder where ```pascalvoc.py``` is and run the following command: ```python pascalvoc.py -t 0.3```
6 |
7 | or if you want to be more complete: ```python pascalvoc.py -gt groundtruths/ -det detections/ -t 0.3```
8 |
9 | or if you want to use [relative coordinates](https://github.com/rafaelpadilla/Object-Detection-Metrics/tree/master/detections_rel) (like results retrieved from YOLO), you need to inform that the detected coordinates are relative and specify the image size:
10 | ```python pascalvoc.py -gt groundtruths/ -det detections_rel/ -detcoords rel -imgsize 200,200 -t 0.3```
11 |
12 | Or if you want to play a little bit with this project, follow the steps below:
13 |
14 | ### Evaluation Metrics
15 |
16 | First we need to represent each bounding box with the class `BoundingBox`. The function `getBoundingBoxes` reads .txt files containing the coordinates of the [detected](https://github.com/rafaelpadilla/Object-Detection-Metrics/tree/master/samples/sample_2/detections) and [ground truth](https://github.com/rafaelpadilla/Object-Detection-Metrics/tree/master/samples/sample_2/groundtruths) bounding boxes and creates a `BoundingBox` object for each of them. Then, it gathers all boxes in the `BoundingBoxes` object and returns it:
17 |
18 | ```python
19 | def getBoundingBoxes():
20 | """Read txt files containing bounding boxes (ground truth and detections)."""
21 | allBoundingBoxes = BoundingBoxes()
22 | import glob
23 | import os
24 | # Read ground truths
25 | currentPath = os.path.dirname(os.path.abspath(__file__))
26 | folderGT = os.path.join(currentPath,'groundtruths')
27 | os.chdir(folderGT)
28 | files = glob.glob("*.txt")
29 | files.sort()
32 | # Read GT detections from txt file
33 | # Each line of the files in the groundtruths folder represents a ground truth bounding box (bounding boxes that a detector should detect)
34 | # Each value of each line is "class_id, x, y, width, height" respectively
35 | # Class_id represents the class of the bounding box
36 | # x, y represents the most top-left coordinates of the bounding box
37 |     # width, height represents the width and the height of the bounding box
38 | for f in files:
39 | nameOfImage = f.replace(".txt","")
40 | fh1 = open(f, "r")
41 | for line in fh1:
42 | line = line.replace("\n","")
43 | if line.replace(' ','') == '':
44 | continue
45 | splitLine = line.split(" ")
46 | idClass = splitLine[0] #class
47 |             x = float(splitLine[1])
48 | y = float(splitLine[2])
49 | w = float(splitLine[3])
50 | h = float(splitLine[4])
51 | bb = BoundingBox(nameOfImage,idClass,x,y,w,h,CoordinatesType.Absolute, (200,200), BBType.GroundTruth, format=BBFormat.XYWH)
52 | allBoundingBoxes.addBoundingBox(bb)
53 | fh1.close()
54 | # Read detections
55 | folderDet = os.path.join(currentPath,'detections')
56 | os.chdir(folderDet)
57 | files = glob.glob("*.txt")
58 | files.sort()
59 | # Read detections from txt file
60 | # Each line of the files in the detections folder represents a detected bounding box.
61 | # Each value of each line is "class_id, confidence, x, y, width, height" respectively
62 | # Class_id represents the class of the detected bounding box
63 | # Confidence represents the confidence (from 0 to 1) that this detection belongs to the class_id.
64 | # x, y represents the top-left coordinates of the bounding box
65 | # width, height represents the dimensions of the bounding box
66 | for f in files:
67 | # nameOfImage = f.replace("_det.txt","")
68 | nameOfImage = f.replace(".txt","")
69 | # Read detections from txt file
70 | fh1 = open(f, "r")
71 | for line in fh1:
72 | line = line.replace("\n","")
73 | if line.replace(' ','') == '':
74 | continue
75 | splitLine = line.split(" ")
76 | idClass = splitLine[0] #class
77 | confidence = float(splitLine[1]) #confidence
78 | x = float(splitLine[2])
79 | y = float(splitLine[3])
80 | w = float(splitLine[4])
81 | h = float(splitLine[5])
82 | bb = BoundingBox(nameOfImage, idClass,x,y,w,h,CoordinatesType.Absolute, (200,200), BBType.Detected, confidence, format=BBFormat.XYWH)
83 | allBoundingBoxes.addBoundingBox(bb)
84 | fh1.close()
85 | return allBoundingBoxes
86 |
87 | # Read txt files containing bounding boxes (ground truth and detections)
88 | boundingboxes = getBoundingBoxes()
89 | ```
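
One detail worth noting: `getBoundingBoxes` calls `os.chdir`, which changes the working directory of the whole process. If you want to avoid that side effect, an equivalent approach (a sketch, not the code this sample actually uses) is to glob with absolute paths:

```python
import glob
import os

currentPath = os.path.dirname(os.path.abspath(__file__))
folderGT = os.path.join(currentPath, 'groundtruths')
# Glob with an absolute pattern instead of changing the working directory
files = sorted(glob.glob(os.path.join(folderGT, '*.txt')))
for f in files:
    nameOfImage = os.path.basename(f).replace('.txt', '')
    # ... parse each line as getBoundingBoxes does ...
```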
90 |
91 | Note that the text files contain one bounding box per line in the format:
92 |
93 | **\<class_id\> \<x\> \<y\> \<width\> \<height\>**: For ground truth files.
94 |
95 | **\<class_id\> \<confidence\> \<x\> \<y\> \<width\> \<height\>**: For detection files.
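
For instance, the first ground truth file of this sample (`groundtruths/00001.txt`) contains:

```
object 25 16 38 56
object 129 123 41 62
```

and its corresponding detection file (`detections/00001.txt`) contains:

```
object .88 5 67 31 48
object .70 119 111 40 67
object .80 124 9 49 67
```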
96 |
97 | The next step is to create an `Evaluator` object that provides us with the metrics:
98 |
99 | ```python
100 | # Create an evaluator object in order to obtain the metrics
101 | evaluator = Evaluator()
102 | ```
103 |
104 | With the ```evaluator``` object, you will have access to methods that retrieve the metrics:
105 |
106 | | Method | Description | Parameters | Returns |
107 | |------|-----------|----------|-------|
108 | | GetPascalVOCMetrics | Get the metrics used by the VOC Pascal 2012 challenge | `boundingboxes`: Object of the class `BoundingBoxes` representing ground truth and detected bounding boxes; `IOUThreshold`: IOU threshold indicating which detections will be considered TP or FP (default value = 0.5) | List of dictionaries. Each dictionary contains information and metrics of each class. The keys of each dictionary are: `dict['class']`: the class this dictionary refers to; `dict['precision']`: array with the precision values; `dict['recall']`: array with the recall values; `dict['AP']`: **average precision**; `dict['interpolated precision']`: interpolated precision values; `dict['interpolated recall']`: interpolated recall values; `dict['total positives']`: total number of ground truth positives; `dict['total TP']`: total number of True Positive detections; `dict['total FP']`: total number of False Positive detections |
109 | | PlotPrecisionRecallCurve | Plot the Precision x Recall curve for a given class | `classId`: The class whose curve will be plotted; `boundingBoxes`: Object of the class `BoundingBoxes` representing ground truth and detected bounding boxes; `IOUThreshold`: IOU threshold indicating which detections will be considered TP or FP (default value = 0.5); `showAP`: if True, the average precision value will be shown in the title of the graph (default = False); `showInterpolatedPrecision`: if True, the interpolated precision will also be plotted (default = False); `savePath`: if provided, the plot will be saved as an image at this path (e.g. `/home/mywork/ap.png`) (default = None); `showGraphic`: if True, the plot will be shown (default = True) | A dictionary containing information and metrics for the class. The keys of the dictionary are: `dict['class']`: the class this dictionary refers to; `dict['precision']`: array with the precision values; `dict['recall']`: array with the recall values; `dict['AP']`: **average precision**; `dict['interpolated precision']`: interpolated precision values; `dict['interpolated recall']`: interpolated recall values; `dict['total positives']`: total number of ground truth positives; `dict['total TP']`: total number of True Positive detections; `dict['total FP']`: total number of False Positive detections |
110 |
111 | The snippet below plots the Precision x Recall curve:
112 |
113 | ```python
114 | # Plot Precision x Recall curve
115 | evaluator.PlotPrecisionRecallCurve('object', # Class to show
116 | boundingboxes, # Object containing all bounding boxes (ground truths and detections)
117 | IOUThreshold=0.3, # IOU threshold
118 | showAP=True, # Show Average Precision in the title of the plot
119 | showInterpolatedPrecision=False) # Don't plot the interpolated precision curve
120 | ```
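
If you prefer to save the figure to disk instead of displaying it, the same method accepts the `savePath` and `showGraphic` parameters listed in the table above (the output file name below is just an example):

```python
# Save the Precision x Recall curve as an image without opening a window
evaluator.PlotPrecisionRecallCurve('object',
                                   boundingboxes,
                                   IOUThreshold=0.3,
                                   showAP=True,
                                   savePath='object_pr_curve.png',  # example output path
                                   showGraphic=False)               # do not display the plot
```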
121 |
122 | We can access the Average Precision value of each class using the method `GetPascalVOCMetrics`:
123 |
124 | ```python
125 | metricsPerClass = evaluator.GetPascalVOCMetrics(boundingboxes, IOUThreshold=0.3)
126 | print("Average precision values per class:\n")
127 | # Loop through classes to obtain their metrics
128 | for mc in metricsPerClass:
129 | # Get metric values per each class
130 | c = mc['class']
131 | precision = mc['precision']
132 | recall = mc['recall']
133 | average_precision = mc['AP']
134 | ipre = mc['interpolated precision']
135 | irec = mc['interpolated recall']
136 | # Print AP per class
137 | print('%s: %f' % (c, average_precision))
138 | ```
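
This sample has a single class (`object`), so one AP value is printed. With multiple classes, a common convention is to summarize the results as the mean Average Precision (mAP), i.e. the average of the per-class APs; the loop above could be extended as follows (this aggregation is a standard convention, not something the sample computes for you):

```python
# Mean Average Precision (mAP): average of the per-class AP values
aps = [mc['AP'] for mc in metricsPerClass]
mAP = sum(aps) / len(aps)
print('mAP: %f' % mAP)
```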
139 |
140 | The full code is available [here](https://github.com/rafaelpadilla/Object-Detection-Metrics/blob/master/samples/sample_2/sample_2.py).
141 |
--------------------------------------------------------------------------------
/samples/sample_2/_init_paths.py:
--------------------------------------------------------------------------------
1 | ###########################################################################################
2 | # #
3 | # Set up paths for the Object Detection Metrics #
4 | # #
5 | # Developed by: Rafael Padilla (rafael.padilla@smt.ufrj.br) #
6 | # SMT - Signal Multimedia and Telecommunications Lab #
7 | # COPPE - Universidade Federal do Rio de Janeiro #
8 | # Last modification: May 24th 2018 #
9 | ###########################################################################################
10 |
11 | import sys
12 | import os
13 |
14 |
15 | def add_path(path):
16 | if path not in sys.path:
17 | sys.path.insert(0, path)
18 |
19 |
20 | currentPath = os.path.dirname(os.path.realpath(__file__))
21 |
22 | # Add lib to PYTHONPATH
23 | libPath = os.path.join(currentPath, '..', '..', 'lib')
24 | add_path(libPath)
25 |
--------------------------------------------------------------------------------
/samples/sample_2/detections/00001.txt:
--------------------------------------------------------------------------------
1 | object .88 5 67 31 48
2 | object .70 119 111 40 67
3 | object .80 124 9 49 67
4 |
--------------------------------------------------------------------------------
/samples/sample_2/detections/00002.txt:
--------------------------------------------------------------------------------
1 | object .71 64 111 64 58
2 | object .54 26 140 60 47
3 | object .74 19 18 43 35
4 |
--------------------------------------------------------------------------------
/samples/sample_2/detections/00003.txt:
--------------------------------------------------------------------------------
1 | object .18 109 15 77 39
2 | object .67 86 63 46 45
3 | object .38 160 62 36 53
4 | object .91 105 131 47 47
5 | object .44 18 148 40 44
6 |
--------------------------------------------------------------------------------
/samples/sample_2/detections/00004.txt:
--------------------------------------------------------------------------------
1 | object .35 83 28 28 26
2 | object .78 28 68 42 67
3 | object .45 87 89 25 39
4 | object .14 10 155 60 26
5 |
--------------------------------------------------------------------------------
/samples/sample_2/detections/00005.txt:
--------------------------------------------------------------------------------
1 | object .62 50 38 28 46
2 | object .44 95 11 53 28
3 | object .95 29 131 72 29
4 | object .23 29 163 72 29
5 |
--------------------------------------------------------------------------------
/samples/sample_2/detections/00006.txt:
--------------------------------------------------------------------------------
1 | object .45 43 48 74 38
2 | object .84 17 155 29 35
3 | object .43 95 110 25 42
4 |
--------------------------------------------------------------------------------
/samples/sample_2/detections/00007.txt:
--------------------------------------------------------------------------------
1 | object .48 16 20 101 88
2 | object .95 33 116 37 49
3 |
--------------------------------------------------------------------------------
/samples/sample_2/groundtruths/00001.txt:
--------------------------------------------------------------------------------
1 | object 25 16 38 56
2 | object 129 123 41 62
3 |
--------------------------------------------------------------------------------
/samples/sample_2/groundtruths/00002.txt:
--------------------------------------------------------------------------------
1 | object 123 11 43 55
2 | object 38 132 59 45
3 |
--------------------------------------------------------------------------------
/samples/sample_2/groundtruths/00003.txt:
--------------------------------------------------------------------------------
1 | object 16 14 35 48
2 | object 123 30 49 44
3 | object 99 139 47 47
4 |
--------------------------------------------------------------------------------
/samples/sample_2/groundtruths/00004.txt:
--------------------------------------------------------------------------------
1 | object 53 42 40 52
2 | object 154 43 31 34
3 |
--------------------------------------------------------------------------------
/samples/sample_2/groundtruths/00005.txt:
--------------------------------------------------------------------------------
1 | object 59 31 44 51
2 | object 48 128 34 52
3 |
--------------------------------------------------------------------------------
/samples/sample_2/groundtruths/00006.txt:
--------------------------------------------------------------------------------
1 | object 36 89 52 76
2 | object 62 58 44 67
3 |
--------------------------------------------------------------------------------
/samples/sample_2/groundtruths/00007.txt:
--------------------------------------------------------------------------------
1 | object 28 31 55 63
2 | object 58 67 50 58
3 |
--------------------------------------------------------------------------------
/samples/sample_2/sample_2.py:
--------------------------------------------------------------------------------
1 | ###########################################################################################
2 | # #
3 | # This sample shows how to evaluate object detections applying the following metrics: #
4 | # * Precision x Recall curve ----> used by VOC PASCAL 2012 #
5 | # * Average Precision (AP) ----> used by VOC PASCAL 2012 #
6 | # #
7 | # Developed by: Rafael Padilla (rafael.padilla@smt.ufrj.br) #
8 | # SMT - Signal Multimedia and Telecommunications Lab #
9 | # COPPE - Universidade Federal do Rio de Janeiro #
10 | # Last modification: May 24th 2018 #
11 | ###########################################################################################
12 |
13 | import _init_paths
14 | from BoundingBox import BoundingBox
15 | from BoundingBoxes import BoundingBoxes
16 | from Evaluator import *
17 | from utils import *
18 |
19 |
20 | def getBoundingBoxes():
21 | """Read txt files containing bounding boxes (ground truth and detections)."""
22 | import glob
23 | import os
24 |
25 | # Read ground truths
26 | currentPath = os.path.dirname(os.path.abspath(__file__))
27 | folderGT = os.path.join(currentPath, 'groundtruths')
28 | os.chdir(folderGT)
29 | files = glob.glob("*.txt")
30 | files.sort()
31 | # Class representing bounding boxes (ground truths and detections)
32 | allBoundingBoxes = BoundingBoxes()
33 | # Read GT detections from txt file
34 | # Each line of the files in the groundtruths folder represents a ground truth bounding box
35 | # (bounding boxes that a detector should detect)
36 | # Each value of each line is "class_id, x, y, width, height" respectively
37 | # Class_id represents the class of the bounding box
38 | # x, y represents the top-left coordinates of the bounding box
39 | # width, height represents the dimensions of the bounding box
40 | for f in files:
41 | nameOfImage = f.replace(".txt", "")
42 | fh1 = open(f, "r")
43 | for line in fh1:
44 | line = line.replace("\n", "")
45 | if line.replace(' ', '') == '':
46 | continue
47 | splitLine = line.split(" ")
48 | idClass = splitLine[0] # class
49 | x = float(splitLine[1]) # x coordinate
50 | y = float(splitLine[2])
51 | w = float(splitLine[3])
52 | h = float(splitLine[4])
53 | bb = BoundingBox(
54 | nameOfImage,
55 | idClass,
56 | x,
57 | y,
58 | w,
59 | h,
60 | CoordinatesType.Absolute, (200, 200),
61 | BBType.GroundTruth,
62 | format=BBFormat.XYWH)
63 | allBoundingBoxes.addBoundingBox(bb)
64 | fh1.close()
65 | # Read detections
66 | folderDet = os.path.join(currentPath, 'detections')
67 | os.chdir(folderDet)
68 | files = glob.glob("*.txt")
69 | files.sort()
70 | # Read detections from txt file
71 | # Each line of the files in the detections folder represents a detected bounding box.
72 | # Each value of each line is "class_id, confidence, x, y, width, height" respectively
73 | # Class_id represents the class of the detected bounding box
74 | # Confidence represents the confidence (from 0 to 1) that this detection belongs to the class_id.
75 | # x, y represents the top-left coordinates of the bounding box
76 | # width, height represents the dimensions of the bounding box
77 | for f in files:
78 | # nameOfImage = f.replace("_det.txt","")
79 | nameOfImage = f.replace(".txt", "")
80 | # Read detections from txt file
81 | fh1 = open(f, "r")
82 | for line in fh1:
83 | line = line.replace("\n", "")
84 | if line.replace(' ', '') == '':
85 | continue
86 | splitLine = line.split(" ")
87 | idClass = splitLine[0] # class
88 | confidence = float(splitLine[1]) # confidence
89 | x = float(splitLine[2])
90 | y = float(splitLine[3])
91 | w = float(splitLine[4])
92 | h = float(splitLine[5])
93 | bb = BoundingBox(
94 | nameOfImage,
95 | idClass,
96 | x,
97 | y,
98 | w,
99 | h,
100 | CoordinatesType.Absolute, (200, 200),
101 | BBType.Detected,
102 | confidence,
103 | format=BBFormat.XYWH)
104 | allBoundingBoxes.addBoundingBox(bb)
105 | fh1.close()
106 | return allBoundingBoxes
107 |
108 |
109 | def createImages(dictGroundTruth, dictDetected):
110 | """Create representative images with bounding boxes."""
111 | import numpy as np
112 | import cv2
113 | # Define image size
114 | width = 200
115 | height = 200
116 | # Loop through the dictionary with ground truth detections
117 | for key in dictGroundTruth:
118 | image = np.zeros((height, width, 3), np.uint8)
119 | gt_boundingboxes = dictGroundTruth[key]
120 | image = gt_boundingboxes.drawAllBoundingBoxes(image)
121 | detection_boundingboxes = dictDetected[key]
122 | image = detection_boundingboxes.drawAllBoundingBoxes(image)
123 | # Show detection and its GT
124 | cv2.imshow(key, image)
125 | cv2.waitKey()
126 |
127 |
128 | # Read txt files containing bounding boxes (ground truth and detections)
129 | boundingboxes = getBoundingBoxes()
130 | # Uncomment the line below to generate images based on the bounding boxes (note: createImages expects dictGroundTruth and dictDetected, which this script does not build; you would need to create them first)
131 | # createImages(dictGroundTruth, dictDetected)
132 | # Create an evaluator object in order to obtain the metrics
133 | evaluator = Evaluator()
134 | ##############################################################
135 | # VOC PASCAL Metrics
136 | ##############################################################
137 | # Plot Precision x Recall curve
138 | evaluator.PlotPrecisionRecallCurve(
139 | boundingboxes, # Object containing all bounding boxes (ground truths and detections)
140 | IOUThreshold=0.3, # IOU threshold
141 | method=MethodAveragePrecision.EveryPointInterpolation, # As the official matlab code
142 | showAP=True, # Show Average Precision in the title of the plot
143 | showInterpolatedPrecision=True) # Plot the interpolated precision curve
144 | # Get metrics with PASCAL VOC metrics
145 | metricsPerClass = evaluator.GetPascalVOCMetrics(
146 | boundingboxes, # Object containing all bounding boxes (ground truths and detections)
147 | IOUThreshold=0.3, # IOU threshold
148 | method=MethodAveragePrecision.EveryPointInterpolation) # As the official matlab code
149 | print("Average precision values per class:\n")
150 | # Loop through classes to obtain their metrics
151 | for mc in metricsPerClass:
152 | # Get metric values per each class
153 | c = mc['class']
154 | precision = mc['precision']
155 | recall = mc['recall']
156 | average_precision = mc['AP']
157 | ipre = mc['interpolated precision']
158 | irec = mc['interpolated recall']
159 | # Print AP per class
160 | print('%s: %f' % (c, average_precision))
161 |
--------------------------------------------------------------------------------