├── .github
│   └── workflows
│       └── python-package.yml
├── .gitignore
├── LICENSE
├── README.md
├── data
│   └── predictions.csv
├── example.py
├── images
│   ├── PR.PNG
│   ├── TP.PNG
│   ├── coco.PNG
│   ├── inf.PNG
│   ├── predictions.PNG
│   └── summary.PNG
├── nbs
│   └── object-detection-metrics.ipynb
├── objdetecteval
│   ├── __init__.py
│   ├── data
│   │   ├── __init__.py
│   │   └── bbox_formats.py
│   ├── metrics
│   │   ├── __init__.py
│   │   ├── coco_metrics.py
│   │   ├── image_metrics.py
│   │   └── iou.py
│   └── test
│       ├── __init__.py
│       ├── data
│       │   ├── __init__.py
│       │   └── test_bbox_formats.py
│       └── metrics
│           ├── __init__.py
│           ├── test_coco_metrics.py
│           └── test_image_metrics.py
├── requirements.txt
└── setup.py
/.github/workflows/python-package.yml:
--------------------------------------------------------------------------------
1 | # This workflow will install Python dependencies, run tests and lint with a variety of Python versions
2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
3 |
4 | name: Python package
5 |
6 | on:
7 | push:
8 | branches: [ main ]
9 | pull_request:
10 | branches: [ main ]
11 |
12 | jobs:
13 | build:
14 |
15 | runs-on: ubuntu-latest
16 | strategy:
17 | fail-fast: false
18 | matrix:
19 | python-version: [3.7, 3.8, 3.9]
20 |
21 | steps:
22 | - uses: actions/checkout@v2
23 | - name: Set up Python ${{ matrix.python-version }}
24 | uses: actions/setup-python@v2
25 | with:
26 | python-version: ${{ matrix.python-version }}
27 | - name: Install dependencies
28 | run: |
29 | python -m pip install --upgrade pip
30 | python -m pip install flake8 pytest
31 | if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
32 | - name: Lint with flake8
33 | run: |
34 | # stop the build if there are Python syntax errors or undefined names
35 | flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
36 | # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
37 | flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
38 | - name: Test with pytest
39 | run: |
40 | pytest
41 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 |
84 | # pyenv
85 | .python-version
86 |
87 | # pipenv
88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
91 | # install all needed dependencies.
92 | #Pipfile.lock
93 |
94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95 | __pypackages__/
96 |
97 | # Celery stuff
98 | celerybeat-schedule
99 | celerybeat.pid
100 |
101 | # SageMath parsed files
102 | *.sage.py
103 |
104 | # Environments
105 | .env
106 | .venv
107 | env/
108 | venv/
109 | ENV/
110 | env.bak/
111 | venv.bak/
112 |
113 | # Spyder project settings
114 | .spyderproject
115 | .spyproject
116 |
117 | # Rope project settings
118 | .ropeproject
119 |
120 | # mkdocs documentation
121 | /site
122 |
123 | # mypy
124 | .mypy_cache/
125 | .dmypy.json
126 | dmypy.json
127 |
128 | # Pyre type checker
129 | .pyre/
130 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2021 alexhock
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Object Detection Metrics and Analysis in Python
2 |
3 | Creating evaluation metrics for projects involving object detection takes a surprising amount of time. This repo contains code we've found useful to speed up the results analysis for object detection projects. It provides:
4 |
5 | 1. Easy creation of a pandas inference dataframe enabling detailed analysis.
6 | 2. Summary statistics for easy plotting.
7 | 3. Calculation of COCO metrics from the same pandas dataframes (using pycocotools).
8 |
9 | To see a quick example of the functionality, have a look at the [starter notebook](./nbs/object-detection-metrics.ipynb).
10 |
11 | ## Installation
12 |
13 | pip install git+https://github.com/alexhock/object-detection-metrics
14 |
15 | Imports:
16 |
17 | from objdetecteval.metrics import (
18 | image_metrics as im,
19 | coco_metrics as cm
20 | )
21 |
22 | ## Example usage
23 |
24 |
25 | Take predictions in a pandas dataframe and a similar labels dataframe (the same columns except for `score`), and calculate an 'inference' dataframe:
26 |
27 | 
28 |
29 | infer_df = im.get_inference_metrics_from_df(preds_df, labels_df)
30 | infer_df.head()
31 |
32 | The inference dataframe enables easy analysis of the results, for example:
33 | 1. IoU stats by class and failure category
34 | 2. Highest-scoring false positive predictions
35 | 3. Comparison of bounding box distributions for FP and TP
36 | 4. etc. (see the sketch below)
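   | 
   | A minimal sketch of (1) and (2), assuming the inference-dataframe columns shown below (`class`, `FP`, `Confidence`, `IoU`):
   | 
   |     # IoU statistics per class
   |     iou_by_class = infer_df.groupby("class")["IoU"].describe()
   | 
   |     # ten highest-scoring false positives
   |     top_fps = (
   |         infer_df[infer_df["FP"] == 1]
   |         .sort_values("Confidence", ascending=False)
   |         .head(10)
   |     )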
37 |
38 |
39 | 
40 |
41 | class_summary_df = im.summarise_inference_metrics(infer_df)
42 | class_summary_df
43 |
44 | 
45 |
46 | This makes it easy to plot:
47 |
48 |
49 | figsize = (5, 5)
50 | fontsize = 16
51 |
52 | fig_confusion = (
53 | class_summary_df[["TP", "FP", "FN"]]
54 | .plot(kind="bar", figsize=figsize, width=1, align="center", fontsize=fontsize)
55 | .get_figure()
56 | )
57 |
58 |
59 | 
60 |
61 | fig_pr = (
62 | class_summary_df[["Precision", "Recall"]]
63 | .plot(kind="bar", figsize=figsize, width=1, align="center", fontsize=fontsize)
64 | .get_figure()
65 | )
66 |
67 | 
68 |
69 | ## COCO metrics are just as simple
70 | 
71 | Use the dataframes to calculate the full set of COCO metrics:
72 |
73 |
74 | res = cm.get_coco_from_dfs(preds_df, labels_df, False)
75 | res
76 |
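   | The returned dictionary is keyed by `'All'` (plus one entry per class when per-class output is requested), so individual scores are easy to pull out:
   | 
   |     map_all = res["All"]["AP_all"]             # AP @ IoU=0.50:0.95
   |     map_iou50 = res["All"]["AP_all_IOU_0_50"]  # AP @ IoU=0.50
   | 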
77 | 
78 |
--------------------------------------------------------------------------------
/data/predictions.csv:
--------------------------------------------------------------------------------
1 | id,xmin,ymin,xmax,ymax,label,score,image_name
2 | 1,686,295,854,451,Car,0.707925,000000001.jpg
3 | 2,289,1039,475,1212,Car,0.67978,000000001.jpg
4 | 3,971,677,1192,902,Truck,0.664441,000000001.jpg
5 | 4,1294,1222,1497,1408,Car,0.645636,000000002.jpg
6 | 5,686,295,854,451,Truck,0.707925,000000002.jpg
7 | 6,289,1039,475,1212,Truck,0.67978,000000003.jpg
8 | 7,971,677,1192,902,Truck,0.664441,000000003.jpg
9 | 8,1294,1222,1497,1408,Van,0.645636,000000003.jpg
10 |
--------------------------------------------------------------------------------
/example.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | from objdetecteval.metrics import (
3 | image_metrics as im,
4 | coco_metrics as cm
5 | )
6 |
7 |
8 | def main():
9 |
10 | preds_path = './data/predictions.csv'
11 | labels_path = preds_path
12 |
13 | preds_df = pd.read_csv(preds_path)
14 | labels_df = pd.read_csv(labels_path)
15 |
16 | infer_df = im.get_inference_metrics_from_df(preds_df, labels_df)
17 | print(infer_df.head())
18 | class_summary_df = im.summarise_inference_metrics(infer_df)
19 | print(class_summary_df.head())
20 |
21 | figsize = (10, 10)
22 | fontsize = 24
23 |
24 | fig_confusion = (
25 | class_summary_df[["TP", "FP", "FN"]]
26 | .plot(kind="bar", figsize=figsize, width=1, align="center", fontsize=fontsize)
27 | .get_figure()
28 | )
29 | fig_confusion.savefig('./confusion.png')
30 |
31 | fig_pr = (
32 | class_summary_df[["Precision", "Recall"]]
33 | .plot(kind="bar", figsize=figsize, width=1, align="center", fontsize=fontsize)
34 | .get_figure()
35 | )
36 | fig_pr.savefig('./pr.png')
37 |
38 |     # calculate COCO metrics from the predictions and labels dataframes
39 | res = cm.get_coco_from_dfs(preds_df, labels_df, False)
40 | print(res)
41 |
42 |
43 | if __name__ == "__main__":
44 | main()
45 |
--------------------------------------------------------------------------------
/images/PR.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/alexhock/object-detection-metrics/4ba0cfa7677f8f0b65776567b6ccb7b45ac3b1e0/images/PR.PNG
--------------------------------------------------------------------------------
/images/TP.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/alexhock/object-detection-metrics/4ba0cfa7677f8f0b65776567b6ccb7b45ac3b1e0/images/TP.PNG
--------------------------------------------------------------------------------
/images/coco.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/alexhock/object-detection-metrics/4ba0cfa7677f8f0b65776567b6ccb7b45ac3b1e0/images/coco.PNG
--------------------------------------------------------------------------------
/images/inf.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/alexhock/object-detection-metrics/4ba0cfa7677f8f0b65776567b6ccb7b45ac3b1e0/images/inf.PNG
--------------------------------------------------------------------------------
/images/predictions.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/alexhock/object-detection-metrics/4ba0cfa7677f8f0b65776567b6ccb7b45ac3b1e0/images/predictions.PNG
--------------------------------------------------------------------------------
/images/summary.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/alexhock/object-detection-metrics/4ba0cfa7677f8f0b65776567b6ccb7b45ac3b1e0/images/summary.PNG
--------------------------------------------------------------------------------
/nbs/object-detection-metrics.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "id": "01ddcbcf",
7 | "metadata": {},
8 | "outputs": [],
9 | "source": [
10 | "import os\n",
11 | "import sys\n",
12 | "sys.path.insert(0, os.path.abspath('../'))"
13 | ]
14 | },
15 | {
16 | "cell_type": "code",
17 | "execution_count": 2,
18 | "id": "1f22bb98",
19 | "metadata": {},
20 | "outputs": [],
21 | "source": [
22 | "import pandas as pd\n",
23 | "from objdetecteval.metrics import image_metrics as im, coco_metrics as cm"
24 | ]
25 | },
26 | {
27 | "cell_type": "markdown",
28 | "id": "58d50d4c",
29 | "metadata": {},
30 | "source": [
31 | "## Load Predictions and Labels in CSV format"
32 | ]
33 | },
34 | {
35 | "cell_type": "code",
36 | "execution_count": 3,
37 | "id": "cb409351",
38 | "metadata": {},
39 | "outputs": [],
40 | "source": [
41 | "preds_path = '../data/predictions.csv'\n",
42 | "labels_path = preds_path\n",
43 | "\n",
44 | "preds_df = pd.read_csv(preds_path)\n",
45 | "labels_df = pd.read_csv(labels_path)"
46 | ]
47 | },
48 | {
49 | "cell_type": "code",
50 | "execution_count": 4,
51 | "id": "4b3d49de",
52 | "metadata": {},
53 | "outputs": [
54 | {
55 | "data": {
56 | "text/html": [
57 | "
\n",
58 | "\n",
71 | "
\n",
72 | " \n",
73 | " \n",
74 | " | \n",
75 | " id | \n",
76 | " xmin | \n",
77 | " ymin | \n",
78 | " xmax | \n",
79 | " ymax | \n",
80 | " label | \n",
81 | " score | \n",
82 | " image_name | \n",
83 | "
\n",
84 | " \n",
85 | " \n",
86 | " \n",
87 | " 0 | \n",
88 | " 1 | \n",
89 | " 686 | \n",
90 | " 295 | \n",
91 | " 854 | \n",
92 | " 451 | \n",
93 | " Car | \n",
94 | " 0.707925 | \n",
95 | " 000000001.jpg | \n",
96 | "
\n",
97 | " \n",
98 | " 1 | \n",
99 | " 2 | \n",
100 | " 289 | \n",
101 | " 1039 | \n",
102 | " 475 | \n",
103 | " 1212 | \n",
104 | " Car | \n",
105 | " 0.679780 | \n",
106 | " 000000001.jpg | \n",
107 | "
\n",
108 | " \n",
109 | " 2 | \n",
110 | " 3 | \n",
111 | " 971 | \n",
112 | " 677 | \n",
113 | " 1192 | \n",
114 | " 902 | \n",
115 | " Truck | \n",
116 | " 0.664441 | \n",
117 | " 000000001.jpg | \n",
118 | "
\n",
119 | " \n",
120 | " 3 | \n",
121 | " 4 | \n",
122 | " 1294 | \n",
123 | " 1222 | \n",
124 | " 1497 | \n",
125 | " 1408 | \n",
126 | " Car | \n",
127 | " 0.645636 | \n",
128 | " 000000002.jpg | \n",
129 | "
\n",
130 | " \n",
131 | " 4 | \n",
132 | " 5 | \n",
133 | " 686 | \n",
134 | " 295 | \n",
135 | " 854 | \n",
136 | " 451 | \n",
137 | " Truck | \n",
138 | " 0.707925 | \n",
139 | " 000000002.jpg | \n",
140 | "
\n",
141 | " \n",
142 | "
\n",
143 | "
"
144 | ],
145 | "text/plain": [
146 | " id xmin ymin xmax ymax label score image_name\n",
147 | "0 1 686 295 854 451 Car 0.707925 000000001.jpg\n",
148 | "1 2 289 1039 475 1212 Car 0.679780 000000001.jpg\n",
149 | "2 3 971 677 1192 902 Truck 0.664441 000000001.jpg\n",
150 | "3 4 1294 1222 1497 1408 Car 0.645636 000000002.jpg\n",
151 | "4 5 686 295 854 451 Truck 0.707925 000000002.jpg"
152 | ]
153 | },
154 | "execution_count": 4,
155 | "metadata": {},
156 | "output_type": "execute_result"
157 | }
158 | ],
159 | "source": [
160 | "preds_df.head()"
161 | ]
162 | },
163 | {
164 | "cell_type": "markdown",
165 | "id": "f0a78755",
166 | "metadata": {},
167 | "source": [
168 | "## Calc inference dataframe and output metrics"
169 | ]
170 | },
171 | {
172 | "cell_type": "markdown",
173 | "id": "479404f7",
174 | "metadata": {},
175 | "source": [
176 | "### Calculate inference dataframe - one row for each bounding box"
177 | ]
178 | },
179 | {
180 | "cell_type": "code",
181 | "execution_count": 5,
182 | "id": "745ee952",
183 | "metadata": {},
184 | "outputs": [
185 | {
186 | "data": {
187 | "text/html": [
188 | "\n",
189 | "\n",
202 | "
\n",
203 | " \n",
204 | " \n",
205 | " | \n",
206 | " image_id | \n",
207 | " class | \n",
208 | " TP | \n",
209 | " TN | \n",
210 | " FP | \n",
211 | " FN | \n",
212 | " Confidence | \n",
213 | " IoU | \n",
214 | "
\n",
215 | " \n",
216 | " \n",
217 | " \n",
218 | " 0 | \n",
219 | " 000000001.jpg | \n",
220 | " Car | \n",
221 | " 1 | \n",
222 | " 0 | \n",
223 | " 0 | \n",
224 | " 0 | \n",
225 | " 0.707925 | \n",
226 | " 1.0 | \n",
227 | "
\n",
228 | " \n",
229 | " 1 | \n",
230 | " 000000001.jpg | \n",
231 | " Car | \n",
232 | " 1 | \n",
233 | " 0 | \n",
234 | " 0 | \n",
235 | " 0 | \n",
236 | " 0.679780 | \n",
237 | " 1.0 | \n",
238 | "
\n",
239 | " \n",
240 | " 2 | \n",
241 | " 000000001.jpg | \n",
242 | " Truck | \n",
243 | " 1 | \n",
244 | " 0 | \n",
245 | " 0 | \n",
246 | " 0 | \n",
247 | " 0.664441 | \n",
248 | " 1.0 | \n",
249 | "
\n",
250 | " \n",
251 | " 3 | \n",
252 | " 000000002.jpg | \n",
253 | " Car | \n",
254 | " 1 | \n",
255 | " 0 | \n",
256 | " 0 | \n",
257 | " 0 | \n",
258 | " 0.645636 | \n",
259 | " 1.0 | \n",
260 | "
\n",
261 | " \n",
262 | " 4 | \n",
263 | " 000000002.jpg | \n",
264 | " Truck | \n",
265 | " 1 | \n",
266 | " 0 | \n",
267 | " 0 | \n",
268 | " 0 | \n",
269 | " 0.707925 | \n",
270 | " 1.0 | \n",
271 | "
\n",
272 | " \n",
273 | "
\n",
274 | "
"
275 | ],
276 | "text/plain": [
277 | " image_id class TP TN FP FN Confidence IoU\n",
278 | "0 000000001.jpg Car 1 0 0 0 0.707925 1.0\n",
279 | "1 000000001.jpg Car 1 0 0 0 0.679780 1.0\n",
280 | "2 000000001.jpg Truck 1 0 0 0 0.664441 1.0\n",
281 | "3 000000002.jpg Car 1 0 0 0 0.645636 1.0\n",
282 | "4 000000002.jpg Truck 1 0 0 0 0.707925 1.0"
283 | ]
284 | },
285 | "execution_count": 5,
286 | "metadata": {},
287 | "output_type": "execute_result"
288 | }
289 | ],
290 | "source": [
291 | "infer_df = im.get_inference_metrics_from_df(preds_df, labels_df)\n",
292 | "infer_df.head()"
293 | ]
294 | },
295 | {
296 | "cell_type": "markdown",
297 | "id": "756977b0",
298 | "metadata": {},
299 | "source": [
300 | "### Summarise inference dataframe"
301 | ]
302 | },
303 | {
304 | "cell_type": "code",
305 | "execution_count": 6,
306 | "id": "c18a4ab7",
307 | "metadata": {},
308 | "outputs": [
309 | {
310 | "data": {
311 | "text/html": [
312 | "\n",
313 | "\n",
326 | "
\n",
327 | " \n",
328 | " \n",
329 | " | \n",
330 | " class | \n",
331 | " TP | \n",
332 | " FP | \n",
333 | " FN | \n",
334 | " Total | \n",
335 | " Precision | \n",
336 | " Recall | \n",
337 | "
\n",
338 | " \n",
339 | " \n",
340 | " \n",
341 | " 0 | \n",
342 | " Car | \n",
343 | " 3 | \n",
344 | " 0 | \n",
345 | " 0 | \n",
346 | " 3 | \n",
347 | " 1.0 | \n",
348 | " 1.0 | \n",
349 | "
\n",
350 | " \n",
351 | " 1 | \n",
352 | " Truck | \n",
353 | " 4 | \n",
354 | " 0 | \n",
355 | " 0 | \n",
356 | " 4 | \n",
357 | " 1.0 | \n",
358 | " 1.0 | \n",
359 | "
\n",
360 | " \n",
361 | " 2 | \n",
362 | " Van | \n",
363 | " 1 | \n",
364 | " 0 | \n",
365 | " 0 | \n",
366 | " 1 | \n",
367 | " 1.0 | \n",
368 | " 1.0 | \n",
369 | "
\n",
370 | " \n",
371 | "
\n",
372 | "
"
373 | ],
374 | "text/plain": [
375 | " class TP FP FN Total Precision Recall\n",
376 | "0 Car 3 0 0 3 1.0 1.0\n",
377 | "1 Truck 4 0 0 4 1.0 1.0\n",
378 | "2 Van 1 0 0 1 1.0 1.0"
379 | ]
380 | },
381 | "execution_count": 6,
382 | "metadata": {},
383 | "output_type": "execute_result"
384 | }
385 | ],
386 | "source": [
387 | "class_summary_df = im.summarise_inference_metrics(infer_df)\n",
388 | "class_summary_df"
389 | ]
390 | },
391 | {
392 | "cell_type": "markdown",
393 | "id": "0a215fc6",
394 | "metadata": {},
395 | "source": [
396 | "## Plot from the summary "
397 | ]
398 | },
399 | {
400 | "cell_type": "code",
401 | "execution_count": 7,
402 | "id": "febf9ec5",
403 | "metadata": {},
404 | "outputs": [
405 | {
406 | "data": {
407 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAUUAAAEvCAYAAADSG9NhAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8rg+JYAAAACXBIWXMAAAsTAAALEwEAmpwYAAAXp0lEQVR4nO3df7RdZX3n8feHGAiKIsRUZxFDwmAVWJQocZUZBSJWkAqhLlGXomMFjbWtM2qZFs3oAIWF1p9rOnU0jrW0tGr9wWBkrKIQxErUoIJExeIQJQo1JlHkp0S/88c51z5c7s3d595zf4S8X2vtde559vPs5znnj8999o+zd6oKSVLPXrM9AEmaSwxFSWoYipLUMBQlqWEoSlLDUJSkxsNmewATecxjHlNLly6d7WFIeoi57rrrflJVi0aXz/lQXLp0KRs3bpztYUh6iEny/bHK3X2WpIahKEkNQ1GSGnP+mKKk2XP//fezZcsW7r333tkeyqQtWLCAxYsXM3/+/E71DUVJ49qyZQuPfOQjWbp0KUlmezgDqyq2bdvGli1bWLZsWac2k9p9TvJPSSrJBR3qLkjytiS3JbknybVJjptMv5Jm1r333svChQt3y0AESMLChQsHmukOHIpJXgQcNUCTDwCvBN4MnALcBnwmyfJB+5Y083bXQBwx6PgH2n1OcgDwLuB1wD90qH8U8GLgzKr6YL/samATcD6waqDRStqjbNu2jWc+85kA3H777cybN49Fi3rXW19//fUcddRR7Ny5k8MOO4yLL76Yhz/84VPuc9Bjim8FbqyqDyWZMBTphd79wEdGCqpqZ5IPA+ck2aeq7htwDJJmydJzLh/q9ja/5Tm7XL9w4UK+8Y1vAHDuueey3377cfbZZwOw3377/XrdGWecwXvf+15e//rXT3lMnXefkzwd+E/AHw2w/SOAW6rq7lHlm4C9gUMH2JYkjenYY4/l5ptvHsq2OoVikr2B9wFvr6qbBtj+gcCOMcq3N+sladJ27tzJpz/9aY488sihbK/r7vOfAvsCFw6l1wkkWQ2sBliyZMlMdKkOhr3rtCsT7VZJ99xzD8uXLwd6M8WzzjprKNudMBSTLAHWAK8A9kmyT7N6nySPBn5eVb8co/kO4OAxykdmiNvHWEdVrQXWAqxYscIna0l6kH333ffXxxSHqcvu8yHAAuASeiE3sgCc3f97vHnrJmBZktGnhA4HfgEM5yCAJA1Jl1D8BvCMMRboBeUzGD/c1gHzgeePFCR5GPBC4LOeeZY010y4+1xVPwXWjy7vXxD5/apa339/MPA94PyqOr/f9utJPgK8O8l84Bbg1cAy4IyhfAJJM2Y2j/Wee+65D3h/5513Tks/w/ztc4B5PHj2+XJ6J2guAB4NXA88u6q+NsS+JWkoJh2KVZVR7zfTC8bR9e4BXt9fJGlO836KktQwFCWpYShKUsNQlKSGoShpTps3bx7Lly//9bJ582bWr1/P/vvvz/LlyznssMM477zzhtafjyOQ1N25+w95ez+bsMpYP+fbvHkzxx57LJ/61Ke46667WL58OaeeeipPecpTpjwkZ4qSdmuPeMQjOProo2f21mGSNFtG7oazfPlynvvc5z5o/bZt29iwYQNHHHHEUPpz91nSnDbe3XCuueYanvzkJ7PXXntxzjnnGIqS9mwjxxSHzd1nSWoYipLUcPdZUncdLqEZtrFuEbZy5UpWrlw5Lf05U5SkhqEoSQ1DUZIahqIkNQxFSWoYipLUMBQlzWnj3TosCevWrft1vVNOOYX169dPub9O1ykmOQn4M3oPsT8A2Ap8CTi3qr61i3ZL6T3WdCwH9B+fKmk3ceTFRw51e9982TcnrDPercMWL17MhRdeyKmnnjrUMXWdKR4IXAf8MXAi8AbgCGBD/3nPE7kI+A+jlp8PPFpJ6jvqqKPYf//9ueKKK4a63U4zxar6EPChtizJV4DvAKcD75hgE/+vqjZMaoSS9mgjtw4DWLZsGZdeeumv161Zs4Y3velNPOtZzxpaf1P5md+2/uvOYQxEksYy3q3DAI477jgAvvjFLw6tv4FOtCSZl2TvJE8A3gfczqgZ5DguSrIzyc+SfDLJcA9MSNpjrVmzhgsuuGBo2xv07POXgfuA7wK/BZxQVT/eRf376IXnq4BnAGcDRwJfSnLYeI2SrE6yMcnGrVu3DjhESXuSE088kR07dnDDDTcMZXuDhuJLgWOAFwN3AFf0zzCPqapuq6o/qKpPVNU1VfV+4DiggDW7aLe2qlZU1YpFixYNOERJe5o1a9Zw6623DmVbAx1TrKpv9//8cpJPA5uBc4A/GGAbtyb5IvDUQfqWNPu6XEIzbF1uHbZq1Sqqaij9Tfri7f41hjcDh052E5PtW5Kmy6RDMcljgScB3xuw3RLg6cBXJtu3JE2Xrr9ouRT4GnADvWOJvwm8jt7lOO/o1zke+DxwZlX9bb/sHfSC91p6v4J5Ir0Lv38FXDjMDyJJw9D1mOIG4AXAnwB7A7cC64GLqmpzv06AeTxw9rkJeDXw+8B+9K5tvBI4r6pumtrQJc2EqiLJbA9j0gY91tj1Fy1vBd46QZ319IKxLftr4K8HGpGkOWPBggVs27aNhQsX7pbBWFVs27aNBQsWdG7jg6skjWvx4sVs2bKF3fl64QULFrB48eLO9Q1FSeOaP38+y5Ytm+1hzCjvpyhJDUNRkhqGoiQ1DEVJahiKktQwFCWpYShKUsNQlKSGoShJDUNRkhqGoiQ1DEVJahiKktQwFCWpYShKUsNQlKSGoShJjU6hmOSkJFcmuT3JfUm2JPnHJId3aHtAkv+d5CdJ7kryuSRHTn3okjR8XWeKBwLXAX8MnEjvMaVHABuSHDxeo/SedLMOeDbwGuB5wHzgqiTdH5ogSTOk69P8PgR8qC1L8hXgO8Dp9J/9PIZVwNOAE6rqqn67a4FbgD8F/vPkhi1J02MqxxS39V937qLOKuBHI4EIUFU/ozd7PG0KfUvStBgoFJPMS7J3kicA7wNuZ9QMcpQjgBvHKN8ELEmy3yD9S9J0G/QRp18Gju7/fTO93eIf76L+gcDmMcq3918PAO4cvTLJamA1wJIlSwYc4q4tPefyoW5vPJvf8pwZ6UfScA26+/xS4BjgxcAdwBVJlg57UFW1tqpWVNWKRYsWDXvzkjSugUKxqr5dVV/un3h5JrAfcM4umuygNxsc7cBmvSTNGZM+0VJVP6W3C33oLqptondccbTDgR9U1YN2nSVpNk06FJM8FngS8L1dVPskcFCS45t2jwJO7a+TpDml04mWJJcCXwNuoHcs8TeB19G7HOcd/TrHA58Hzqyqv+03/SRwLXBJkv9Kb3f5DUCAvxjex5Ck4eh69nkD8ALgT4C9gVuB9cBFVbW5XyfAPJrZZ1X9KskpwNuB9wAL6IXkM6rq1iGMX5KGqusvWt4KvHWCOuvpBePo8u3Amf1FkuY075IjSQ1DUZIahqIkNQxFSWoYipLUMBQlqWEoSlLDUJSkhqEoSQ1DUZIahqIkNQx
FSWoYipLUMBQlqWEoSlLDUJSkhqEoSQ1DUZIahqIkNSYMxSSnJ/l4ku8nuSfJTUkuSvLIDm1rnGX5UEYvSUPW5cFVZwM/AN4IbAGeDJwLPCPJf6yqX03Q/m+A940q++5gw5SkmdElFE+tqq3N+6uTbAcuBlYCV07Q/odVtWGS45OkGTXh7vOoQBzx1f7rQcMdjiTNrsmeaDm+//rtDnVfneS+JHcnuTLJsZPsU5Km3cChmOQg4Hzgc1W1cYLqlwB/CPwOsBpYCFyZZOWg/UrSTOhyTPHXkuwHXAbsBF4+Uf2qemnz9poklwE3AhcAT99FP6vphShLliwZZIiSNCWdZ4pJ9gXWAYcAJ1XVlkE7q6qfA5cDT52g3tqqWlFVKxYtWjRoN5I0aZ1miknmAx8DVgDPqqpvTrHfmmJ7SZoWE4Zikr2AvwdOAE6ZyuU1SR4FnAJ8ZbLbkKTp1GWm+FfA84ELgbuSHNOs21JVW5IcDHwPOL+qzgdIcjbwROAq4EfAwfQuBH8ccMbwPoIkDU+XUDy5/7qmv7TOo/frlgDzeOAxypuA5/aX/YE7gH8GzqoqZ4qS5qQJQ7Gqlnaos5leMLZl6+idmJGk3YZ3yZGkhqEoSQ1DUZIahqIkNQxFSWoYipLUMBQlqWEoSlLDUJSkhqEoSQ1DUZIahqIkNQxFSWoYipLUMBQlqWEoSlLDUJSkhqEoSQ1DUZIahqIkNSYMxSSnJ/l4ku8nuSfJTUkuSvLIDm0XJHlbktv6ba9Nctxwhi5Jw9dlpng28EvgjcCzgf8FvBq4IslE7T8AvBJ4M3AKcBvwmSTLJztgSZpOXZ77fGpVbW3eX51kO3AxsBK4cqxGSY4CXgycWVUf7JddDWwCzgdWTWHckjQtJpwpjgrEEV/tvx60i6argPuBjzTb2gl8GDgpyT4DjFOSZsRkT7Qc33/99i7qHAHcUlV3jyrfBOwNHDrJviVp2gwcikkOorf7+7mq2riLqgcCO8Yo396sH6+P1Uk2Jtm4detYE1VJmh4DhWKS/YDLgJ3Ay6dlREBVra2qFVW1YtGiRdPVjSQ9SOdQTLIvsA44BDipqrZM0GQHcMAY5SMzxO1jrJOkWdUpFJPMBz4GrAB+t6q+2aHZJmBZkoePKj8c+AVw8yADlaSZ0OXi7b2AvwdOAH6vqjZ03PY6YD7w/GZbDwNeCHy2qu4bfLiSNL26XKf4V/SC7ULgriTHNOu2VNWWJAcD3wPOr6rzAarq60k+Ary7P9O8hd5F38uAM4b5ISRpWLrsPp/cf10DXDtqeUV/XYB5Y2zv5cAHgQuAy4HHA8+uqq9NbdiSND0mnClW1dIOdTbTC8bR5fcAr+8vkjTneZccSWoYipLUMBQlqWEoSlLDUJSkhqEoSQ1DUZIahqIkNQxFSWoYipLUMBQlqWEoSlLDUJSkhqEoSQ1DUZIahqIkNQxFSWoYipLUMBQlqdH1uc+Lk/xlkmuT3J2kkizt2HZzv/7o5femMnBJmg5dHnEKcCjwAuA64BrgxAH7+Qxw7qiymwbchiRNu66h+IWqeixAklcweCj+pKo2DNhGkmZcp93nqvrVdA9EkuaCmTrRcmr/WOR9STZ4PFHSXDUTobgOeA1wEnAGcC9waZKXjNcgyeokG5Ns3Lp16wwMUZJ6uh5TnLSqek37PsmlwAbgIuCScdqsBdYCrFixoqZ7jJI0YsavU6yqXwIfBRYn+Xcz3b8k7cpsX7ztLFDSnDLjoZjkYcALgR9U1e0z3b8k7UrnY4pJTu//eXT/9eQkW4GtVXV1v85O4OKqOqv//kXAacD/BW4FHgv8EfAU4EVD+QSSNESDnGj56Kj37+m/Xg2s7P89r7+MuAX4DeBtwIHAXcBG4NlV9ZlBBytJ061zKFZVBq3T/xXLCZMYlyTNitk+0SJJc4qhKEkNQ1GSGoaiJDUMRUlqGIqS1DAUJalhKEpSw1CUpIahKEkNQ1GSGoaiJDUMRUlqGIqS1DAUJalhKEpSw1CUpIahKEkNQ1GSGp1CMcniJH+Z5NokdyepJEs7tt0ryRuSbE5yb5LrkzxvSqOWpGnSdaZ4KPACYAdwzYB9/DlwLvA/gZOBDcBHk/zugNuRpGnX9Wl+X6iqxwIkeQVwYpdGSX4DOBt4S1W9vV98VZJDgbfQex60JM0ZnWaKVfWrSW7/JGBv4JJR5ZcARyZZNsntStK0mO4TLUcA9wE3jyrf1H89fJr7l6SBTHcoHgj8tKpqVPn2Zr0kzRldjynOqCSrgdUAS5YsmeXRSMOx9JzLZ6yvzW95zoz19VAz3TPFHcCjk2RU+cgMcTtjqKq1VbWiqlYsWrRoWgcoSa3pDsVNwD7Avx9VPnIs8VvT3L8kDWS6Q/GfgPuBM0aVvwS4sapumeb+JWkgnY8pJjm9/+fR/deTk2wFtlbV1f06O4GLq+osgKr6cZJ3Am9I8nPga8ALgROAVUP6DJI0NIOcaPnoqPfv6b9eDazs/z2vv7TWAHcC/wV4HHAT8IKq+tRAI5WkGdA5FKtq9MmSTnWq6pfABf1FkuY075IjSQ1DUZIahqIkNQxFSWoYipLUMBQlqWEoSlLDUJSkhqEoSQ1DUZIahqIkNQxFSWoYipLUMBQlqWEoSlLDUJSkhqEoSQ1DUZIahqIkNQxFSWp0CsUkj0/ysSQ/S3JHkk8kWdKxbY2zLJ/SyCVpGkz4NL8kDweuBO4DXgYUvSfzXZXkt6rqrg79/A3wvlFl3x1sqJI0/bo84vSVwCHAE6vqZoAkNwD/ArwKeGeHbfywqjZMepSSNEO67D6vAjaMBCJAVd0C/DNw2nQNTJJmQ5dQPAK4cYzyTcDhHft5dZL7ktyd5Mokx3YeoSTNoC6heCCwY4zy7cABHdpfAvwh8DvAamAhcGWSleM1SLI6ycYkG7du3dqhC0kaji7HFKekql7avL0myWX0Zp4XAE8fp81aYC3AihUrarrHKEkjuswUdzD2jHC8GeQuVdXPgcuBpw7aVpKmW5dQ3ETvuOJohwPfmkLfzgAlzTldQvGTwDFJDhkpSLIUeFp/3UCSPAo4BfjKoG0labp1CcX3A5uBy5KclmQVcBlwK80F2UkOTrIzyZubsrOTvD/Ji5OsTPIyepfyPA5YM8wPIknDMOGJlqq6K8kJwLuAvwMCfB54bVXd2VQNMI8HBu1NwHP7y/7AHfRC8ayqcqYoac7pdPa5qn4APG+COpvpBWNbtg5YN9nBSdJM8y45ktQwFCWpYShKUsNQlKSGoShJDUNRkhqGoiQ1DEVJahiKktQwFCWpYShKUsNQlKSGoShJDUNRkhqGoiQ1DEVJahiKktQwFCWpYShKUqNTKCZ5fJKPJflZkjuSfCLJko5tFyR5W5LbktyT5Nokx01t2JI0PSYMxSQPB64EngS8DHgp8ATgqiSP6NDHB4BXAm+m97zn24DPJFk+yTFL0rTp8jS/VwKHAE+sqpsBktwA/AvwKuCd4zVMchTwYuDMqvpgv+xqYBNwPrBqSqOXpCHrsvu8CtgwEogAVXULvec3n9ah7f3AR5q2O4EPAy
cl2WfgEUvSNOoSikcAN45Rvgk4vEPbW6rq7jHa7g0c2qF/SZoxXULxQGDHGOXbgQOm0HZkvSTNGV2OKc64JKuB1f23dya5aTbHMxl566SaPQb4yXBHsnvy+5sav79ODh6rsEso7mDsGeF4s8DRbcfqeGSGuH2MdVTVWmBth7E9pCTZWFUrZnscuyu/v6nx++vpsvu8id6xwdEOB77Voe2y/mU9o9v+Arj5wU0kafZ0CcVPAsckOWSkIMlS4Gn9dbuyDpgPPL9p+zDghcBnq+q+QQcsSdOpSyi+H9gMXJbktCSrgMuAW4H3jVRKcnCSnUnePFJWVV+ndznOu5O8Iskz6V2Oswz478P7GA8Ze9whgyHz+5savz8gVTVxpd5P+t4FPAsI8HngtVW1uamzFLgFOK+qzm3K9wUupHcR96OB64E/q6r1w/kIkjQ8nUJRkvYU3iVHkhqGoiQ1DEVJahiKsyDJvklem+SqJP+a5Bf95V/7Za8d49pODSDJcUmunO1xzFVJViY5I8lTxll/UHslyZ7EEy0zLMnj6d2fcim9Ow1t4oG/BT+c3jWg3weeWVU/mIVh7vaSPA/4x6qaN9tjmUuS7Ad8FvhteleSFHAFvdv7/aip99vAl/bE729O/vb5Ie7dwD3AE9pLmlr9y5v+D73LoJ43Q+PaLXS94zuwaFoHsvt6I3AY8PvAV4GVwHnAl5OcVFUT/UrtIc+Z4gxL8jPgJVW1boJ6q4C/q6r9Z2Zku4ckv6I3u5mwKlB74kxnV5J8B3hPVf2Ppuwgej/IWAqcXFVfdaaomTTIfyH/Yz3YPcAXgI9NUG8F/3anJf2bJcDX24Kq+mGS44FPAZ9Lchq973mPZCjOvM8BFya5sX8H8wfp7z7/Ob1jPXqg64FfVtUHdlUpyU8xFMfyY2Dx6MKquivJycDHgcuBd8z0wOYKQ3HmvRa4Cvhukg307mo+cgu2A+jdkegYer83f90sjG+uuw44vWPdTOdAdlMb6T1G5EOjV1TVvf1Z4j8A/409dE/FY4qzoP978NXAqfRCcOT+kjvonY3+JPD+MR7jsMfrH/86tKqunu2x7I76Z+XPBk6pqm3j1AnwHuDZVbVsJsc3FxiKktTw4m1JahiKktQwFCWpYShKUsNQlKTG/wfVSwatuf/UUQAAAABJRU5ErkJggg==\n",
408 | "text/plain": [
409 | ""
410 | ]
411 | },
412 | "metadata": {
413 | "needs_background": "light"
414 | },
415 | "output_type": "display_data"
416 | },
417 | {
418 | "data": {
419 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAUUAAAEvCAYAAADSG9NhAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8rg+JYAAAACXBIWXMAAAsTAAALEwEAmpwYAAAXbUlEQVR4nO3de7hWZZ3/8fc3MAEzBKN+v1LY8JOMjaLpVpkIwVOiGYx5ajTD8YBezTQ6jgcaGxK1RidPqRXimAe0MhkNLW0MAS0VFU0dUBIaUMkOCBuUo4D374/n2XSz2exn7c0+Ie/Xde3r2dzrvtf67uePD/e613rWEyklJEklH2jvAiSpIzEUJSljKEpSxlCUpIyhKEkZQ1GSMp3bu4BKPvKRj6Sqqqr2LkPS+8xzzz33VkqpV/32Dh+KVVVVzJo1q73LkPQ+ExGvNdTu6bMkZQxFScoYipKU6fBritL2bt26dSxatIg1a9a0dynbpC5durDbbruxww47FOpvKEod3KJFi9h5552pqqoiItq7nG1KSoklS5awaNEi+vbtW2hModPniNgtIm6MiKciYlVEpIioKjj2AxHx9YhYGBFrIuLFiDiuUHWSWLNmDbvuuquB2AwRwa677tqkWXbRNcU9gBOBWuDXTazrcuBS4CbgKGAmcG9EHN3E/UjbLQOx+Zr63hUNxcdTSh9LKR0N3NuEYj4KXABcmVK6OqU0PaV0NjAduLJJlUpqN506dWLfffdlr7324oQTTmDVqlVbvc9x48YxderULW6fMGECd95551Yfp6kKrSmmlN5r5v6PBD4I3FWv/S7ghxHRN6W0oJn7lrZLVWN/0aL7W3jl5yv26dq1Ky+88AIAp5xyChMmTOD888/fuH39+vV07ty0SxSXXXZZo9vPOeecJu2vpbT2LTkDgbXA/Hrtc8qv1a18fEktbOjQocyfP58ZM2YwdOhQRo4cSXV1NRs2bODCCy/kgAMOYNCgQdx8880bx1x11VXsvffe7LPPPowdOxaA0047jcmTJwMwduxYqqurGTRoEBdccAEAl156KVdffTUAL7zwAoMHD2bQoEEce+yx1NbWAjB8+HAuvvhiDjzwQD75yU/y6183dXVvc6199bknsCxt/p0HS7PtkrYR69ev5+GHH2bEiBEAPP/888yePZu+ffsyceJEunfvzrPPPsvatWsZMmQIn/vc55g7dy5Tpkzh6aefplu3bixdunSTfS5ZsoT777+fuXPnEhEsW7Zss+N+5Stf4cYbb2TYsGGMGzeO8ePHc/3112+s6ZlnnuGhhx5i/PjxjZ6SF9Ehb8mJiDHAGIDevXu36L5b+tSjJS3scnJ7l9CoqjU/au8SGvV+ff9uGfl/WbdoWcsWk3mpvO9BH9jyStbq1avZd+CeAAw96NOccdT+PDnrRQ7cp5q+Oy6DN3/LI1N+ykuvzGPyjycBsPydFcyb+TBTZzzF3x97GN2W/Q6WlWdCb74Gq5bC0gV0X9mHLp0SZ/zd33LM4UM55vCDYdUO8M4f4b1uLJ/7OMuWLGbYsGEAjB49mhNOOGFjbV/84hcB2H///Vm4cOFWvx+tHYq1wC4REfVmi3UzxKUNjCGlNBGYCFBTU+M3a0ntrGuXHXnhVz/ZrH2nbl02/p5I3HjFRRw5/DOb9PnvGU81uu/OnTvzzC8m8ehvnmHyL6Zy0233MO3eiYVr23HHHYHSxaD169cXHrclrb2mOAfYEfh/9drr1hJfbuXjS2ojRw77G35w52TWrVsHwKu/f42Vq1ZzxMEHcds9D7Bq9WoAltYu32TcipWrWP7OCo4+7LNcd+m/8OLL8zbZ3v3DO9Oj+84b1wsnTZq0cdbYGlp7pvhLYB1wCjA+a/8yMNsrz9L7x5knH8vCN95kvxGnkFKiV88e/OyH1zDikCG8MOdVao76Mh/cYQeOPnQI3/761zaOe2fFSkadfj5r1q4lJbj2m+dvtu87rr+Mcy68kFWrVtGvXz9uu+22Vvs7ouj3PkfE8eVfDwPOAb4KLAYWp5QeK/dZD9yRUjojG3clcB7wr8DzwEnA2cDIlNLPKx23pqYmteTzFF1TbD7XFLfO1qwpfqx3vxauZnONrSl2CB//dLOHvvLKKwwYMGCTtoh4LqVUU79vU2aK9W/a/n759TFgePn3TuWf3CXACuBc4P8AvwNOLBKIktTWCodiSqniZ2Ua6pNS2gBcUf6RpA7N5ylKUsZQlKSMoShJGUNRkjKGoqSKOu1ew75HfIm9Dj2BL4w+l2XL32nR/Vcd9HneWlp6yMOH+g9p0X03VYf87LOkLRv0n31adH8vndng1x9vIv+Y3+hzx/G92+/hknPPbNE6OgpnipKa5G/2H8Qf/rQYgN8vfIMRp/wD+484maHHns7c+aUbwP+8eAnHnvEv7HP4Sexz+Ek8+eyLAPzt6eez/4iTGXjI8Uy867/a7W9ojDNFSYVt2LCBR3/zDGf83SgAxlx0BROuvIT+/Xrz9PP/w1e//u9Mu3ci//Rv/8Gwwftx/63XsGHDBlasLD2p+4fXfJOePbqzevUaDvj8qRx39GHs2nOXdvyLNmcoSqpo9Zq17HvEl/jDn/7CgP59OeLgwaxYuYonn3uJE86+aGO/te++C8C0J57lzu9eDpSeXtP9wzsDcMMPf8z9D08H4I03/8y8Ba8bipK2PXVriqtWr+bIk/+B793+U0478Qvs8uGdG3ykWENmPDmLqb9+hqcevJ1uXbsy/PizWLP23VauvOlcU5RUWLeuXbnh8ou45uZJdOvahb67f5x7H/wVUPqO5RfnvArAYZ89kB/cWXpcwoYNG1j+9jssf2cFPbrvTLeuXZk7fwEzn/+fdvs7GmMoSmqST+/1KQYN6M+Pf/ZL7r7pW9z6k5+xz+EnMfCQ45nyyAwAvnvZhUx/chZ7H3Yi+484hZdf/V9GDP8M6zdsYMCwLzL22zcyeL+92/cP2QJPn6VtTJFbaFrainlPbPLvB+/47sbff3n39zbr/7FeuzLltus2a3/4rpsa3P/Cp//6SL/6x2przhQlKWMoSlLGUJSkjKEodXCJRNGvDdHmmvreGYpSB/fasnWsX/W2wdgMKSWWLFlCly5dKncu8+qz1MHd+HQtXwP67PIWQcVvBWm2V2Jxq+27RSx/pVnDunTpwm677Va4v6EodXBvr32Pbz2+pNWP09G/DZFLl1fu0wI8fZakjKEoSRlDUZIyhqIkZQxFScoYipKUMRQlKWMoSlLGUJSkjKEoSRlDUZIyhqIkZQxFScoYipKUMRQlKWMoSlLGUJSkjKEoSRlDUZIyhUIxInaPiMkRsTwi3o6I+yKid8GxvSPijoh4PSJWR8SrEXFFROy0daVLUsur+MVVEdENmAasBUYDCbgCmB4Rg1JKKxsZuxMwFdgB+DfgdeAAYDzQHzhpa/8ASWpJRb7N7yygH7BnSmk+QES8BMwDzgaubWTsEErhd2RK6ZFy2/SI6AlcEBHdUkqrml29JLWwIqfPI4GZdYEIkFJaADwBjKow9oPl17f
rtS8rH7v1vsRWkpqhSCgOBGY30D4HqK4wdiqlGeVVEVEdER+KiEOBc4EJjZ16S1J7KBKKPYHaBtqXAj0aG5hSWgN8tnycOcA7wKPAz4F/3NK4iBgTEbMiYtbixYsLlChJLaNVb8mJiC7APcBHgVOBYcCFlC6wfG9L41JKE1NKNSmlml69erVmiZK0iSIXWmppeEa4pRlk7gxgOLBHSun35bbHI2I5MDEiJqSUXixarCS1tiIzxTmU1hXrqwZerjB2b6A2C8Q6z5RfBxQ4viS1mSKh+AAwOCL61TVERBWl220eqDD2T0CPiNijXvtB5dc/FKxTktpEkVC8BVgITImIURExEpgCvAHcXNcpIvpExPqIGJeNvZ3SxZWHImJ0RBwSERcCVwPPUbqtR5I6jIqhWL5t5lDgVWAScDewADg0pbQi6xpAp3yfKaWFwGDgBUqfgnmI0s3gE4EjUkrvtcQfIUktpciFFlJKrwPHVeizkAZuxk4pvQyc2JziJKmt+ZQcScoYipKUMRQlKWMoSlLGUJSkjKEoSRlDUZIyhqIkZQxFScoYipKUMRQlKWMoSlLGUJSkjKEoSRlDUZIyhqIkZQxFScoYipKUMRQlKWMoSlLGUJSkjKEoSRlDUZIyhqIkZQxFScoYipKUMRQlKWMoSlLGUJSkjKEoSRlDUZIyhqIkZQxFScoYipKUMRQlKWMoSlLGUJSkjKEoSRlDUZIyhUIxInaPiMkRsTwi3o6I+yKid9GDRMSAiLg3It6KiNUR8buIOLf5ZUtS6+hcqUNEdAOmAWuB0UACrgCmR8SglNLKCuNryuNnAGcCy4H+wIe2qnJJagUVQxE4C+gH7JlSmg8QES8B84CzgWu3NDAiPgDcCTyaUjo22zS92RVLUisqcvo8EphZF4gAKaUFwBPAqApjhwMDaCQ4JakjKRKKA4HZDbTPAaorjP1s+bVLRMyMiHUR8ZeIuCEiujalUElqC0VCsSdQ20D7UqBHhbEfL7/eAzwCHAH8B6W1xR9taVBEjImIWRExa/HixQVKlKSWUWRNcWvUhe5dKaVx5d9nREQn4MqIGJBSeqX+oJTSRGAiQE1NTWrlGiVpoyIzxVoanhFuaQaZW1J+/VW99kfKr58ucHxJajNFQnEOpXXF+qqBlwuMbcx7BY4vSW2mSCg+AAyOiH51DRFRBQwpb2vMw5TubzyyXvuI8uusYmVKUtsoEoq3AAuBKRExKiJGAlOAN4Cb6zpFRJ+IWB8RdWuHpJSWAP8OnBMR346IwyNiLDAOuCO/zUeSOoKKF1pSSisj4lDgOmASEMCjwHkppRVZ1wA6sXnQXga8A3wVuAD4I/Ad4PKtrl6SWlihq88ppdeB4yr0WUgpGOu3J0o3b3sDt6QOz6fkSFLGUJSkjKEoSRlDUZIyhqIkZQxFScoYipKUMRQlKWMoSlLGUJSkjKEoSRlDUZIyhqIkZQxFScoYipKUMRQlKWMoSlLGUJSkjKEoSRlDUZIyhqIkZQxFScoYipKUMRQlKWMoSlLGUJSkjKEoSRlDUZIyhqIkZQxFScoYipKUMRQlKWMoSlLGUJSkjKEoSRlDUZIyhqIkZQxFScoUCsWI2D0iJkfE8oh4OyLui4jeTT1YRIyNiBQRv2l6qZLU+iqGYkR0A6YBnwJGA6cC/YHpEbFT0QNFRD/gG8BfmleqJLW+zgX6nAX0A/ZMKc0HiIiXgHnA2cC1BY/1A+BuYM+Cx5WkNlfk9HkkMLMuEAFSSguAJ4BRRQ4SEScD+wFfb06RktRWioTiQGB2A+1zgOpKgyOiB3AdcFFKaWnTypOktlUkFHsCtQ20LwV6FBj/HeBV4PbiZUlS+2jVtb2IGAp8BdgvpZSaMG4MMAagd+8mX+SWpGYrMlOspeEZ4ZZmkLmbgVuBRRGxS0TsQimIO5X/vWNDg1JKE1NKNSmlml69ehUoUZJaRpGZ4hxK64r1VQMvVxg7oPxzTgPbaoF/Bq4vUIMktYkiofgAcHVE9Esp/S9ARFQBQ4CxFcYe0kDb9UAn4GvA/Aa2S1K7KRKKtwD/CEyJiG8ACbgceIPS6TEAEdEH+D1wWUrpMoCU0oz6O4uIZUDnhrZJUnuruKaYUloJHErpCvIkSjdgLwAOTSmtyLoGpRmgn6eWtM0qdPU5pfQ6cFyFPgspBWOlfQ0vckxJag/O6iQpYyhKUsZQlKSMoShJGUNRkjKGoiRlDEVJyhiKkpQxFCUpYyhKUsZQlKSMoShJGUNRkjKGoiRlDEVJyhiKkpQxFCUpYyhKUsZQlKSMoShJGUNRkjKGoiRlDEVJyhiKkpQxFCUpYyhKUsZQlKSMoShJGUNRkjKGoiRlDEVJyhiKkpQxFCUpYyhKUsZQlKSMoShJGUNRkjKGoiRlDEVJyhQKxYjYPSImR8TyiHg7Iu6LiN4FxtVExMSImBsRqyLi9Yi4OyL6bn3pktTyKoZiRHQDpgGfAkYDpwL9gekRsVOF4V8CBgI3AEcBY4H9gFkRsftW1C1JraJzgT5nAf2APVNK8wEi4iVgHnA2cG0jY69KKS3OGyLiCWBBeb/jmlO0JLWWIqfPI4GZdYEIkFJaADwBjGpsYP1ALLe9BiwGPtG0UiWp9RUJxYHA7Aba5wDVTT1gRAwAPgq80tSxktTaioRiT6C2gfalQI+mHCwiOgMTKM0Ub23KWElqC219S85NwGeAL6eUGgpaACJiTETMiohZixdvdgYuSa2mSCjW0vCMcEszyAZFxJXAGOD0lNIjjfVNKU1MKdWklGp69epV9BCStNWKXH2eQ2ldsb5q4OUiB4mIS4CLga+llCYVL0+S2laRmeIDwOCI6FfXEBFVwJDytkZFxD8BVwCXpJRuamadktQmioTiLcBCYEpEjIqIkcAU4A3g5rpOEdEnItZHxLis7UvA9cAvgWkRMTj7afKVa0lqbRVPn1NKKyPiUOA6YBIQwKPAeSmlFVnXADqxadCOKLePKP/kHgOGN7tySWoFRdYUSSm9DhxXoc9CSgGYt50GnNa80iSp7fmUHEnKGIqSlDEUJSljKEpSxlCUpIyhKEkZQ1GSMoaiJGUMRUnKGIqSlDEUJSljKEpSxlCUpIyhKEkZQ1GSMoaiJGUMRUnKGIqSlDEUJSljKEpSxlCUpIyhKEkZQ1GSMoaiJGUMRUnKGIqSlDEUJSljKEpSxlCUpIyhKEkZQ1GSMoaiJGUMRUnKGIqSlDEUJSljKEpSxlCUpIyhKEkZQ1GSMoVCMSJ2j4jJEbE8It6OiPsionfBsV0i4jsR8ceIWB0RT0XEwVtXtiS1joqhGBHdgGnAp4DRwKlAf2B6ROxU4Bi3AmcB44BjgD8C/x0R+zazZklqNZ0L9DkL6AfsmVKaDxARLwHzgLOBa7c0MCL2AU4GTk8p3VZuewyYA1wGjNyq6iWphRU5fR4JzKwLRICU0gLgCWBUgbHrgHuyseuBnwBHRsSOTa5YklpRkVAcCMxuoH0OUF1g7IKU0qoGxn4Q2KPA8SWpzRQJxZ5AbQPtS4EeWzG2brskdRhF1hTbXESMAcaU/7kiIn
7XnvW0lYCPAG+1dx1bdkx7F9Ao37+t0+Hfv/HR0nvs01BjkVCspeEZ4ZZmgfXHNnTguhni0ga2kVKaCEwsUNv7SkTMSinVtHcd2yrfv63j+1dS5PR5DqW1wfqqgZcLjO1bvq2n/th3gfmbD5Gk9lMkFB8ABkdEv7qGiKgChpS3NeZBYAfghGxsZ+Ak4JGU0tqmFixJralIKN4CLASmRMSoiBgJTAHeAG6u6xQRfSJifUSMq2tLKf2W0u0410fEmRFxGKXbcfoC32y5P+N9Y7tbMmhhvn9bx/cPiJRS5U6lj/RdBxwBBPAocF5KaWHWpwpYAIxPKV2atXcFvkXpJu5dgBeBi1NKM1rmT5CkllMoFCVpe+FTciQpYyhKUsZQlKSModgOIqJrRJwXEdMj4s8R8W7558/ltvMauLdTTRARB0fEtPauo6OKiOERcUpE7LeF7Z/I7yTZnnihpY1FxO6Unk9ZRelJQ3PY9LPg1ZTuAX0NOCyl9Ho7lLnNi4jjgJ+mlDq1dy0dSUR8CHgEOIjSnSQJ+BWlx/u9mfU7CHhye3z/OuRnn9/nrgdWA/3zW5py5dubfkbpNqjj2qiubULRJ74DvVq1kG3XvwIDgNOAZ4HhwHjg6Yg4MqVU6VNq73vOFNtYRCwHvpxSerBCv5HApJRS97apbNsQEe9Rmt1U7Aqk7XGm05iImAt8P6V0Q9b2CUofyKgCjkopPetMUW2pKf8L+T/W5lYDjwOTK/Sr4a9PWtJf9QZ+mzeklP4QEcOAnwNTI2IUpfd5u2Qotr2pwLciYnb5CeabKZ8+X05prUebehHYkFK6tbFOEbEMQ7EhfwF2q9+YUloZEUcB/wX8ArimrQvrKAzFtnceMB14NSJmUnqqed0j2HpQeiLRYEqfN//ndqivo3sOOL5g3xZ/AN/7wCxKXyPy4/obUkpryrPEHwHfYDs9U3FNsR2UPw8+BvgCpRCse75kLaWr0Q8AtzTwNQ7bvfL61x4ppcfau5ZtUfmq/AXAMSmlJVvoE8D3gREppb5tWV9HYChKUsabtyUpYyhKUsZQlKSMoShJGUNRkjL/H2hNBW+cdPsUAAAAAElFTkSuQmCC\n",
420 | "text/plain": [
421 | ""
422 | ]
423 | },
424 | "metadata": {
425 | "needs_background": "light"
426 | },
427 | "output_type": "display_data"
428 | }
429 | ],
430 | "source": [
431 | "figsize = (5, 5)\n",
432 | "fontsize = 16\n",
433 | "\n",
434 | "fig_confusion = (\n",
435 | " class_summary_df[[\"TP\", \"FP\", \"FN\"]]\n",
436 | " .plot(kind=\"bar\", figsize=figsize, width=1, align=\"center\", fontsize=fontsize)\n",
437 | " .get_figure()\n",
438 | ")\n",
439 | "\n",
440 | "fig_pr = (\n",
441 | " class_summary_df[[\"Precision\", \"Recall\"]]\n",
442 | " .plot(kind=\"bar\", figsize=figsize, width=1, align=\"center\", fontsize=fontsize)\n",
443 | " .get_figure()\n",
444 | ")"
445 | ]
446 | },
447 | {
448 | "cell_type": "markdown",
449 | "id": "d6537775",
450 | "metadata": {},
451 | "source": [
452 | "## Coco Metrics"
453 | ]
454 | },
455 | {
456 | "cell_type": "markdown",
457 | "id": "ac07df8f",
458 | "metadata": {},
459 | "source": [
460 | "Calculate the coco metrics (from pycocotools) from the predictions and labels dataframes. Optionally choose to output per class values.\n",
461 | "The function returns the results as a dictionary"
462 | ]
463 | },
464 | {
465 | "cell_type": "code",
466 | "execution_count": 8,
467 | "id": "7f4c0682",
468 | "metadata": {},
469 | "outputs": [
470 | {
471 | "name": "stdout",
472 | "output_type": "stream",
473 | "text": [
474 | "creating index...\n",
475 | "index created!\n",
476 | "Loading and preparing results...\n",
477 | "DONE (t=0.00s)\n",
478 | "creating index...\n",
479 | "index created!\n",
480 | "Running per image evaluation...\n",
481 | "Evaluate annotation type *bbox*\n",
482 | "DONE (t=0.00s).\n",
483 | "Accumulating evaluation results...\n",
484 | "DONE (t=0.01s).\n",
485 | " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 1.000\n",
486 | " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 1.000\n",
487 | " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 1.000\n",
488 | " Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = -1.000\n",
489 | " Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = -1.000\n",
490 | " Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 1.000\n",
491 | " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.806\n",
492 | " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 1.000\n",
493 | " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 1.000\n",
494 | " Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = -1.000\n",
495 | " Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = -1.000\n",
496 | " Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 1.000\n"
497 | ]
498 | },
499 | {
500 | "data": {
501 | "text/plain": [
502 | "{'All': {'AP_all': 1.0,\n",
503 | " 'AP_all_IOU_0_50': 1.0,\n",
504 | " 'AP_all_IOU_0_75': 1.0,\n",
505 | " 'AP_small': -1.0,\n",
506 | " 'AP_medium': -1.0,\n",
507 | " 'AP_large': 1.0,\n",
508 | " 'AR_all_dets_1': 0.8055555555555556,\n",
509 | " 'AR_all_dets_10': 1.0,\n",
510 | " 'AR_all': 1.0,\n",
511 | " 'AR_small': -1.0,\n",
512 | " 'AR_medium': -1.0,\n",
513 | " 'AR_large': 1.0}}"
514 | ]
515 | },
516 | "execution_count": 8,
517 | "metadata": {},
518 | "output_type": "execute_result"
519 | }
520 | ],
521 | "source": [
522 | "res = cm.get_coco_from_dfs(preds_df, labels_df, False)\n",
523 | "res"
524 | ]
525 | },
526 | {
527 | "cell_type": "code",
528 | "execution_count": null,
529 | "id": "477a30c2",
530 | "metadata": {},
531 | "outputs": [],
532 | "source": []
533 | },
534 | {
535 | "cell_type": "code",
536 | "execution_count": null,
537 | "id": "0146649d",
538 | "metadata": {},
539 | "outputs": [],
540 | "source": []
541 | }
542 | ],
543 | "metadata": {
544 | "kernelspec": {
545 | "display_name": "py38",
546 | "language": "python",
547 | "name": "py38"
548 | },
549 | "language_info": {
550 | "codemirror_mode": {
551 | "name": "ipython",
552 | "version": 3
553 | },
554 | "file_extension": ".py",
555 | "mimetype": "text/x-python",
556 | "name": "python",
557 | "nbconvert_exporter": "python",
558 | "pygments_lexer": "ipython3",
559 | "version": "3.8.8"
560 | }
561 | },
562 | "nbformat": 4,
563 | "nbformat_minor": 5
564 | }
565 |
--------------------------------------------------------------------------------
/objdetecteval/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/alexhock/object-detection-metrics/4ba0cfa7677f8f0b65776567b6ccb7b45ac3b1e0/objdetecteval/__init__.py
--------------------------------------------------------------------------------
/objdetecteval/data/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/alexhock/object-detection-metrics/4ba0cfa7677f8f0b65776567b6ccb7b45ac3b1e0/objdetecteval/data/__init__.py
--------------------------------------------------------------------------------
/objdetecteval/data/bbox_formats.py:
--------------------------------------------------------------------------------
1 | import math
2 |
3 |
4 | __all__ = [
5 | "denormalize_bbox_values",
6 | "convert_pascal_voc_bbox_to_cxcywh",
7 | "convert_cxcywh_bbox_to_corner_values",
8 | "convert_cxcywh_bbox_to_coco_format",
9 | "convert_cxcywh_bbox_to_pascal_voc_format",
10 | "convert_corner_bbox_to_pascal_voc",
11 | "get_rectangle_edges_from_corners_format_bbox",
12 | "get_rectangle_edges_from_coco_bbox",
13 |     "get_rectangle_edges_from_pascal_bbox",
   |     "convert_pascal_bbox_to_coco",
14 | ]
15 |
16 |
17 | def denormalize_bbox_values(
18 | normalised_x_centre,
19 | normalised_y_centre,
20 | normalised_width,
21 | normalised_height,
22 | im_width=2456,
23 | im_height=2052,
24 | bbox_format_conversion_fn=None,
25 | ):
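   |     # scale normalised (0-1) centre/width/height values up to pixel units;
   |     # the im_width/im_height defaults appear dataset-specific rather than general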
26 | x_centre = normalised_x_centre * im_width
27 | y_centre = normalised_y_centre * im_height
28 | width = normalised_width * im_width
29 | height = normalised_height * im_height
30 |
31 | if bbox_format_conversion_fn is None:
32 | return [
33 | math.floor(x_centre),
34 | math.floor(y_centre),
35 | math.floor(width),
36 | math.floor(height),
37 | ]
38 | else:
39 | return bbox_format_conversion_fn(x_centre, y_centre, width, height)
40 |
41 |
42 | def convert_pascal_voc_bbox_to_cxcywh(xmin, ymin, xmax, ymax):
43 | width = xmax - xmin
44 | height = ymax - ymin
45 | x_centre = xmin + width / 2.0
46 | y_centre = ymin + height / 2.0
47 |
48 | return [x_centre, y_centre, width, height]
49 |
50 |
51 | def convert_cxcywh_bbox_to_corner_values(x_centre, y_centre, width, height):
52 | top = math.floor(y_centre + height / 2)
53 | left = math.floor(x_centre - width / 2)
54 | bottom = math.floor(y_centre - height / 2)
55 | right = math.floor(x_centre + width / 2)
56 | return [top, left, bottom, right]
57 |
58 |
59 | def convert_cxcywh_bbox_to_coco_format(x_centre, y_centre, width, height):
60 | x_min = math.floor(x_centre - width / 2)
61 | y_min = math.floor(y_centre - height / 2)
62 |
63 | return [x_min, y_min, width, height]
64 |
65 |
66 | def convert_cxcywh_bbox_to_pascal_voc_format(x_centre, y_centre, width, height):
67 | xmin = math.floor(x_centre - width / 2)
68 | ymin = math.floor(y_centre - height / 2)
69 | xmax = math.floor(x_centre + width / 2)
70 | ymax = math.floor(y_centre + height / 2)
71 |
72 | return [xmin, ymin, xmax, ymax]
73 |
74 |
75 | def convert_corner_bbox_to_pascal_voc(top, left, bottom, right):
76 | xmin = left
77 | ymin = bottom
78 | xmax = right
79 | ymax = top
80 |
81 | return [xmin, ymin, xmax, ymax]
82 |
83 |
84 | def get_rectangle_edges_from_corners_format_bbox(bbox):
85 | top, left, bottom, right = bbox
86 |
87 | bottom_left = (left, bottom)
88 | width = right - left
89 | height = top - bottom
90 |
91 | return bottom_left, width, height
92 |
93 |
94 | def get_rectangle_edges_from_coco_bbox(bbox):
95 | x_min, y_min, width, height = bbox
96 |
97 | bottom_left = (x_min, y_min)
98 |
99 | return bottom_left, width, height
100 |
101 |
102 | def get_rectangle_edges_from_pascal_bbox(bbox):
103 | xmin_top_left, ymin_top_left, xmax_bottom_right, ymax_bottom_right = bbox
104 |
105 | bottom_left = (xmin_top_left, ymax_bottom_right)
106 | width = xmax_bottom_right - xmin_top_left
107 | height = ymin_top_left - ymax_bottom_right
108 |
109 | return bottom_left, width, height
110 |
111 |
112 | def convert_pascal_bbox_to_coco(xmin, ymin, xmax, ymax):
113 | """
114 |     pascal: top-left-x, top-left-y, bottom-right-x, bottom-right-y
115 |     coco: top-left-x, top-left-y, width, height
116 | """
117 | return [xmin, ymin, xmax - xmin, ymax - ymin]
118 |
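   | 
   | # Illustrative round trips through the functions above (a sketch, not part of the API):
   | #   convert_pascal_voc_bbox_to_cxcywh(10, 20, 50, 80)        -> [30.0, 50.0, 40, 60]
   | #   convert_cxcywh_bbox_to_pascal_voc_format(30, 50, 40, 60) -> [10, 20, 50, 80]
   | #   convert_pascal_bbox_to_coco(10, 20, 50, 80)              -> [10, 20, 40, 60]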
--------------------------------------------------------------------------------
/objdetecteval/metrics/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/alexhock/object-detection-metrics/4ba0cfa7677f8f0b65776567b6ccb7b45ac3b1e0/objdetecteval/metrics/__init__.py
--------------------------------------------------------------------------------
/objdetecteval/metrics/coco_metrics.py:
--------------------------------------------------------------------------------
1 | from pycocotools.coco import COCO
2 | from pycocotools.cocoeval import COCOeval
3 | from typing import List
4 | from objdetecteval.metrics.image_metrics import match_preds_to_targets
5 |
6 |
7 | __all__ = ["get_stats_at_annotation_level", "get_coco_stats", "get_coco_from_dfs"]
8 |
9 | from objdetecteval.data.bbox_formats import convert_pascal_bbox_to_coco
10 |
11 |
12 | class AMLCOCO(COCO):
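   |     # COCO subclass that also accepts an in-memory ground-truth dict;
   |     # the stock pycocotools COCO only loads annotations from a file path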
13 | def __init__(self, annotation_gt=None):
14 |
15 |         if annotation_gt is None or isinstance(annotation_gt, str):
16 | COCO.__init__(self, annotation_file=annotation_gt)
17 | else:
18 | COCO.__init__(self, annotation_file=None)
19 |
20 | self.dataset = annotation_gt
21 | self.createIndex()
22 |
23 |
24 | def get_stats_dict(stats=None, summ_type="bbox"):
25 | if summ_type == "bbox":
26 | if stats is None:
27 | stats = [-1] * 12
28 | r = {
29 | "AP_all": stats[0],
30 | "AP_all_IOU_0_50": stats[1],
31 | "AP_all_IOU_0_75": stats[2],
32 | "AP_small": stats[3],
33 | "AP_medium": stats[4],
34 | "AP_large": stats[5],
35 | "AR_all_dets_1": stats[6],
36 | "AR_all_dets_10": stats[7],
37 | "AR_all": stats[8],
38 | "AR_small": stats[9],
39 | "AR_medium": stats[10],
40 | "AR_large": stats[11],
41 | }
42 | return r
43 | return None
44 |
45 |
46 | def conv_image_ids_to_coco(image_ids):
47 | img_ids = set(image_ids)
48 | images = []
49 | for img_id in img_ids:
50 | images.extend(
51 | [
52 | {
53 | "id": img_id,
54 | },
55 | ]
56 | )
57 | return images
58 |
59 |
60 | def conv_class_labels_to_coco_cats(class_labels):
61 |
62 | cat_set = set()
63 | for label_list in class_labels:
64 | for label in label_list:
65 | cat_set.add(label)
66 |
67 | cats = []
68 | for cat in cat_set:
69 | cats.extend(
70 | [
71 | {
72 | "id": cat,
73 | }
74 | ]
75 | )
76 |
77 | return cats
78 |
79 |
80 | def conv_ground_truth_to_coco_annots(
81 | target_image_ids, target_class_labels, target_bboxes, conv_bbox_func=None
82 | ):
83 |
84 | # conv bbox to coco annotation
85 | annots = []
86 | ann_id = 1
87 | for target_image_id, ground_truth_boxes, ground_truth_labels in zip(
88 | target_image_ids, target_bboxes, target_class_labels
89 | ):
90 | for bbox, label in zip(ground_truth_boxes, ground_truth_labels):
91 |
92 | if conv_bbox_func:
93 | coco_bbox = conv_bbox_func(*bbox)
94 | else:
95 | coco_bbox = bbox
96 |
97 | annots.extend(
98 | [
99 | {
100 | "id": ann_id,
101 | "bbox": coco_bbox, # coco format: x, y, w, h
102 | "category_id": label,
103 | "image_id": target_image_id,
104 | "iscrowd": 0,
105 | "area": coco_bbox[2] * coco_bbox[3],
106 | }
107 | ]
108 | )
109 |
110 | ann_id += 1
111 |
112 | return annots
113 |
114 |
115 | def create_ground_truth(
116 | target_image_ids, target_class_labels, target_bboxes, conv_bbox_func=None
117 | ):
118 |
119 | cats = conv_class_labels_to_coco_cats(target_class_labels)
120 |
121 | images = conv_image_ids_to_coco(target_image_ids)
122 |
123 | annots = conv_ground_truth_to_coco_annots(
124 | target_image_ids, target_class_labels, target_bboxes, conv_bbox_func
125 | )
126 |
127 | return {"images": images, "annotations": annots, "categories": cats}
128 |
129 |
130 | def create_detections(
131 | prediction_image_ids,
132 | predicted_class_confidences,
133 | predicted_class_labels,
134 | predicted_bboxes,
135 | conv_bbox_func=None,
136 | ):
137 |
138 | detections = []
139 | for image_id, class_predictions, confidences, box_predictions in zip(
140 | prediction_image_ids,
141 | predicted_class_labels,
142 | predicted_class_confidences,
143 | predicted_bboxes,
144 | ):
145 | # add prediction boxes
146 | for class_prediction, class_prediction_confidence, bbox in zip(
147 | class_predictions, confidences, box_predictions
148 | ):
149 | if conv_bbox_func:
150 | coco_bbox = conv_bbox_func(*bbox)
151 | else:
152 | coco_bbox = bbox
153 |
154 | detections.extend(
155 | [
156 | {
157 | "bbox": coco_bbox, # coco format: x, y, w, h
158 | "category_id": class_prediction,
159 | "score": class_prediction_confidence,
160 | "image_id": image_id,
161 | }
162 | ]
163 | )
164 |
165 | return detections
166 |
167 |
168 | def get_stats_at_annotation_level(
169 | predicted_class_labels: List[List[int]],
170 | predicted_class_confidences: List[List[float]],
171 | predicted_bboxes: List[List[List[float]]],
172 | prediction_image_ids: List[int],
173 | target_image_ids: List[int],
174 | target_class_labels: List[List[int]],
175 | target_bboxes: List[List[List[float]]],
176 | conv_bbox_func=convert_pascal_bbox_to_coco,
177 | ):
178 | """
179 |     :param predicted_class_labels: A list containing a list of class labels predicted per image
180 | :param predicted_class_confidences: A list containing a list of prediction confidence values per image
181 | :param predicted_bboxes: A list containing a list of bounding boxes, in Pascal VOC format, predicted per image
182 | :param prediction_image_ids: A list of image ids for each image in the prediction lists
183 | :param target_image_ids: A list of image ids for each image in the target lists
184 | :param target_class_labels: A list containing a list of ground truth class labels per image
185 | :param target_bboxes: A list containing a list of ground truth bounding boxes, in Pascal VOC format
186 | :param conv_bbox_func: A function to convert the format of incoming bboxes to coco format, can set to None
187 | :returns: a dictionary of the coco results. Returns all -1s if there are no predictions.
188 |
189 | """
190 |
191 | results = get_coco_stats(
192 | predicted_class_labels,
193 | predicted_class_confidences,
194 | predicted_bboxes,
195 | prediction_image_ids,
196 | target_image_ids,
197 | target_class_labels,
198 | target_bboxes,
199 | conv_bbox_func=conv_bbox_func,
200 | )
201 |
202 | return results["All"]
203 |
204 |
205 | def get_coco_stats(
206 | predicted_class_labels: List[List[int]],
207 | predicted_class_confidences: List[List[float]],
208 | predicted_bboxes: List[List[List[float]]],
209 | prediction_image_ids: List[int],
210 | target_image_ids: List[int],
211 | target_class_labels: List[List[int]],
212 | target_bboxes: List[List[List[float]]],
213 | conv_bbox_func=convert_pascal_bbox_to_coco,
214 | include_per_class=False,
215 | ):
216 | """
217 | :param predicted_class_labels: A list containing a list of class labels predicted per image
218 | :param predicted_class_confidences: A list containing a list of prediction confidence values per image
219 | :param predicted_bboxes: A list containing a list of bounding boxes, in Pascal VOC format, predicted per image
220 | :param prediction_image_ids: A list of image ids for each image in the prediction lists
221 | :param target_image_ids: A list of image ids for each image in the target lists
222 | :param target_class_labels: A list containing a list of ground truth class labels per image
223 | :param target_bboxes: A list containing a list of ground truth bounding boxes, in Pascal VOC format
224 | :param conv_bbox_func: A function to convert the format of incoming bboxes to coco format, can set to None
225 | :param include_per_class: Calculate and return per class result
226 | :returns: a dictionary of the coco results. Returns all -1s if there are no predictions.
227 |
228 | """
229 |
230 | results = {}
231 |
232 | # create coco result dictionary from predictions
233 | dt = create_detections(
234 | prediction_image_ids,
235 | predicted_class_confidences,
236 | predicted_class_labels,
237 | predicted_bboxes,
238 | conv_bbox_func=conv_bbox_func,
239 | )
240 |
241 | if len(dt) == 0:
242 | # no predictions so return all -1s.
243 | results["All"] = get_stats_dict(stats=None)
244 | return results
245 |
246 | # create coco dict for the ground truth
247 | gt = create_ground_truth(
248 | target_image_ids,
249 | target_class_labels,
250 | target_bboxes,
251 | conv_bbox_func=conv_bbox_func,
252 | )
253 |
254 | # load the coco dictionaries
255 | coco_gt = AMLCOCO(annotation_gt=gt)
256 | coco_dt = coco_gt.loadRes(dt)
257 |
258 | # do the eval
259 | coco_eval = COCOeval(coco_gt, coco_dt, "bbox")
260 | image_ids = coco_gt.getImgIds()
261 | coco_eval.params.imgIds = image_ids
262 | coco_eval.evaluate()
263 | coco_eval.accumulate()
264 | coco_eval.summarize()
265 | results["All"] = get_stats_dict(coco_eval.stats)
266 |
267 | if include_per_class:
268 | class_labels = coco_gt.getCatIds()
269 | for class_label in class_labels:
270 | coco_eval.params.catIds = [class_label]
271 | image_ids = coco_gt.getImgIds()
272 | coco_eval.params.imgIds = image_ids
273 | coco_eval.evaluate()
274 | coco_eval.accumulate()
275 | coco_eval.summarize()
276 | results[class_label] = get_stats_dict(coco_eval.stats)
277 |
278 | return results
279 |
280 |
281 | def get_coco_from_dfs(predictions_df, labels_df, output_per_class_metrics=False):
282 | """
283 |     Convert the dataframes to lists and calculate the COCO metrics.
284 |     Setting output_per_class_metrics=True outputs COCO metrics for each class
285 |     in addition to the mAP scores across all classes.
286 |     Assumes Pascal VOC boxes.
287 | """
288 |
289 | # get the matched results
290 | mr = match_preds_to_targets(predictions_df, labels_df)
291 |
292 | image_names = mr['image_ids']
293 | num_image_names = len(image_names)
294 |
295 | # convert image_names to ids. names should be unique due to
296 | # the matching process in match_preds_to_targets.
297 | assert len(set(image_names)) == num_image_names, "image names should be unique"
298 |     int_image_ids = list(range(num_image_names))
299 |
300 | # assume pascal boxes
301 | res = get_coco_stats(
302 | predicted_class_labels=mr['predicted_class_labels'],
303 | predicted_class_confidences=mr['predicted_class_confidences'],
304 | predicted_bboxes=mr['predicted_bboxes'],
305 | prediction_image_ids=int_image_ids,
306 | target_image_ids=int_image_ids,
307 | target_class_labels=mr['target_class_labels'],
308 | target_bboxes=mr['target_bboxes'],
309 | include_per_class=output_per_class_metrics
310 | )
311 | return res
--------------------------------------------------------------------------------
/objdetecteval/metrics/image_metrics.py:
--------------------------------------------------------------------------------
1 | from objdetecteval.metrics.iou import iou
2 | import pandas as pd
3 | from typing import List
4 |
5 | __all__ = [
6 | "get_inference_metrics",
7 | "summarise_inference_metrics",
8 | "match_preds_to_targets",
9 | "get_inference_metrics_from_df"
10 | ]
11 |
12 |
13 | def get_inference_metrics_from_df(predictions_df, labels_df):
14 |
15 | matched_bounding_boxes = match_preds_to_targets(
16 | predictions_df, labels_df
17 | )
18 |
19 | return get_inference_metrics(**matched_bounding_boxes)
20 |
21 |
22 | def get_unique_image_names(predictions_df, labels_df):
23 | # get unique image names from both preds and labels
24 | # need from both to capture images where there were no predictions
25 | # and images where there were predictions but no labels
26 | unique_preds_images = predictions_df['image_name'].unique().tolist()
27 | unique_label_images = labels_df['image_name'].unique().tolist()
28 | unique_images = sorted(list(set([*unique_preds_images, *unique_label_images])))
29 | return unique_images
30 |
31 |
32 | def match_preds_to_targets(predictions_df, labels_df):
33 |
34 | # check for required df columns
35 | pred_required_columns = ['image_name', 'xmin', 'xmax', 'ymin', 'ymax', 'label', 'score']
36 | assert all(col in predictions_df.columns for col in pred_required_columns), \
37 | f"missing or different column names - should be: {pred_required_columns}"
38 | label_required_columns = ['image_name', 'xmin', 'xmax', 'ymin', 'ymax', 'label']
39 | assert all(col in labels_df.columns for col in label_required_columns), \
40 | f"missing or diferent column names - should be {label_required_columns}"
41 |
42 | image_names = []
43 | predicted_class_labels = []
44 | predicted_bboxes = []
45 | predicted_class_confidences = []
46 | target_class_labels = []
47 | target_bboxes = []
48 |
49 |
50 | unique_images = get_unique_image_names(predictions_df, labels_df)
51 |
52 | # index the dataframes by the image_name
53 | preds_df_indexed = predictions_df.set_index('image_name')
54 | labels_df_indexed = labels_df.set_index('image_name')
55 |
56 | # loop through individual images
57 | for image_name in unique_images:
58 |
59 |         # list-based .loc keeps a DataFrame for single rows; the index guard handles missing images
60 |         preds = preds_df_indexed.loc[[image_name]] if image_name in preds_df_indexed.index else preds_df_indexed.iloc[0:0]
61 |         labels = labels_df_indexed.loc[[image_name]] if image_name in labels_df_indexed.index else labels_df_indexed.iloc[0:0]
62 |
63 | # create lists for all the bounding boxes, labels and scores
64 | # for the image, pascal boxes
65 | # [[xmin, ymin, xmax, ymax], []]
66 | # [label, label]
67 | # [score, score]
68 | pred_image_bboxes = preds[['xmin', 'ymin', 'xmax', 'ymax']].values.tolist()
69 | pred_image_class_labels = preds['label'].values.tolist()
70 | pred_image_class_confs = preds['score'].values.tolist()
71 |
72 | # add the predictions lists for the image
73 | image_names.append(image_name)
74 | predicted_class_labels.append(pred_image_class_labels)
75 | predicted_class_confidences.append(pred_image_class_confs)
76 | predicted_bboxes.append(pred_image_bboxes)
77 |
78 | # create lists of the label bboxes and classes
79 | labels_image_bboxes = labels[['xmin', 'ymin', 'xmax', 'ymax']].values.tolist()
80 | labels_image_class_labels = labels['label'].values.tolist()
81 |
82 | # add the label lists for the image
83 | target_class_labels.append(labels_image_class_labels)
84 | target_bboxes.append(labels_image_bboxes)
85 |
86 | return {
87 | "image_ids": image_names,
88 | "predicted_class_labels": predicted_class_labels,
89 | "predicted_bboxes": predicted_bboxes,
90 | "predicted_class_confidences": predicted_class_confidences,
91 | "target_class_labels": target_class_labels,
92 | "target_bboxes": target_bboxes
93 | }
94 |
95 |
96 |
97 | def calc_iou(pred_bbox, true_bboxes):
98 | iou_val = 0.0
99 | for true_bbox in true_bboxes:
100 | # assumes pascal
101 | box_iou_val = iou(pred_bbox, true_bbox)
102 | if box_iou_val > iou_val:
103 | iou_val = box_iou_val
104 | return iou_val
105 |
106 |
107 | def calculate_detections(
108 | all_image_ids,
109 | all_pred_classes,
110 | all_pred_bboxes,
111 | all_pred_confs,
112 | all_true_classes,
113 | all_true_bboxes,
114 | do_iou_calc=True,
115 | ):
116 | assert len(all_image_ids) == len(all_pred_bboxes) == len(all_true_bboxes)
117 |
118 | # ["image_id", "class", "TP", "TN", "FP", "FN", "Confidence", "IoU"]
119 | detections = []
120 |
121 | for image_id, pred_classes, pred_boxes, pred_confs, true_classes, true_boxes in zip(
122 | all_image_ids,
123 | all_pred_classes,
124 | all_pred_bboxes,
125 | all_pred_confs,
126 | all_true_classes,
127 | all_true_bboxes,
128 | ):
129 |
130 | # loop through the predicted boxes for the image
131 | for pred_class, pred_box, pred_conf in zip(
132 | pred_classes, pred_boxes, pred_confs
133 | ):
134 |             if pred_class in true_classes:
135 |                 # true positive; record the best IoU against the image's
136 |                 # ground-truth boxes when requested, -1 otherwise
137 |                 if do_iou_calc:
138 |                     box_iou = calc_iou(pred_box, true_boxes)
139 |                 else:
140 |                     box_iou = -1
141 |                 detections.append(
142 |                     [image_id, pred_class, 1, 0, 0, 0, pred_conf, box_iou]
143 |                 )
144 |             else:
145 |                 # false positive: the predicted class has no ground-truth
146 |                 # instance in this image
147 |                 detections.append(
148 |                     [image_id, pred_class, 0, 0, 1, 0, pred_conf, -1]
149 |                 )
150 |
151 | # false negatives
152 | for true_class in true_classes:
153 | if true_class not in pred_classes:
154 |                 detections.append([image_id, true_class, 0, 0, 0, 1, 0, -1])  # zero confidence, no IoU for a miss
155 |
156 | return detections
157 |
158 |
159 | def summarise_inference_metrics(inference_df):
160 |
161 | class_stats = inference_df.groupby("class")[["TP", "FP", "FN"]].sum()
162 |
163 | # total number for each class
164 | class_stats["Total"] = class_stats[["TP", "FP", "FN"]].sum(axis=1)
165 |
166 | class_stats["Precision"] = class_stats["TP"] / (
167 | class_stats["TP"] + class_stats["FP"]
168 | )
169 | class_stats["Recall"] = class_stats["TP"] / (class_stats["TP"] + class_stats["FN"])
170 |
171 |     # remove the index created by the groupby so the class is a column
172 | class_stats = class_stats.reset_index()
173 |
174 | return class_stats
175 |
176 |
177 | def get_inference_metrics(
178 | image_ids: List[int],
179 | predicted_class_labels: List[List[int]],
180 | predicted_bboxes: List[List[List[float]]],
181 | predicted_class_confidences: List[List[float]],
182 | target_class_labels: List[List[int]],
183 | target_bboxes: List[List[List[float]]],
184 | ):
185 | """
186 |     Create per-detection metrics. IoU is calculated for each prediction but is not used to decide
187 |     TP/FP/FN, which are assigned on class membership alone.
188 |
189 |     Converts the outputs from the models into an inference dataframe containing TP, TN, FP, FN,
190 |     confidence and IoU for each detection. Useful for more detailed analysis of results and plotting.
191 |
192 |     :param image_ids: A list of image ids for each image in the order of the prediction and target lists
193 |     :param predicted_class_labels: A list containing a list of class labels predicted per image
194 |     :param predicted_bboxes: A list containing a list of bounding boxes, in Pascal VOC format, predicted per image
195 |     :param predicted_class_confidences: A list containing a list of prediction confidence values per image
196 |     :param target_class_labels: A list containing a list of ground truth class labels per image
197 |     :param target_bboxes: A list containing a list of ground truth bounding boxes, in Pascal VOC format, per image
198 |     :returns: a DataFrame with one row per detection: image_id, class, TP, TN, FP, FN, Confidence, IoU
199 |     """
200 |
201 | detections = calculate_detections(
202 | image_ids,
203 | predicted_class_labels,
204 | predicted_bboxes,
205 | predicted_class_confidences,
206 | target_class_labels,
207 | target_bboxes,
208 | )
209 |
210 | inference_df = pd.DataFrame(
211 | detections,
212 | columns=["image_id", "class", "TP", "TN", "FP", "FN", "Confidence", "IoU"],
213 | )
214 |
215 | return inference_df
216 |
--------------------------------------------------------------------------------
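A minimal end-to-end sketch of the inference-metrics pipeline above. The dataframes are
hypothetical and use the column layout asserted in match_preds_to_targets; boxes are in
Pascal VOC format.

    import pandas as pd
    from objdetecteval.metrics.image_metrics import (
        get_inference_metrics_from_df,
        summarise_inference_metrics,
    )

    # one image with two predictions against a single ground-truth box
    predictions_df = pd.DataFrame({
        "image_name": ["im1", "im1"],
        "xmin": [10.0, 100.0], "ymin": [10.0, 100.0],
        "xmax": [50.0, 150.0], "ymax": [50.0, 150.0],
        "label": [0, 1],
        "score": [0.9, 0.4],
    })
    labels_df = pd.DataFrame({
        "image_name": ["im1"],
        "xmin": [12.0], "ymin": [12.0],
        "xmax": [50.0], "ymax": [50.0],
        "label": [0],
    })

    # one row per detection: TP/FP/FN flags plus Confidence and IoU
    inference_df = get_inference_metrics_from_df(predictions_df, labels_df)

    # per-class TP/FP/FN totals with Precision and Recall
    summary_df = summarise_inference_metrics(inference_df)

Note that TP/FP/FN here are class-membership based (see calculate_detections); the IoU
column is informational and does not gate a true positive.

--------------------------------------------------------------------------------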
/objdetecteval/metrics/iou.py:
--------------------------------------------------------------------------------
1 | # IoU helpers for Pascal VOC-style boxes (inclusive pixel coordinates)
2 |
3 |
4 | def boxes_intersect(box_a, box_b):
5 | if box_a[0] > box_b[2]:
6 | return False # boxA is right of boxB
7 | if box_b[0] > box_a[2]:
8 | return False # boxA is left of boxB
9 | if box_a[3] < box_b[1]:
10 | return False # boxA is above boxB
11 | if box_a[1] > box_b[3]:
12 | return False # boxA is below boxB
13 | return True
14 |
15 |
16 | def get_intersection_area(box_a, box_b):
17 |     if not boxes_intersect(box_a, box_b):
18 | return 0
19 | xA = max(box_a[0], box_b[0])
20 | yA = max(box_a[1], box_b[1])
21 | xB = min(box_a[2], box_b[2])
22 | yB = min(box_a[3], box_b[3])
23 |     # intersection area; the +1 treats box coordinates as inclusive pixel indices
24 |     return (xB - xA + 1) * (yB - yA + 1)
25 |
26 |
27 | def get_area(box):
28 | return (box[2] - box[0] + 1) * (box[3] - box[1] + 1)
29 |
30 |
31 | def get_union_areas(box_a, box_b, interArea=None):
32 | area_A = get_area(box_a)
33 | area_B = get_area(box_b)
34 | if interArea is None:
35 | interArea = get_intersection_area(box_a, box_b)
36 | return float(area_A + area_B - interArea)
37 |
38 |
39 | def iou(box_a, box_b):
40 |     # if the boxes don't intersect
41 |     if not boxes_intersect(box_a, box_b):
42 | return 0
43 | inter_area = get_intersection_area(box_a, box_b)
44 | union = get_union_areas(box_a, box_b, interArea=inter_area)
45 | # intersection over union
46 | iou = inter_area / union
47 | assert iou >= 0
48 | return iou
49 |
--------------------------------------------------------------------------------
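A worked example of the inclusive (+1) pixel convention used by these helpers. Two 10x10
boxes overlapping in a 5x5 corner give an intersection of (9 - 5 + 1) * (9 - 5 + 1) = 25,
an area of (9 - 0 + 1)^2 = 100 each, and so IoU = 25 / (100 + 100 - 25) = 1/7 ~ 0.143:

    from objdetecteval.metrics.iou import iou

    box_a = [0, 0, 9, 9]      # [xmin, ymin, xmax, ymax], 10x10 pixels inclusive
    box_b = [5, 5, 14, 14]    # 10x10 box overlapping box_a in a 5x5 corner
    print(iou(box_a, box_b))  # 0.142857...

--------------------------------------------------------------------------------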
/objdetecteval/test/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/alexhock/object-detection-metrics/4ba0cfa7677f8f0b65776567b6ccb7b45ac3b1e0/objdetecteval/test/__init__.py
--------------------------------------------------------------------------------
/objdetecteval/test/data/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/alexhock/object-detection-metrics/4ba0cfa7677f8f0b65776567b6ccb7b45ac3b1e0/objdetecteval/test/data/__init__.py
--------------------------------------------------------------------------------
/objdetecteval/test/data/test_bbox_formats.py:
--------------------------------------------------------------------------------
1 | from unittest.mock import MagicMock, Mock
2 |
3 | from pytest import fixture
4 |
5 | from objdetecteval.data.bbox_formats import (
6 | convert_cxcywh_bbox_to_corner_values,
7 | convert_cxcywh_bbox_to_coco_format,
8 | convert_cxcywh_bbox_to_pascal_voc_format,
9 | denormalize_bbox_values,
10 | convert_pascal_voc_bbox_to_cxcywh,
11 | )
12 |
13 | X_CENTRE = 1
14 | Y_CENTRE = 1
15 | WIDTH = 2
16 | HEIGHT = 2
17 |
18 | IM_WIDTH = 5
19 | IM_HEIGHT = 6
20 |
21 |
22 | @fixture
23 | def square_bbox():
24 | return [X_CENTRE, Y_CENTRE, WIDTH, HEIGHT]
25 |
26 |
27 | def test_can_convert_bbox_to_corner_format(square_bbox):
28 | expected_bbox = [2, 0, 0, 2]
29 |
30 | converted_bbox = convert_cxcywh_bbox_to_corner_values(*square_bbox)
31 |
32 | assert expected_bbox == converted_bbox
33 |
34 |
35 | def test_can_convert_bbox_to_coco_format(square_bbox):
36 | expected_bbox = [0, 0, WIDTH, HEIGHT]
37 |
38 | converted_bbox = convert_cxcywh_bbox_to_coco_format(*square_bbox)
39 |
40 | assert expected_bbox == converted_bbox
41 |
42 |
43 | def test_can_convert_bbox_to_pascal_voc_format(square_bbox):
44 | expected_bbox = [0, 0, 2, 2]
45 |
46 | converted_bbox = convert_cxcywh_bbox_to_pascal_voc_format(*square_bbox)
47 |
48 | assert expected_bbox == converted_bbox
49 |
50 |
51 | def test_can_denormalize_bbox(square_bbox):
52 | expected_bbox = [
53 | X_CENTRE * IM_WIDTH,
54 | Y_CENTRE * IM_HEIGHT,
55 | WIDTH * IM_WIDTH,
56 | HEIGHT * IM_HEIGHT,
57 | ]
58 |
59 | denormalized_bbox = denormalize_bbox_values(
60 | *square_bbox, im_height=IM_HEIGHT, im_width=IM_WIDTH
61 | )
62 |
63 | assert expected_bbox == denormalized_bbox
64 |
65 |
66 | def test_can_denormalize_and_convert_bbox(square_bbox):
67 | expected_denormalized_bbox = [
68 | X_CENTRE * IM_WIDTH,
69 | Y_CENTRE * IM_HEIGHT,
70 | WIDTH * IM_WIDTH,
71 | HEIGHT * IM_HEIGHT,
72 | ]
73 | expected_bbox = Mock()
74 | conversion_fn = MagicMock(return_value=expected_bbox)
75 |
76 | denormalized_bbox = denormalize_bbox_values(
77 | *square_bbox,
78 | im_height=IM_HEIGHT,
79 | im_width=IM_WIDTH,
80 | bbox_format_conversion_fn=conversion_fn
81 | )
82 |
83 | conversion_fn.assert_called_once_with(*expected_denormalized_bbox)
84 | assert expected_bbox == denormalized_bbox
85 |
86 |
87 | def test_can_convert_pascal_voc_to_cxcywh():
88 | expected_bbox = [
89 | X_CENTRE * IM_WIDTH,
90 | Y_CENTRE * IM_HEIGHT,
91 | WIDTH * IM_WIDTH,
92 | HEIGHT * IM_HEIGHT,
93 | ]
94 | pascal_voc_box = convert_cxcywh_bbox_to_pascal_voc_format(*expected_bbox)
95 |
96 | converted_box = convert_pascal_voc_bbox_to_cxcywh(*pascal_voc_box)
97 |
98 | assert expected_bbox == converted_box
99 |
--------------------------------------------------------------------------------
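For orientation, the centre-format conversion these tests exercise can be sketched as
follows. This is an illustrative reimplementation consistent with the expected values
above (cx=1, cy=1, w=2, h=2 maps to [0, 0, 2, 2]), not the packaged bbox_formats code:

    def cxcywh_to_pascal_voc(cx, cy, w, h):
        # centre x/y plus width/height -> [xmin, ymin, xmax, ymax]
        return [cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2]

    assert cxcywh_to_pascal_voc(1, 1, 2, 2) == [0, 0, 2, 2]

--------------------------------------------------------------------------------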
/objdetecteval/test/metrics/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/alexhock/object-detection-metrics/4ba0cfa7677f8f0b65776567b6ccb7b45ac3b1e0/objdetecteval/test/metrics/__init__.py
--------------------------------------------------------------------------------
/objdetecteval/test/metrics/test_coco_metrics.py:
--------------------------------------------------------------------------------
1 | from pytest import fixture, approx
2 |
3 | from objdetecteval.metrics.coco_metrics import (
4 | get_stats_at_annotation_level,
5 | conv_image_ids_to_coco,
6 | conv_class_labels_to_coco_cats,
7 | get_coco_stats,
8 | )
9 |
10 |
11 | @fixture
12 | def predictions():
13 | # two classes
14 | # two images
15 | # two bounding box predictions for each image
16 | # confidence level
17 | # one bounding box target ground truth for each image
18 | batch = {
19 | "predicted_class_labels": [
20 | [
21 | 0,
22 | 0,
23 | ],
24 | [1, 0],
25 | ],
26 | "predicted_class_confidences": [[0.6, 0.3], [0.6, 0.3]],
27 | "predicted_bboxes": [
28 | # image 0
29 | [[750.65, 276.56, 963.77, 369.68], [60, 60, 50, 50]],
30 | # image 1
31 | [[1750.65, 276.56, 1963.77, 369.68], [60, 60, 50, 50]],
32 | ],
33 | "prediction_image_ids": [0, 1],
34 | "target_image_ids": [0, 1],
35 | "target_class_labels": [
36 | [0],
37 | [1],
38 | ],
39 | "target_bboxes": [
40 | # image 0
41 | [
42 | [750.65, 276.56, 963.77, 369.68],
43 | ],
44 | # image 1
45 | [
46 | [750.65, 276.56, 963.77, 369.68],
47 | ],
48 | ],
49 | }
50 |
51 | expected_result = {
52 | "AP_all": 0.5,
53 | "AP_all_IOU_0_50": 0.5,
54 | "AP_all_IOU_0_75": 0.5,
55 | "AP_small": -1.0,
56 | "AP_medium": -1.0,
57 | "AP_large": 0.5,
58 | "AR_all_dets_1": 0.5,
59 | "AR_all_dets_10": 0.5,
60 | "AR_all": 0.5,
61 | "AR_small": -1.0,
62 | "AR_medium": -1.0,
63 | "AR_large": 0.5,
64 | }
65 |
66 | return (batch, expected_result)
67 |
68 |
69 | def test_get_stats_at_annotation_level_no_bbox_func(predictions):
70 |
71 | batch_predictions, expected_results = predictions
72 | batch_predictions["conv_bbox_func"] = None
73 | coco_results = get_stats_at_annotation_level(**batch_predictions)
74 |     # results should be unchanged since the bboxes are not converted
75 | assert coco_results == approx(expected_results)
76 |
77 |
78 | def test_get_stats_at_annotation_level(predictions):
79 |
80 | batch_predictions, expected_results = predictions
81 | coco_results = get_stats_at_annotation_level(**batch_predictions)
82 |
83 | assert coco_results == approx(expected_results)
84 |
85 |
86 | def test_conv_class_labels_to_coco_cats():
87 |
88 | expected_result = [{"id": 0}, {"id": 1}, {"id": 2}]
89 |
90 | class_labels = [[2, 0], [1, 0], [2, 0]]
91 |
92 | # need unique list of categories
93 | coco_cats = conv_class_labels_to_coco_cats(class_labels)
94 | assert coco_cats == expected_result
95 |
96 |
97 | def test_conv_image_ids_to_coco():
98 |
99 | expected_result = [{"id": 0}, {"id": 1}, {"id": 2}]
100 |
101 |     # duplicate image ids collapse to a single entry
102 | image_ids = [0, 1, 2, 2]
103 |
104 | image_dict = conv_image_ids_to_coco(image_ids)
105 | assert image_dict == expected_result
106 |
107 |
108 | def test_get_coco_stats(predictions):
109 |
110 | batch_predictions, expected_results = predictions
111 | batch_predictions["conv_bbox_func"] = None
112 | batch_predictions["include_per_class"] = False
113 | coco_results = get_coco_stats(**batch_predictions)
114 |
115 | assert coco_results["All"] == approx(expected_results)
116 |
117 |
118 | def test_get_coco_stats_class_level(predictions):
119 |
120 | batch_predictions, expected_results = predictions
121 | batch_predictions["conv_bbox_func"] = None
122 | batch_predictions["include_per_class"] = True
123 | coco_results = get_coco_stats(**batch_predictions)
124 |
125 | class_0_results = {
126 | "AP_all": 0.9999999,
127 | "AP_all_IOU_0_50": 0.9999999,
128 | "AP_all_IOU_0_75": 0.9999999,
129 | "AP_large": 0.9999999,
130 | "AP_medium": -1.0,
131 | "AP_small": -1.0,
132 | "AR_all": 1.0,
133 | "AR_all_dets_1": 1.0,
134 | "AR_all_dets_10": 1.0,
135 | "AR_large": 1.0,
136 | "AR_medium": -1.0,
137 | "AR_small": -1.0,
138 | }
139 | class_1_results = {
140 | "AP_all": 0.0,
141 | "AP_all_IOU_0_50": 0.0,
142 | "AP_all_IOU_0_75": 0.0,
143 | "AP_large": 0.0,
144 | "AP_medium": -1.0,
145 | "AP_small": -1.0,
146 | "AR_all": 0.0,
147 | "AR_all_dets_1": 0.0,
148 | "AR_all_dets_10": 0.0,
149 | "AR_large": 0.0,
150 | "AR_medium": -1.0,
151 | "AR_small": -1.0,
152 | }
153 |
154 | assert coco_results[0] == approx(class_0_results)
155 | assert coco_results[1] == approx(class_1_results)
156 |
--------------------------------------------------------------------------------
/objdetecteval/test/metrics/test_image_metrics.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | from pandas.testing import assert_frame_equal
3 | from pytest import fixture, approx
4 |
5 | from objdetecteval.metrics.image_metrics import (
6 | get_inference_metrics,
7 | summarise_inference_metrics,
8 | )
9 |
10 |
11 | @fixture
12 | def predictions():
13 | # two classes
14 | # two images
15 | # two bounding box predictions for each image
16 | # confidence level
17 | # one bounding box target ground truth for each image
18 | batch = {
19 | "predicted_class_labels": [
20 | [
21 | 0,
22 | 0,
23 | ],
24 | [1, 0],
25 | ],
26 | "predicted_class_confidences": [[0.6, 0.3], [0.6, 0.3]],
27 | "predicted_bboxes": [
28 | # image 0
29 | [[750.65, 276.56, 963.77, 369.68], [60, 60, 50, 50]],
30 | # image 1
31 | [[1750.65, 276.56, 1963.77, 369.68], [60, 60, 50, 50]],
32 | ],
33 | "prediction_image_ids": [0, 1],
34 | "target_image_ids": [0, 1],
35 | "target_class_labels": [
36 | [0],
37 | [1],
38 | ],
39 | "target_bboxes": [
40 | # image 0
41 | [
42 | [750.65, 276.56, 963.77, 369.68],
43 | ],
44 | # image 1
45 | [
46 | [750.65, 276.56, 963.77, 369.68],
47 | ],
48 | ],
49 | }
50 |
51 | expected_inference_df = pd.DataFrame(
52 | {
53 | "image_id": [0, 0, 1, 1],
54 | "class": [0, 0, 1, 0],
55 | "TP": [1, 1, 1, 0],
56 | "TN": [0, 0, 0, 0],
57 | "FP": [0, 0, 0, 1],
58 | "FN": [0, 0, 0, 0],
59 | "Confidence": [0.6, 0.3, 0.6, 0.3],
60 | "IoU": [1.0, 0.0, 0.0, -1.0],
61 | }
62 | )
63 |
64 | expected_class_metrics_df = pd.DataFrame(
65 | {
66 | "class": [0, 1],
67 | "TP": [2, 1],
68 | "FP": [1, 0],
69 | "FN": [0, 0],
70 | "Total": [3, 1],
71 | "Precision": [0.666667, 1.0],
72 | "Recall": [1.0, 1.0],
73 | }
74 | )
75 |
76 | return (batch, expected_inference_df, expected_class_metrics_df)
77 |
78 |
79 | def test_get_inference_metrics(predictions):
80 |
81 | preds, expected_inference_df, _ = predictions
82 |
83 | inference_df = get_inference_metrics(
84 | image_ids=preds["prediction_image_ids"],
85 | predicted_class_labels=preds["predicted_class_labels"],
86 | predicted_class_confidences=preds["predicted_class_confidences"],
87 | predicted_bboxes=preds["predicted_bboxes"],
88 | target_class_labels=preds["target_class_labels"],
89 | target_bboxes=preds["target_bboxes"],
90 | )
91 |
92 | assert_frame_equal(expected_inference_df, inference_df)
93 |
94 |
95 | def test_summarise_inference_metrics(predictions):
96 |
97 | preds, _, expected_class_metrics_df = predictions
98 |
99 | inference_df = get_inference_metrics(
100 | image_ids=preds["prediction_image_ids"],
101 | predicted_class_labels=preds["predicted_class_labels"],
102 | predicted_class_confidences=preds["predicted_class_confidences"],
103 | predicted_bboxes=preds["predicted_bboxes"],
104 | target_class_labels=preds["target_class_labels"],
105 | target_bboxes=preds["target_bboxes"],
106 | )
107 |
108 | summary_metrics_df = summarise_inference_metrics(inference_df)
109 |
110 | assert_frame_equal(
111 | expected_class_metrics_df, summary_metrics_df, check_exact=False, rtol=1e-4
112 | )
113 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | pycocotools~=2.0.2
2 | pandas
3 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | import io
5 | import os
6 |
7 | from pkg_resources import Requirement
8 | from setuptools import find_packages, setup
9 |
10 | # Package meta-data.
11 | NAME = "objdetecteval"
12 | DESCRIPTION = ""
13 | URL = "repo"
14 | EMAIL = ""
15 | AUTHOR = "Microsoft CSE"
16 | REQUIRES_PYTHON = ">=3.6.0"
17 | VERSION = "0.01"  # setuptools expects the version as a string
18 |
19 | FILEPATH = os.path.abspath(os.path.dirname(__file__))
20 | REQUIRED = []
21 |
22 | with open("requirements.txt", "r") as f:
23 | for line in f.readlines():
24 | try:
25 | REQUIRED.append(str(Requirement.parse(line)))
26 | except ValueError:
27 | pass
28 |
29 | # Import the README and use it as the long-description.
30 | # Note: this will only work if 'README.md' is present in your MANIFEST.in file!
31 | try:
32 | with io.open(os.path.join(FILEPATH, "README.md"), encoding="utf-8") as f:
33 | LONG_DESCRIPTION = "\n" + f.read()
34 | except FileNotFoundError:
35 | LONG_DESCRIPTION = DESCRIPTION
36 |
37 | # Where the magic happens:
38 | setup(
39 | name=NAME,
40 | version=VERSION,
41 | description=DESCRIPTION,
42 | long_description=LONG_DESCRIPTION,
43 | long_description_content_type="text/markdown",
44 | author=AUTHOR,
45 | author_email=EMAIL,
46 | python_requires=REQUIRES_PYTHON,
47 | url=URL,
48 | packages=find_packages(
49 | exclude=["tests", "*.tests", "*.tests.*", "tests.*", "test"]
50 | ),
51 | scripts=[],
52 | install_requires=REQUIRED,
53 | include_package_data=True,
54 | classifiers=[
55 | "License :: Other/Proprietary License",
56 | "Programming Language :: Python",
57 | "Programming Language :: Python :: 3",
58 | "Programming Language :: Python :: 3.7",
59 | "Programming Language :: Python :: Implementation :: CPython",
60 | "Programming Language :: Python :: Implementation :: PyPy",
61 | ],
62 | )
63 |
--------------------------------------------------------------------------------