├── cocoapi ├── PythonAPI │ ├── pycocotools │ │ ├── __init__.py │ │ ├── mask.py │ │ ├── _cocoeval.pyx │ │ └── _mask.pyx │ ├── Makefile │ ├── setup.py │ └── pycocoEvalDemo.ipynb ├── MatlabAPI │ ├── private │ │ ├── gasonMex.mexa64 │ │ ├── gasonMex.mexmaci64 │ │ ├── getPrmDflt.m │ │ └── gasonMex.cpp │ ├── cocoDemo.m │ ├── evalDemo.m │ ├── gason.m │ ├── MaskApi.m │ ├── CocoApi.m │ └── CocoUtils.m ├── .travis.yml ├── LuaAPI │ ├── env.lua │ ├── init.lua │ ├── cocoDemo.lua │ ├── rocks │ │ └── coco-scm-1.rockspec │ ├── MaskApi.lua │ └── CocoApi.lua ├── README.txt ├── license.txt └── common │ ├── maskApi.h │ ├── gason.h │ ├── maskApi.c │ └── gason.cpp ├── .gitignore ├── README.md └── detectron2 └── evaluation ├── f_boundary.py ├── namingerror_evaluator.py ├── tpmqscore_evaluator.py ├── connectiveness_evaluator.py ├── f1score_evaluator.py └── lrp_evaluator.py /cocoapi/PythonAPI/pycocotools/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = 'tylin' 2 | -------------------------------------------------------------------------------- /cocoapi/MatlabAPI/private/gasonMex.mexa64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rohitrango/beyond-map/HEAD/cocoapi/MatlabAPI/private/gasonMex.mexa64 -------------------------------------------------------------------------------- /cocoapi/MatlabAPI/private/gasonMex.mexmaci64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rohitrango/beyond-map/HEAD/cocoapi/MatlabAPI/private/gasonMex.mexmaci64 -------------------------------------------------------------------------------- /cocoapi/.travis.yml: -------------------------------------------------------------------------------- 1 | group: travis_latest 2 | language: python 3 | cache: pip 4 | python: 5 | - 2.7 6 | - 3.6 7 | install: 8 | - pip install --upgrade pip 9 | - pip install pycocotools 10 | 
script: 11 | - true 12 | -------------------------------------------------------------------------------- /cocoapi/PythonAPI/Makefile: -------------------------------------------------------------------------------- 1 | all: 2 | # install pycocotools locally 3 | python setup.py build_ext --inplace 4 | rm -rf build 5 | 6 | install: 7 | # install pycocotools to the Python site-packages 8 | python setup.py build_ext install 9 | rm -rf build -------------------------------------------------------------------------------- /cocoapi/LuaAPI/env.lua: -------------------------------------------------------------------------------- 1 | --[[---------------------------------------------------------------------------- 2 | 3 | Common Objects in COntext (COCO) Toolbox. version 3.0 4 | Data, paper, and tutorials available at: http://mscoco.org/ 5 | Code written by Pedro O. Pinheiro and Piotr Dollar, 2016. 6 | Licensed under the Simplified BSD License [see coco/license.txt] 7 | 8 | ------------------------------------------------------------------------------]] 9 | 10 | local coco = {} 11 | return coco 12 | -------------------------------------------------------------------------------- /cocoapi/LuaAPI/init.lua: -------------------------------------------------------------------------------- 1 | --[[---------------------------------------------------------------------------- 2 | 3 | Common Objects in COntext (COCO) Toolbox. version 3.0 4 | Data, paper, and tutorials available at: http://mscoco.org/ 5 | Code written by Pedro O. Pinheiro and Piotr Dollar, 2016. 
6 | Licensed under the Simplified BSD License [see coco/license.txt] 7 | 8 | ------------------------------------------------------------------------------]] 9 | 10 | local coco = require 'coco.env' 11 | require 'coco.CocoApi' 12 | require 'coco.MaskApi' 13 | return coco 14 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | cocoapi/images/ 2 | cocoapi/annotations/ 3 | cocoapi/results/ 4 | cocoapi/external/ 5 | cocoapi/.DS_Store 6 | 7 | cocoapi/MatlabAPI/analyze*/ 8 | cocoapi/MatlabAPI/visualize*/ 9 | cocoapi/MatlabAPI/private/maskApiMex.* 10 | 11 | cocoapi/PythonAPI/pycocotools/__init__.pyc 12 | cocoapi/PythonAPI/pycocotools/_mask.c 13 | cocoapi/PythonAPI/pycocotools/_mask.so 14 | cocoapi/PythonAPI/pycocotools/coco.pyc 15 | cocoapi/PythonAPI/pycocotools/cocoeval.pyc 16 | cocoapi/PythonAPI/pycocotools/mask.pyc 17 | cocoapi/PythonAPI/build/ 18 | cocoapi/PythonAPI/pycocotools.egg-info/ 19 | cocoapi/PythonAPI/pycocotools/__pycache__/ 20 | cocoapi/PythonAPI/pycocotools/_mask.cpython-*.so 21 | 22 | cocoapi/PythonAPI/pycocotools/_cocoeval.c 23 | cocoapi/PythonAPI/pycocotools/_cocoeval*so 24 | cocoapi/PythonAPI/cython_debug 25 | 26 | -------------------------------------------------------------------------------- /cocoapi/LuaAPI/cocoDemo.lua: -------------------------------------------------------------------------------- 1 | -- Demo for the CocoApi (see CocoApi.lua) 2 | coco = require 'coco' 3 | image = require 'image' 4 | 5 | -- initialize COCO api (please specify dataType/annType below) 6 | annTypes = { 'instances', 'captions', 'person_keypoints' } 7 | dataType, annType = 'val2014', annTypes[1]; -- specify dataType/annType 8 | annFile = '../annotations/'..annType..'_'..dataType..'.json' 9 | cocoApi=coco.CocoApi(annFile) 10 | 11 | -- get all image ids, select one at random 12 | imgIds = cocoApi:getImgIds() 13 | imgId = 
imgIds[torch.random(imgIds:numel())] 14 | 15 | -- load image 16 | img = cocoApi:loadImgs(imgId)[1] 17 | I = image.load('../images/'..dataType..'/'..img.file_name,3) 18 | 19 | -- load and display instance annotations 20 | annIds = cocoApi:getAnnIds({imgId=imgId}) 21 | anns = cocoApi:loadAnns(annIds) 22 | J = cocoApi:showAnns(I,anns) 23 | image.save('RES_'..img.file_name,J:double()) 24 | -------------------------------------------------------------------------------- /cocoapi/LuaAPI/rocks/coco-scm-1.rockspec: -------------------------------------------------------------------------------- 1 | package = "coco" 2 | version = "scm-1" 3 | 4 | source = { 5 | url = "git://github.com/pdollar/coco.git" 6 | } 7 | 8 | description = { 9 | summary = "Interface for accessing the Microsoft COCO dataset", 10 | detailed = "See http://mscoco.org/ for more details", 11 | homepage = "https://github.com/pdollar/coco", 12 | license = "Simplified BSD" 13 | } 14 | 15 | dependencies = { 16 | "lua >= 5.1", 17 | "torch >= 7.0", 18 | "lua-cjson" 19 | } 20 | 21 | build = { 22 | type = "builtin", 23 | modules = { 24 | ["coco.env"] = "LuaAPI/env.lua", 25 | ["coco.init"] = "LuaAPI/init.lua", 26 | ["coco.MaskApi"] = "LuaAPI/MaskApi.lua", 27 | ["coco.CocoApi"] = "LuaAPI/CocoApi.lua", 28 | libmaskapi = { 29 | sources = { "common/maskApi.c" }, 30 | incdirs = { "common/" } 31 | } 32 | } 33 | } 34 | 35 | -- luarocks make LuaAPI/rocks/coco-scm-1.rockspec 36 | -- https://github.com/pdollar/coco/raw/master/LuaAPI/rocks/coco-scm-1.rockspec 37 | -------------------------------------------------------------------------------- /cocoapi/PythonAPI/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, Extension 2 | import numpy as np 3 | from Cython.Build import cythonize 4 | 5 | # To compile and install locally run "python setup.py build_ext --inplace" 6 | # To install library to Python site-packages run "python setup.py build_ext install" 7 
| 8 | ext_modules = [ 9 | Extension( 10 | 'pycocotools._mask', 11 | sources=['../common/maskApi.c', 'pycocotools/_mask.pyx'], 12 | include_dirs = [np.get_include(), '../common'], 13 | extra_compile_args=['-Wno-cpp', '-Wno-unused-function', '-std=c99'], 14 | ), 15 | Extension( 16 | 'pycocotools._cocoeval', 17 | sources=['pycocotools/_cocoeval.pyx'], 18 | include_dirs = [np.get_include(), '../common'], 19 | extra_compile_args=['-Wno-cpp', '-Wno-unused-function', '-std=c99'], 20 | ) 21 | ] 22 | 23 | setup( 24 | name='pycocotools', 25 | packages=['pycocotools'], 26 | package_dir = {'pycocotools': 'pycocotools'}, 27 | install_requires=[ 28 | 'setuptools>=18.0', 29 | 'cython>=0.27.3', 30 | 'matplotlib>=2.1.0' 31 | ], 32 | version='2.0', 33 | ext_modules=cythonize(ext_modules), 34 | ) 35 | -------------------------------------------------------------------------------- /cocoapi/README.txt: -------------------------------------------------------------------------------- 1 | COCO API - http://cocodataset.org/ 2 | 3 | COCO is a large image dataset designed for object detection, segmentation, person keypoints detection, stuff segmentation, and caption generation. This package provides Matlab, Python, and Lua APIs that assists in loading, parsing, and visualizing the annotations in COCO. Please visit http://cocodataset.org/ for more information on COCO, including for the data, paper, and tutorials. The exact format of the annotations is also described on the COCO website. The Matlab and Python APIs are complete, the Lua API provides only basic functionality. 4 | 5 | In addition to this API, please download both the COCO images and annotations in order to run the demos and use the API. Both are available on the project website. 6 | -Please download, unzip, and place the images in: coco/images/ 7 | -Please download and place the annotations in: coco/annotations/ 8 | For substantially more details on the API please see http://cocodataset.org/#download. 
9 | 10 | After downloading the images and annotations, run the Matlab, Python, or Lua demos for example usage. 11 | 12 | To install: 13 | -For Matlab, add coco/MatlabApi to the Matlab path (OSX/Linux binaries provided) 14 | -For Python, run "make" under coco/PythonAPI 15 | -For Lua, run “luarocks make LuaAPI/rocks/coco-scm-1.rockspec” under coco/ 16 | -------------------------------------------------------------------------------- /cocoapi/MatlabAPI/cocoDemo.m: -------------------------------------------------------------------------------- 1 | %% Demo for the CocoApi (see CocoApi.m) 2 | 3 | %% initialize COCO api (please specify dataType/annType below) 4 | annTypes = { 'instances', 'captions', 'person_keypoints' }; 5 | dataType='val2014'; annType=annTypes{1}; % specify dataType/annType 6 | annFile=sprintf('../annotations/%s_%s.json',annType,dataType); 7 | coco=CocoApi(annFile); 8 | 9 | %% display COCO categories and supercategories 10 | if( ~strcmp(annType,'captions') ) 11 | cats = coco.loadCats(coco.getCatIds()); 12 | nms={cats.name}; fprintf('COCO categories: '); 13 | fprintf('%s, ',nms{:}); fprintf('\n'); 14 | nms=unique({cats.supercategory}); fprintf('COCO supercategories: '); 15 | fprintf('%s, ',nms{:}); fprintf('\n'); 16 | end 17 | 18 | %% get all images containing given categories, select one at random 19 | catIds = coco.getCatIds('catNms',{'person','dog','skateboard'}); 20 | imgIds = coco.getImgIds('catIds',catIds); 21 | imgId = imgIds(randi(length(imgIds))); 22 | 23 | %% load and display image 24 | img = coco.loadImgs(imgId); 25 | I = imread(sprintf('../images/%s/%s',dataType,img.file_name)); 26 | figure(1); imagesc(I); axis('image'); set(gca,'XTick',[],'YTick',[]) 27 | 28 | %% load and display annotations 29 | annIds = coco.getAnnIds('imgIds',imgId,'catIds',catIds,'iscrowd',[]); 30 | anns = coco.loadAnns(annIds); coco.showAnns(anns); 31 | -------------------------------------------------------------------------------- /cocoapi/license.txt: 
-------------------------------------------------------------------------------- 1 | Copyright (c) 2014, Piotr Dollar and Tsung-Yi Lin 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | 7 | 1. Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | 2. Redistributions in binary form must reproduce the above copyright notice, 10 | this list of conditions and the following disclaimer in the documentation 11 | and/or other materials provided with the distribution. 12 | 13 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 14 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 15 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 16 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 17 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 18 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 19 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 20 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 21 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 22 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 23 | 24 | The views and conclusions contained in the software and documentation are those 25 | of the authors and should not be interpreted as representing official policies, 26 | either expressed or implied, of the FreeBSD Project. 
27 | -------------------------------------------------------------------------------- /cocoapi/MatlabAPI/evalDemo.m: -------------------------------------------------------------------------------- 1 | %% Demo demonstrating the algorithm result formats for COCO 2 | 3 | %% select results type for demo (either bbox or segm) 4 | type = {'segm','bbox','keypoints'}; type = type{1}; % specify type here 5 | fprintf('Running demo for *%s* results.\n\n',type); 6 | 7 | %% initialize COCO ground truth api 8 | dataDir='../'; prefix='instances'; dataType='val2014'; 9 | if(strcmp(type,'keypoints')), prefix='person_keypoints'; end 10 | annFile=sprintf('%s/annotations/%s_%s.json',dataDir,prefix,dataType); 11 | cocoGt=CocoApi(annFile); 12 | 13 | %% initialize COCO detections api 14 | resFile='%s/results/%s_%s_fake%s100_results.json'; 15 | resFile=sprintf(resFile,dataDir,prefix,dataType,type); 16 | cocoDt=cocoGt.loadRes(resFile); 17 | 18 | %% visialuze gt and dt side by side 19 | imgIds=sort(cocoGt.getImgIds()); imgIds=imgIds(1:100); 20 | imgId = imgIds(randi(100)); img = cocoGt.loadImgs(imgId); 21 | I = imread(sprintf('%s/images/val2014/%s',dataDir,img.file_name)); 22 | figure(1); subplot(1,2,1); imagesc(I); axis('image'); axis off; 23 | annIds = cocoGt.getAnnIds('imgIds',imgId); title('ground truth') 24 | anns = cocoGt.loadAnns(annIds); cocoGt.showAnns(anns); 25 | figure(1); subplot(1,2,2); imagesc(I); axis('image'); axis off; 26 | annIds = cocoDt.getAnnIds('imgIds',imgId); title('results') 27 | anns = cocoDt.loadAnns(annIds); cocoDt.showAnns(anns); 28 | 29 | %% load raw JSON and show exact format for results 30 | fprintf('results structure have the following format:\n'); 31 | res = gason(fileread(resFile)); disp(res) 32 | 33 | %% the following command can be used to save the results back to disk 34 | if(0), f=fopen(resFile,'w'); fwrite(f,gason(res)); fclose(f); end 35 | 36 | %% run COCO evaluation code (see CocoEval.m) 37 | cocoEval=CocoEval(cocoGt,cocoDt,type); 38 | 
cocoEval.params.imgIds=imgIds; 39 | cocoEval.evaluate(); 40 | cocoEval.accumulate(); 41 | cocoEval.summarize(); 42 | 43 | %% generate Derek Hoiem style analyis of false positives (slow) 44 | if(0), cocoEval.analyze(); end 45 | -------------------------------------------------------------------------------- /cocoapi/common/maskApi.h: -------------------------------------------------------------------------------- 1 | /************************************************************************** 2 | * Microsoft COCO Toolbox. version 2.0 3 | * Data, paper, and tutorials available at: http://mscoco.org/ 4 | * Code written by Piotr Dollar and Tsung-Yi Lin, 2015. 5 | * Licensed under the Simplified BSD License [see coco/license.txt] 6 | **************************************************************************/ 7 | #pragma once 8 | 9 | typedef unsigned int uint; 10 | typedef unsigned long siz; 11 | typedef unsigned char byte; 12 | typedef double* BB; 13 | typedef struct { siz h, w, m; uint *cnts; } RLE; 14 | 15 | /* Initialize/destroy RLE. */ 16 | void rleInit( RLE *R, siz h, siz w, siz m, uint *cnts ); 17 | void rleFree( RLE *R ); 18 | 19 | /* Initialize/destroy RLE array. */ 20 | void rlesInit( RLE **R, siz n ); 21 | void rlesFree( RLE **R, siz n ); 22 | 23 | /* Encode binary masks using RLE. */ 24 | void rleEncode( RLE *R, const byte *mask, siz h, siz w, siz n ); 25 | 26 | /* Decode binary masks encoded via RLE. */ 27 | void rleDecode( const RLE *R, byte *mask, siz n ); 28 | 29 | /* Compute union or intersection of encoded masks. */ 30 | void rleMerge( const RLE *R, RLE *M, siz n, int intersect ); 31 | 32 | /* Compute area of encoded masks. */ 33 | void rleArea( const RLE *R, siz n, uint *a ); 34 | 35 | /* Compute intersection over union between masks. 
*/ 36 | void rleIou( RLE *dt, RLE *gt, siz m, siz n, byte *iscrowd, double *o ); 37 | 38 | /* Compute non-maximum suppression between bounding masks */ 39 | void rleNms( RLE *dt, siz n, uint *keep, double thr ); 40 | 41 | /* Compute intersection over union between bounding boxes. */ 42 | void bbIou( BB dt, BB gt, siz m, siz n, byte *iscrowd, double *o ); 43 | 44 | /* Compute non-maximum suppression between bounding boxes */ 45 | void bbNms( BB dt, siz n, uint *keep, double thr ); 46 | 47 | /* Get bounding boxes surrounding encoded masks. */ 48 | void rleToBbox( const RLE *R, BB bb, siz n ); 49 | 50 | /* Convert bounding boxes to encoded masks. */ 51 | void rleFrBbox( RLE *R, const BB bb, siz h, siz w, siz n ); 52 | 53 | /* Convert polygon to encoded mask. */ 54 | void rleFrPoly( RLE *R, const double *xy, siz k, siz h, siz w ); 55 | 56 | /* Get compressed string representation of encoded mask. */ 57 | char* rleToString( const RLE *R ); 58 | 59 | /* Convert from compressed string representation of encoded mask. */ 60 | void rleFrString( RLE *R, char *s, siz h, siz w ); 61 | -------------------------------------------------------------------------------- /cocoapi/MatlabAPI/gason.m: -------------------------------------------------------------------------------- 1 | function out = gason( in ) 2 | % Convert between JSON strings and corresponding JSON objects. 3 | % 4 | % This parser is based on Gason written and maintained by Ivan Vashchaev: 5 | % https://github.com/vivkin/gason 6 | % Gason is a "lightweight and fast JSON parser for C++". Please see the 7 | % above link for license information and additional details about Gason. 8 | % 9 | % Given a JSON string, gason calls the C++ parser and converts the output 10 | % into an appropriate Matlab structure. As the parsing is performed in mex 11 | % the resulting parser is blazingly fast. Large JSON structs (100MB+) take 12 | % only a few seconds to parse (compared to hours for pure Matlab parsers). 
13 | % 14 | % Given a JSON object, gason calls the C++ encoder to convert the object 15 | % back into a JSON string representation. Nearly any Matlab struct, cell 16 | % array, or numeric array represent a valid JSON object. Note that gason() 17 | % can be used to go both from JSON string to JSON object and back. 18 | % 19 | % Gason requires C++11 to compile (for GCC this requires version 4.7 or 20 | % later). The following command compiles the parser (may require tweaking): 21 | % mex('CXXFLAGS=\$CXXFLAGS -std=c++11 -Wall','-largeArrayDims',... 22 | % 'private/gasonMex.cpp','../common/gason.cpp',... 23 | % '-I../common/','-outdir','private'); 24 | % Note the use of the "-std=c++11" flag. A number of precompiled binaries 25 | % are included, please do not contact us for help with compiling. If needed 26 | % you can specify a compiler by adding the option 'CXX="/usr/bin/g++"'. 27 | % 28 | % Note that by default JSON arrays that contain only numbers are stored as 29 | % regular Matlab arrays. Likewise, JSON arrays that contain only objects of 30 | % the same type are stored as Matlab struct arrays. This is much faster and 31 | % can use considerably less memory than always using Matlab cell arrays. 32 | % 33 | % USAGE 34 | % object = gason( string ) 35 | % string = gason( object ) 36 | % 37 | % INPUTS/OUTPUTS 38 | % string - JSON string 39 | % object - JSON object 40 | % 41 | % EXAMPLE 42 | % o = struct('first',{'piotr','ty'},'last',{'dollar','lin'}) 43 | % s = gason( o ) % convert JSON object -> JSON string 44 | % p = gason( s ) % convert JSON string -> JSON object 45 | % 46 | % See also 47 | % 48 | % Microsoft COCO Toolbox. version 2.0 49 | % Data, paper, and tutorials available at: http://mscoco.org/ 50 | % Code written by Piotr Dollar and Tsung-Yi Lin, 2015. 
51 | % Licensed under the Simplified BSD License [see coco/license.txt] 52 | 53 | out = gasonMex( 'convert', in ); 54 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Beyond mAP: Towards better evaluation of instance segmentation 2 | 3 | This is the repository containing the source code for the _CVPR 2023_ [paper](https://arxiv.org/pdf/2207.01614.pdf) ✨. 4 | 5 | ## Installation instructions 6 | Go to `cocoapi/PythonAPI` directory and install pycocotools as follows: 7 | ``` 8 | python setup.py build_ext --inplace 9 | ``` 10 | 11 | Next, copy the files from `detectron2/evaluation` into your detectron2 installation at `detectron2/detectron2/evaluation/`. Your detectron2 directory should look like this: 12 | ``` 13 | detectron2 14 | |-- README.md 15 | |-- configs 16 | | `-- ... 17 | |-- datasets 18 | | `-- ... 19 | |-- demo 20 | | `-- ... 21 | |-- detectron2 22 | | |-- evaluation 23 | | | |-- __init__.py 24 | | | |-- connectiveness_evaluator.py 25 | | | |-- f1score_evaluator.py 26 | | | |-- f_boundary.py 27 | | | |-- lrp_evaluator.py 28 | | | |-- namingerror_evaluator.py 29 | | | |-- tpmqscore_evaluator.py 30 | | | `-- 31 | | `-- ... 32 | `-- ... 
33 | ``` 34 | 35 | Next, replace the following line in `build_evaluator` function in your `train_net.py` script: 36 | 37 | ``` 38 | if evaluator_type in ["coco", "coco_panoptic_seg"]: 39 | evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder)) 40 | ``` 41 | 42 | to 43 | ``` 44 | if evaluator_type in ["coco", "coco_panoptic_seg"]: 45 | evaluator_list.append(F1ScoreEvaluator(dataset_name, cfg, True, output_folder)) 46 | evaluator_list.append(NamingErrorEvaluator(dataset_name, cfg, True, output_folder)) 47 | evaluator_list.append(ConnectivenessEvaluator(dataset_name, cfg, True, output_folder)) 48 | evaluator_list.append(LRPEvaluator(dataset_name, cfg, True, output_folder)) 49 | evaluator_list.append(TPMQScoreEvaluator(dataset_name, cfg, True, output_folder)) 50 | evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder)) 51 | ``` 52 | 53 | Finally, run your code! 54 | 55 | ## Bibtex 56 | 57 | If you find our work useful for your research, please cite: 58 | ``` 59 | @article{jena2023beyond, 60 | author = {Jena, Rohit and Zhornyak, Lukas and Doiphode, Nehal and Chaudhari, Pratik and Buch, Vivek and Gee, James and Shi, Jianbo}, 61 | title = {Beyond mAP: Towards better evaluation of instance segmentation}, 62 | journal = {CVPR}, 63 | year = {2023}, 64 | } 65 | ``` 66 | 67 | ## To-do 68 | I'm listing out the to-do items that I feel are important, feel free to convey your suggestions or feedback through the Issue Tracker, or email me directly. 69 | 70 | - [ ] Add Cython modules for Naming Error 71 | - [ ] Currently, all modules use their own COCOEval, resulting in redundant evaluation. Idea is to collate all the evaluators (except `ConnectivenessEvaluator` and `NamingErrorEvaluator`) to use the same COCOEval object. 72 | - [ ] Add mmdet implementations of all evaluators. 
73 | -------------------------------------------------------------------------------- /cocoapi/MatlabAPI/private/getPrmDflt.m: -------------------------------------------------------------------------------- 1 | function varargout = getPrmDflt( prm, dfs, checkExtra ) 2 | % Helper to set default values (if not already set) of parameter struct. 3 | % 4 | % Takes input parameters and a list of 'name'/default pairs, and for each 5 | % 'name' for which prm has no value (prm.(name) is not a field or 'name' 6 | % does not appear in prm list), getPrmDflt assigns the given default 7 | % value. If default value for variable 'name' is 'REQ', and value for 8 | % 'name' is not given, an error is thrown. See below for usage details. 9 | % 10 | % USAGE (nargout==1) 11 | % prm = getPrmDflt( prm, dfs, [checkExtra] ) 12 | % 13 | % USAGE (nargout>1) 14 | % [ param1 ... paramN ] = getPrmDflt( prm, dfs, [checkExtra] ) 15 | % 16 | % INPUTS 17 | % prm - param struct or cell of form {'name1' v1 'name2' v2 ...} 18 | % dfs - cell of form {'name1' def1 'name2' def2 ...} 19 | % checkExtra - [0] if 1 throw error if prm contains params not in dfs 20 | % if -1 if prm contains params not in dfs adds them 21 | % 22 | % OUTPUTS (nargout==1) 23 | % prm - parameter struct with fields 'name1' through 'nameN' assigned 24 | % 25 | % OUTPUTS (nargout>1) 26 | % param1 - value assigned to parameter with 'name1' 27 | % ... 28 | % paramN - value assigned to parameter with 'nameN' 29 | % 30 | % EXAMPLE 31 | % dfs = { 'x','REQ', 'y',0, 'z',[], 'eps',1e-3 }; 32 | % prm = getPrmDflt( struct('x',1,'y',1), dfs ) 33 | % [ x y z eps ] = getPrmDflt( {'x',2,'y',1}, dfs ) 34 | % 35 | % See also INPUTPARSER 36 | % 37 | % Piotr's Computer Vision Matlab Toolbox Version 2.60 38 | % Copyright 2014 Piotr Dollar. 
[pdollar-at-gmail.com] 39 | % Licensed under the Simplified BSD License [see external/bsd.txt] 40 | 41 | if( mod(length(dfs),2) ), error('odd number of default parameters'); end 42 | if nargin<=2, checkExtra = 0; end 43 | 44 | % get the input parameters as two cell arrays: prmVal and prmField 45 | if iscell(prm) && length(prm)==1, prm=prm{1}; end 46 | if iscell(prm) 47 | if(mod(length(prm),2)), error('odd number of parameters in prm'); end 48 | prmField = prm(1:2:end); prmVal = prm(2:2:end); 49 | else 50 | if(~isstruct(prm)), error('prm must be a struct or a cell'); end 51 | prmVal = struct2cell(prm); prmField = fieldnames(prm); 52 | end 53 | 54 | % get and update default values using quick for loop 55 | dfsField = dfs(1:2:end); dfsVal = dfs(2:2:end); 56 | if checkExtra>0 57 | for i=1:length(prmField) 58 | j = find(strcmp(prmField{i},dfsField)); 59 | if isempty(j), error('parameter %s is not valid', prmField{i}); end 60 | dfsVal(j) = prmVal(i); 61 | end 62 | elseif checkExtra<0 63 | for i=1:length(prmField) 64 | j = find(strcmp(prmField{i},dfsField)); 65 | if isempty(j), j=length(dfsVal)+1; dfsField{j}=prmField{i}; end 66 | dfsVal(j) = prmVal(i); 67 | end 68 | else 69 | for i=1:length(prmField) 70 | dfsVal(strcmp(prmField{i},dfsField)) = prmVal(i); 71 | end 72 | end 73 | 74 | % check for missing values 75 | if any(strcmp('REQ',dfsVal)) 76 | cmpArray = find(strcmp('REQ',dfsVal)); 77 | error(['Required field ''' dfsField{cmpArray(1)} ''' not specified.'] ); 78 | end 79 | 80 | % set output 81 | if nargout==1 82 | varargout{1} = cell2struct( dfsVal, dfsField, 2 ); 83 | else 84 | varargout = dfsVal; 85 | end 86 | -------------------------------------------------------------------------------- /detectron2/evaluation/f_boundary.py: -------------------------------------------------------------------------------- 1 | # ---------------------------------------------------------------------------- 2 | # A Benchmark Dataset and Evaluation Methodology for Video Object 
Segmentation 3 | #----------------------------------------------------------------------------- 4 | # Copyright (c) 2016 Federico Perazzi 5 | # Licensed under the BSD License [see LICENSE for details] 6 | # Written by Federico Perazzi 7 | # ---------------------------------------------------------------------------- 8 | 9 | import numpy as np 10 | 11 | """ Utilities for computing, reading and saving benchmark evaluation.""" 12 | 13 | def db_eval_boundary(foreground_mask, gt_mask, bound_th=0.008): 14 | """ 15 | Compute mean,recall and decay from per-frame evaluation. 16 | Calculates precision/recall for boundaries between foreground_mask and 17 | gt_mask using morphological operators to speed it up. 18 | 19 | Arguments: 20 | foreground_mask (ndarray): binary segmentation image. 21 | gt_mask (ndarray): binary annotated image. 22 | 23 | Returns: 24 | F (float): boundaries F-measure 25 | P (float): boundaries precision 26 | R (float): boundaries recall 27 | """ 28 | assert np.atleast_3d(foreground_mask).shape[2] == 1 29 | 30 | bound_pix = bound_th if bound_th >= 1 else \ 31 | np.ceil(bound_th * np.linalg.norm(foreground_mask.shape)) 32 | 33 | # Get the pixel boundaries of both masks 34 | fg_boundary = seg2bmap(foreground_mask); 35 | gt_boundary = seg2bmap(gt_mask); 36 | 37 | from skimage.morphology import binary_dilation,disk 38 | 39 | fg_dil = binary_dilation(fg_boundary, disk(bound_pix)) 40 | gt_dil = binary_dilation(gt_boundary, disk(bound_pix)) 41 | 42 | # Get the intersection 43 | gt_match = gt_boundary * fg_dil 44 | fg_match = fg_boundary * gt_dil 45 | 46 | # Area of the intersection 47 | n_fg = np.sum(fg_boundary) 48 | n_gt = np.sum(gt_boundary) 49 | 50 | #% Compute precision and recall 51 | if n_fg == 0 and n_gt > 0: 52 | precision = 1 53 | recall = 0 54 | elif n_fg > 0 and n_gt == 0: 55 | precision = 0 56 | recall = 1 57 | elif n_fg == 0 and n_gt == 0: 58 | precision = 1 59 | recall = 1 60 | else: 61 | precision = np.sum(fg_match)/float(n_fg) 62 | recall = 
np.sum(gt_match)/float(n_gt) 63 | 64 | # Compute F measure 65 | if precision + recall == 0: 66 | F = 0 67 | else: 68 | F = 2*precision*recall/(precision+recall); 69 | return F 70 | 71 | 72 | def seg2bmap(seg,width=None,height=None): 73 | """ 74 | From a segmentation, compute a binary boundary map with 1 pixel wide 75 | boundaries. The boundary pixels are offset by 1/2 pixel towards the 76 | origin from the actual segment boundary. 77 | 78 | Arguments: 79 | seg : Segments labeled from 1..k. 80 | width : Width of desired bmap <= seg.shape[1] 81 | height : Height of desired bmap <= seg.shape[0] 82 | 83 | Returns: 84 | bmap (ndarray): Binary boundary map. 85 | 86 | David Martin 87 | January 2003 88 | """ 89 | 90 | seg = seg.astype(np.bool) 91 | seg[seg>0] = 1 92 | 93 | assert np.atleast_3d(seg).shape[2] == 1 94 | 95 | width = seg.shape[1] if width is None else width 96 | height = seg.shape[0] if height is None else height 97 | 98 | h,w = seg.shape[:2] 99 | 100 | ar1 = float(width) / float(height) 101 | ar2 = float(w) / float(h) 102 | 103 | assert not (width>w | height>h | abs(ar1-ar2)>0.01),\ 104 | 'Can''t convert %dx%d seg to %dx%d bmap.'%(w,h,width,height) 105 | 106 | e = np.zeros_like(seg) 107 | s = np.zeros_like(seg) 108 | se = np.zeros_like(seg) 109 | 110 | e[:,:-1] = seg[:,1:] 111 | s[:-1,:] = seg[1:,:] 112 | se[:-1,:-1] = seg[1:,1:] 113 | 114 | b = seg^e | seg^s | seg^se 115 | b[-1,:] = seg[-1,:]^e[-1,:] 116 | b[:,-1] = seg[:,-1]^s[:,-1] 117 | b[-1,-1] = 0 118 | 119 | if w == width and h == height: 120 | bmap = b 121 | else: 122 | bmap = np.zeros((height,width)) 123 | for x in range(w): 124 | for y in range(h): 125 | if b[y,x]: 126 | j = 1+floor((y-1)+height / h) 127 | i = 1+floor((x-1)+width / h) 128 | bmap[j,i] = 1; 129 | 130 | return bmap -------------------------------------------------------------------------------- /cocoapi/common/gason.h: -------------------------------------------------------------------------------- 1 | // 
https://github.com/vivkin/gason - pulled January 10, 2016 2 | #pragma once 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | enum JsonTag { 9 | JSON_NUMBER = 0, 10 | JSON_STRING, 11 | JSON_ARRAY, 12 | JSON_OBJECT, 13 | JSON_TRUE, 14 | JSON_FALSE, 15 | JSON_NULL = 0xF 16 | }; 17 | 18 | struct JsonNode; 19 | 20 | #define JSON_VALUE_PAYLOAD_MASK 0x00007FFFFFFFFFFFULL 21 | #define JSON_VALUE_NAN_MASK 0x7FF8000000000000ULL 22 | #define JSON_VALUE_TAG_MASK 0xF 23 | #define JSON_VALUE_TAG_SHIFT 47 24 | 25 | union JsonValue { 26 | uint64_t ival; 27 | double fval; 28 | 29 | JsonValue(double x) 30 | : fval(x) { 31 | } 32 | JsonValue(JsonTag tag = JSON_NULL, void *payload = nullptr) { 33 | assert((uintptr_t)payload <= JSON_VALUE_PAYLOAD_MASK); 34 | ival = JSON_VALUE_NAN_MASK | ((uint64_t)tag << JSON_VALUE_TAG_SHIFT) | (uintptr_t)payload; 35 | } 36 | bool isDouble() const { 37 | return (int64_t)ival <= (int64_t)JSON_VALUE_NAN_MASK; 38 | } 39 | JsonTag getTag() const { 40 | return isDouble() ? JSON_NUMBER : JsonTag((ival >> JSON_VALUE_TAG_SHIFT) & JSON_VALUE_TAG_MASK); 41 | } 42 | uint64_t getPayload() const { 43 | assert(!isDouble()); 44 | return ival & JSON_VALUE_PAYLOAD_MASK; 45 | } 46 | double toNumber() const { 47 | assert(getTag() == JSON_NUMBER); 48 | return fval; 49 | } 50 | char *toString() const { 51 | assert(getTag() == JSON_STRING); 52 | return (char *)getPayload(); 53 | } 54 | JsonNode *toNode() const { 55 | assert(getTag() == JSON_ARRAY || getTag() == JSON_OBJECT); 56 | return (JsonNode *)getPayload(); 57 | } 58 | }; 59 | 60 | struct JsonNode { 61 | JsonValue value; 62 | JsonNode *next; 63 | char *key; 64 | }; 65 | 66 | struct JsonIterator { 67 | JsonNode *p; 68 | 69 | void operator++() { 70 | p = p->next; 71 | } 72 | bool operator!=(const JsonIterator &x) const { 73 | return p != x.p; 74 | } 75 | JsonNode *operator*() const { 76 | return p; 77 | } 78 | JsonNode *operator->() const { 79 | return p; 80 | } 81 | }; 82 | 83 | inline JsonIterator begin(JsonValue o) { 
84 | return JsonIterator{o.toNode()}; 85 | } 86 | inline JsonIterator end(JsonValue) { 87 | return JsonIterator{nullptr}; 88 | } 89 | 90 | #define JSON_ERRNO_MAP(XX) \ 91 | XX(OK, "ok") \ 92 | XX(BAD_NUMBER, "bad number") \ 93 | XX(BAD_STRING, "bad string") \ 94 | XX(BAD_IDENTIFIER, "bad identifier") \ 95 | XX(STACK_OVERFLOW, "stack overflow") \ 96 | XX(STACK_UNDERFLOW, "stack underflow") \ 97 | XX(MISMATCH_BRACKET, "mismatch bracket") \ 98 | XX(UNEXPECTED_CHARACTER, "unexpected character") \ 99 | XX(UNQUOTED_KEY, "unquoted key") \ 100 | XX(BREAKING_BAD, "breaking bad") \ 101 | XX(ALLOCATION_FAILURE, "allocation failure") 102 | 103 | enum JsonErrno { 104 | #define XX(no, str) JSON_##no, 105 | JSON_ERRNO_MAP(XX) 106 | #undef XX 107 | }; 108 | 109 | const char *jsonStrError(int err); 110 | 111 | class JsonAllocator { 112 | struct Zone { 113 | Zone *next; 114 | size_t used; 115 | } *head = nullptr; 116 | 117 | public: 118 | JsonAllocator() = default; 119 | JsonAllocator(const JsonAllocator &) = delete; 120 | JsonAllocator &operator=(const JsonAllocator &) = delete; 121 | JsonAllocator(JsonAllocator &&x) : head(x.head) { 122 | x.head = nullptr; 123 | } 124 | JsonAllocator &operator=(JsonAllocator &&x) { 125 | head = x.head; 126 | x.head = nullptr; 127 | return *this; 128 | } 129 | ~JsonAllocator() { 130 | deallocate(); 131 | } 132 | void *allocate(size_t size); 133 | void deallocate(); 134 | }; 135 | 136 | int jsonParse(char *str, char **endptr, JsonValue *value, JsonAllocator &allocator); 137 | -------------------------------------------------------------------------------- /cocoapi/PythonAPI/pycocotools/mask.py: -------------------------------------------------------------------------------- 1 | __author__ = 'tsungyi' 2 | 3 | import pycocotools._mask as _mask 4 | 5 | # Interface for manipulating masks stored in RLE format. 6 | # 7 | # RLE is a simple yet efficient format for storing binary masks. 
RLE 8 | # first divides a vector (or vectorized image) into a series of piecewise 9 | # constant regions and then for each piece simply stores the length of 10 | # that piece. For example, given M=[0 0 1 1 1 0 1] the RLE counts would 11 | # be [2 3 1 1], or for M=[1 1 1 1 1 1 0] the counts would be [0 6 1] 12 | # (note that the odd counts are always the numbers of zeros). Instead of 13 | # storing the counts directly, additional compression is achieved with a 14 | # variable bitrate representation based on a common scheme called LEB128. 15 | # 16 | # Compression is greatest given large piecewise constant regions. 17 | # Specifically, the size of the RLE is proportional to the number of 18 | # *boundaries* in M (or for an image the number of boundaries in the y 19 | # direction). Assuming fairly simple shapes, the RLE representation is 20 | # O(sqrt(n)) where n is number of pixels in the object. Hence space usage 21 | # is substantially lower, especially for large simple objects (large n). 22 | # 23 | # Many common operations on masks can be computed directly using the RLE 24 | # (without need for decoding). This includes computations such as area, 25 | # union, intersection, etc. All of these operations are linear in the 26 | # size of the RLE, in other words they are O(sqrt(n)) where n is the area 27 | # of the object. Computing these operations on the original mask is O(n). 28 | # Thus, using the RLE can result in substantial computational savings. 29 | # 30 | # The following API functions are defined: 31 | # encode - Encode binary masks using RLE. 32 | # decode - Decode binary masks encoded via RLE. 33 | # merge - Compute union or intersection of encoded masks. 34 | # iou - Compute intersection over union between masks. 35 | # area - Compute area of encoded masks. 36 | # toBbox - Get bounding boxes surrounding encoded masks. 37 | # frPyObjects - Convert polygon, bbox, and uncompressed RLE to encoded RLE mask. 
38 | # 39 | # Usage: 40 | # Rs = encode( masks ) 41 | # masks = decode( Rs ) 42 | # R = merge( Rs, intersect=false ) 43 | # o = iou( dt, gt, iscrowd ) 44 | # a = area( Rs ) 45 | # bbs = toBbox( Rs ) 46 | # Rs = frPyObjects( [pyObjects], h, w ) 47 | # 48 | # In the API the following formats are used: 49 | # Rs - [dict] Run-length encoding of binary masks 50 | # R - dict Run-length encoding of binary mask 51 | # masks - [hxwxn] Binary mask(s) (must have type np.ndarray(dtype=uint8) in column-major order) 52 | # iscrowd - [nx1] list of np.ndarray. 1 indicates corresponding gt image has crowd region to ignore 53 | # bbs - [nx4] Bounding box(es) stored as [x y w h] 54 | # poly - Polygon stored as [[x1 y1 x2 y2...],[x1 y1 ...],...] (2D list) 55 | # dt,gt - May be either bounding boxes or encoded masks 56 | # Both poly and bbs are 0-indexed (bbox=[0 0 1 1] encloses first pixel). 57 | # 58 | # Finally, a note about the intersection over union (iou) computation. 59 | # The standard iou of a ground truth (gt) and detected (dt) object is 60 | # iou(gt,dt) = area(intersect(gt,dt)) / area(union(gt,dt)) 61 | # For "crowd" regions, we use a modified criteria. If a gt object is 62 | # marked as "iscrowd", we allow a dt to match any subregion of the gt. 63 | # Choosing gt' in the crowd gt that best matches the dt can be done using 64 | # gt'=intersect(dt,gt). Since by definition union(gt',dt)=dt, computing 65 | # iou(gt,dt,iscrowd) = iou(gt',dt) = area(intersect(gt,dt)) / area(dt) 66 | # For crowd gt regions we use this modified criteria above for the iou. 67 | # 68 | # To compile run "python setup.py build_ext --inplace" 69 | # Please do not contact us for help with compiling. 70 | # 71 | # Microsoft COCO Toolbox. version 2.0 72 | # Data, paper, and tutorials available at: http://mscoco.org/ 73 | # Code written by Piotr Dollar and Tsung-Yi Lin, 2015. 
74 | # Licensed under the Simplified BSD License [see coco/license.txt] 75 | 76 | iou = _mask.iou 77 | merge = _mask.merge 78 | frPyObjects = _mask.frPyObjects 79 | 80 | def encode(bimask): 81 | if len(bimask.shape) == 3: 82 | return _mask.encode(bimask) 83 | elif len(bimask.shape) == 2: 84 | h, w = bimask.shape 85 | return _mask.encode(bimask.reshape((h, w, 1), order='F'))[0] 86 | 87 | def decode(rleObjs): 88 | if type(rleObjs) == list: 89 | return _mask.decode(rleObjs) 90 | else: 91 | return _mask.decode([rleObjs])[:,:,0] 92 | 93 | def area(rleObjs): 94 | if type(rleObjs) == list: 95 | return _mask.area(rleObjs) 96 | else: 97 | return _mask.area([rleObjs])[0] 98 | 99 | def toBbox(rleObjs): 100 | if type(rleObjs) == list: 101 | return _mask.toBbox(rleObjs) 102 | else: 103 | return _mask.toBbox([rleObjs])[0] -------------------------------------------------------------------------------- /cocoapi/PythonAPI/pycocoEvalDemo.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": { 7 | "collapsed": false 8 | }, 9 | "outputs": [], 10 | "source": [ 11 | "%matplotlib inline\n", 12 | "import matplotlib.pyplot as plt\n", 13 | "from pycocotools.coco import COCO\n", 14 | "from pycocotools.cocoeval import COCOeval\n", 15 | "import numpy as np\n", 16 | "import skimage.io as io\n", 17 | "import pylab\n", 18 | "pylab.rcParams['figure.figsize'] = (10.0, 8.0)" 19 | ] 20 | }, 21 | { 22 | "cell_type": "code", 23 | "execution_count": 2, 24 | "metadata": { 25 | "collapsed": false 26 | }, 27 | "outputs": [ 28 | { 29 | "name": "stdout", 30 | "output_type": "stream", 31 | "text": [ 32 | "Running demo for *bbox* results.\n" 33 | ] 34 | } 35 | ], 36 | "source": [ 37 | "annType = ['segm','bbox','keypoints']\n", 38 | "annType = annType[1] #specify type here\n", 39 | "prefix = 'person_keypoints' if annType=='keypoints' else 'instances'\n", 40 | "print 'Running demo for 
*%s* results.'%(annType)" 41 | ] 42 | }, 43 | { 44 | "cell_type": "code", 45 | "execution_count": 3, 46 | "metadata": { 47 | "collapsed": false 48 | }, 49 | "outputs": [ 50 | { 51 | "name": "stdout", 52 | "output_type": "stream", 53 | "text": [ 54 | "loading annotations into memory...\n", 55 | "Done (t=8.01s)\n", 56 | "creating index...\n", 57 | "index created!\n" 58 | ] 59 | } 60 | ], 61 | "source": [ 62 | "#initialize COCO ground truth api\n", 63 | "dataDir='../'\n", 64 | "dataType='val2014'\n", 65 | "annFile = '%s/annotations/%s_%s.json'%(dataDir,prefix,dataType)\n", 66 | "cocoGt=COCO(annFile)" 67 | ] 68 | }, 69 | { 70 | "cell_type": "code", 71 | "execution_count": 4, 72 | "metadata": { 73 | "collapsed": false 74 | }, 75 | "outputs": [ 76 | { 77 | "name": "stdout", 78 | "output_type": "stream", 79 | "text": [ 80 | "Loading and preparing results... \n", 81 | "DONE (t=0.05s)\n", 82 | "creating index...\n", 83 | "index created!\n" 84 | ] 85 | } 86 | ], 87 | "source": [ 88 | "#initialize COCO detections api\n", 89 | "resFile='%s/results/%s_%s_fake%s100_results.json'\n", 90 | "resFile = resFile%(dataDir, prefix, dataType, annType)\n", 91 | "cocoDt=cocoGt.loadRes(resFile)" 92 | ] 93 | }, 94 | { 95 | "cell_type": "code", 96 | "execution_count": 5, 97 | "metadata": { 98 | "collapsed": false 99 | }, 100 | "outputs": [], 101 | "source": [ 102 | "imgIds=sorted(cocoGt.getImgIds())\n", 103 | "imgIds=imgIds[0:100]\n", 104 | "imgId = imgIds[np.random.randint(100)]" 105 | ] 106 | }, 107 | { 108 | "cell_type": "code", 109 | "execution_count": 6, 110 | "metadata": { 111 | "collapsed": false 112 | }, 113 | "outputs": [ 114 | { 115 | "name": "stdout", 116 | "output_type": "stream", 117 | "text": [ 118 | "Running per image evaluation... \n", 119 | "DONE (t=0.46s).\n", 120 | "Accumulating evaluation results... 
\n", 121 | "DONE (t=0.38s).\n", 122 | " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.505\n", 123 | " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.697\n", 124 | " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.573\n", 125 | " Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.586\n", 126 | " Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.519\n", 127 | " Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.501\n", 128 | " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.387\n", 129 | " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.594\n", 130 | " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.595\n", 131 | " Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.640\n", 132 | " Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.566\n", 133 | " Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.564\n" 134 | ] 135 | } 136 | ], 137 | "source": [ 138 | "# running evaluation\n", 139 | "cocoEval = COCOeval(cocoGt,cocoDt,annType)\n", 140 | "cocoEval.params.imgIds = imgIds\n", 141 | "cocoEval.evaluate()\n", 142 | "cocoEval.accumulate()\n", 143 | "cocoEval.summarize()" 144 | ] 145 | } 146 | ], 147 | "metadata": { 148 | "kernelspec": { 149 | "display_name": "Python 2", 150 | "language": "python", 151 | "name": "python2" 152 | }, 153 | "language_info": { 154 | "codemirror_mode": { 155 | "name": "ipython", 156 | "version": 2 157 | }, 158 | "file_extension": ".py", 159 | "mimetype": "text/x-python", 160 | "name": "python", 161 | "nbconvert_exporter": "python", 162 | "pygments_lexer": "ipython2", 163 | "version": "2.7.10" 164 | } 165 | }, 166 | "nbformat": 4, 167 | "nbformat_minor": 0 168 | } 169 | -------------------------------------------------------------------------------- /cocoapi/MatlabAPI/MaskApi.m: 
-------------------------------------------------------------------------------- 1 | classdef MaskApi 2 | % Interface for manipulating masks stored in RLE format. 3 | % 4 | % RLE is a simple yet efficient format for storing binary masks. RLE 5 | % first divides a vector (or vectorized image) into a series of piecewise 6 | % constant regions and then for each piece simply stores the length of 7 | % that piece. For example, given M=[0 0 1 1 1 0 1] the RLE counts would 8 | % be [2 3 1 1], or for M=[1 1 1 1 1 1 0] the counts would be [0 6 1] 9 | % (note that the odd counts are always the numbers of zeros). Instead of 10 | % storing the counts directly, additional compression is achieved with a 11 | % variable bitrate representation based on a common scheme called LEB128. 12 | % 13 | % Compression is greatest given large piecewise constant regions. 14 | % Specifically, the size of the RLE is proportional to the number of 15 | % *boundaries* in M (or for an image the number of boundaries in the y 16 | % direction). Assuming fairly simple shapes, the RLE representation is 17 | % O(sqrt(n)) where n is number of pixels in the object. Hence space usage 18 | % is substantially lower, especially for large simple objects (large n). 19 | % 20 | % Many common operations on masks can be computed directly using the RLE 21 | % (without need for decoding). This includes computations such as area, 22 | % union, intersection, etc. All of these operations are linear in the 23 | % size of the RLE, in other words they are O(sqrt(n)) where n is the area 24 | % of the object. Computing these operations on the original mask is O(n). 25 | % Thus, using the RLE can result in substantial computational savings. 26 | % 27 | % The following API functions are defined: 28 | % encode - Encode binary masks using RLE. 29 | % decode - Decode binary masks encoded via RLE. 30 | % merge - Compute union or intersection of encoded masks. 31 | % iou - Compute intersection over union between masks. 
32 | % nms - Compute non-maximum suppression between ordered masks. 33 | % area - Compute area of encoded masks. 34 | % toBbox - Get bounding boxes surrounding encoded masks. 35 | % frBbox - Convert bounding boxes to encoded masks. 36 | % frPoly - Convert polygon to encoded mask. 37 | % 38 | % Usage: 39 | % Rs = MaskApi.encode( masks ) 40 | % masks = MaskApi.decode( Rs ) 41 | % R = MaskApi.merge( Rs, [intersect=false] ) 42 | % o = MaskApi.iou( dt, gt, [iscrowd=false] ) 43 | % keep = MaskApi.nms( dt, thr ) 44 | % a = MaskApi.area( Rs ) 45 | % bbs = MaskApi.toBbox( Rs ) 46 | % Rs = MaskApi.frBbox( bbs, h, w ) 47 | % R = MaskApi.frPoly( poly, h, w ) 48 | % 49 | % In the API the following formats are used: 50 | % R,Rs - [struct] Run-length encoding of binary mask(s) 51 | % masks - [hxwxn] Binary mask(s) (must have type uint8) 52 | % bbs - [nx4] Bounding box(es) stored as [x y w h] 53 | % poly - Polygon stored as {[x1 y1 x2 y2...],[x1 y1 ...],...} 54 | % dt,gt - May be either bounding boxes or encoded masks 55 | % Both poly and bbs are 0-indexed (bbox=[0 0 1 1] encloses first pixel). 56 | % 57 | % Finally, a note about the intersection over union (iou) computation. 58 | % The standard iou of a ground truth (gt) and detected (dt) object is 59 | % iou(gt,dt) = area(intersect(gt,dt)) / area(union(gt,dt)) 60 | % For "crowd" regions, we use a modified criteria. If a gt object is 61 | % marked as "iscrowd", we allow a dt to match any subregion of the gt. 62 | % Choosing gt' in the crowd gt that best matches the dt can be done using 63 | % gt'=intersect(dt,gt). Since by definition union(gt',dt)=dt, computing 64 | % iou(gt,dt,iscrowd) = iou(gt',dt) = area(intersect(gt,dt)) / area(dt) 65 | % For crowd gt regions we use this modified criteria above for the iou. 66 | % 67 | % To compile use the following (some precompiled binaries are included): 68 | % mex('CFLAGS=\$CFLAGS -Wall -std=c99','-largeArrayDims',... 69 | % 'private/maskApiMex.c','../common/maskApi.c',... 
70 | % '-I../common/','-outdir','private'); 71 | % Please do not contact us for help with compiling. 72 | % 73 | % Microsoft COCO Toolbox. version 2.0 74 | % Data, paper, and tutorials available at: http://mscoco.org/ 75 | % Code written by Piotr Dollar and Tsung-Yi Lin, 2015. 76 | % Licensed under the Simplified BSD License [see coco/license.txt] 77 | 78 | methods( Static ) 79 | function Rs = encode( masks ) 80 | Rs = maskApiMex( 'encode', masks ); 81 | end 82 | 83 | function masks = decode( Rs ) 84 | masks = maskApiMex( 'decode', Rs ); 85 | end 86 | 87 | function R = merge( Rs, varargin ) 88 | R = maskApiMex( 'merge', Rs, varargin{:} ); 89 | end 90 | 91 | function o = iou( dt, gt, varargin ) 92 | o = maskApiMex( 'iou', dt', gt', varargin{:} ); 93 | end 94 | 95 | function keep = nms( dt, thr ) 96 | keep = maskApiMex('nms',dt',thr); 97 | end 98 | 99 | function a = area( Rs ) 100 | a = maskApiMex( 'area', Rs ); 101 | end 102 | 103 | function bbs = toBbox( Rs ) 104 | bbs = maskApiMex( 'toBbox', Rs )'; 105 | end 106 | 107 | function Rs = frBbox( bbs, h, w ) 108 | Rs = maskApiMex( 'frBbox', bbs', h, w ); 109 | end 110 | 111 | function R = frPoly( poly, h, w ) 112 | R = maskApiMex( 'frPoly', poly, h , w ); 113 | end 114 | end 115 | 116 | end 117 | -------------------------------------------------------------------------------- /cocoapi/PythonAPI/pycocotools/_cocoeval.pyx: -------------------------------------------------------------------------------- 1 | ''' 2 | Functions to make pycocotools functions fast 3 | ''' 4 | from _mask import iou 5 | cimport numpy as np 6 | import numpy as np 7 | cimport cython 8 | 9 | def computeIoU(dict _gts, dict _dts, int[:] imgIds, int[:] catIds, int useCats, \ 10 | int maxDets, str iouType): 11 | ''' 12 | :param imgIds: list of all images 13 | :param catIds: list of all categories (if useCats is False, then we do not need these) 14 | ''' 15 | cdef dict all_ious = {} # return value 16 | cdef (int, int) key 17 | cdef int[:] catIdsToUse = 
catIds if useCats else np.array([-1], dtype=np.int32) 18 | cdef list gt, dt 19 | cdef list g, d 20 | cdef dict _g, _d, o 21 | cdef list iscrowd 22 | cdef int[:] inds 23 | 24 | for imgId in imgIds: 25 | for catId in catIdsToUse: 26 | if useCats: 27 | key = (imgId, catId) 28 | gt = _gts.get(key, []) 29 | dt = _dts.get(key, []) 30 | else: 31 | # No catIDs to use here 32 | key = (imgId, -1) 33 | gt = [_ for cId in catIds for _ in _gts.get((imgId, cId), [])] 34 | dt = [_ for cId in catIds for _ in _dts.get((imgId, cId), [])] 35 | 36 | if len(gt) == 0 and len(dt) == 0: 37 | all_ious[key] = [] 38 | continue 39 | 40 | inds = np.argsort([-_d['score'] for _d in dt], kind='mergesort').astype(np.int32) 41 | dt = [dt[i] for i in inds] 42 | if len(dt) > maxDets: 43 | dt=dt[0:maxDets] 44 | 45 | if iouType == 'segm': 46 | g = [_g['segmentation'] for _g in gt] 47 | d = [_d['segmentation'] for _d in dt] 48 | elif iouType == 'bbox': 49 | g = [_g['bbox'] for _g in gt] 50 | d = [_d['bbox'] for _d in dt] 51 | else: 52 | raise Exception('unknown iouType for iou computation') 53 | # compute iou between each dt and gt region 54 | iscrowd = [int(o['iscrowd']) for o in gt] 55 | ious = iou(d, g, iscrowd) 56 | all_ious[key] = ious 57 | return all_ious 58 | 59 | def evaluateImg(int[:] catIds, int[:] imgIds, float[:, :] areaRng, int useCats, dict _gts, dict _dts, int maxDet, float[:] iouThrs, dict all_ious): 60 | ''' 61 | perform evaluation for single category and image 62 | :return: dict (single image results) 63 | ''' 64 | cdef int[:] catIdsToUse = catIds if useCats else np.array([-1], dtype=np.int32) 65 | cdef int catId, imgId 66 | cdef float[:] aRng 67 | cdef list gt, dt 68 | cdef list ret_dict = [] 69 | cdef int[:] gtind, dtind 70 | cdef list iscrowd 71 | cdef double[:, :] ious 72 | 73 | cdef int T, G, D 74 | cdef long[:, :] gtm, dtm 75 | cdef int[:] gtIg 76 | cdef int[:, :] dtIg 77 | cdef np.ndarray[np.uint8_t, ndim=2, cast=True] dtIgtmp 78 | cdef int tind, dind 79 | cdef int m 80 | cdef 
dict d, g 81 | cdef float t 82 | # cdef np[:, :] a 83 | cdef np.ndarray[np.uint8_t, ndim=2, cast=True] a 84 | cdef int valid_ioumat 85 | cdef int cId 86 | 87 | T = len(iouThrs) 88 | 89 | for catId in catIdsToUse: 90 | for aRng in areaRng: 91 | for imgId in imgIds: 92 | # begin evaluation 93 | if useCats: 94 | gt = _gts.get((imgId, catId), []) 95 | dt = _dts.get((imgId, catId), []) 96 | else: 97 | gt = [_ for cId in catIds for _ in _gts.get((imgId, cId), [])] 98 | dt = [_ for cId in catIds for _ in _dts.get((imgId, cId), [])] 99 | 100 | if len(gt) == 0 and len(dt) == 0: 101 | ret_dict.append(None) 102 | continue 103 | 104 | for g in gt: 105 | if g['ignore'] or (g['area']aRng[1]): 106 | g['_ignore'] = int(1) 107 | else: 108 | g['_ignore'] = int(0) 109 | 110 | gtind = np.argsort([g['_ignore'] for g in gt], kind='mergesort').astype(np.int32) 111 | gt = [gt[i] for i in gtind] 112 | dtind = np.argsort([-d['score'] for d in dt], kind='mergesort').astype(np.int32) 113 | dt = [dt[i] for i in dtind[0:maxDet]] 114 | iscrowd = [int(o['iscrowd']) for o in gt] 115 | # get ious 116 | ious = all_ious[imgId, catId][:, gtind] if len(all_ious[imgId, catId]) > 0 else np.array([[]], dtype=np.float64) 117 | valid_ioumat = int(len(all_ious[imgId, catId]) > 0) 118 | G = len(gt) 119 | D = len(dt) 120 | gtm = np.zeros((T,G), dtype=np.int64) 121 | dtm = np.zeros((T,D), dtype=np.int64) 122 | gtIg = np.array([g['_ignore'] for g in gt], dtype=np.int32) 123 | dtIg = np.zeros((T,D), dtype=np.int32) 124 | if not (not valid_ioumat or ious.shape[0] == 0): 125 | # for tind, t in enumerate(p.iouThrs): 126 | for tind in range(T): 127 | t = iouThrs[tind] 128 | for dind in range(D): 129 | d = dt[dind] 130 | # information about best match so far (m=-1 -> unmatched) 131 | iou = float(min([t, 1-1e-10])) 132 | m = -1 133 | for gind in range(G): 134 | g = gt[gind] 135 | # if this gt already matched, and not a crowd, continue 136 | if gtm[tind,gind]>0 and not iscrowd[gind]: 137 | continue 138 | # if dt matched 
to reg gt, and on ignore gt, stop 139 | if m>-1 and gtIg[m]==0 and gtIg[gind]==1: 140 | break 141 | # continue to next gt unless better match made 142 | if ious[dind,gind] < iou: 143 | continue 144 | # if match successful and best so far, store appropriately 145 | iou=ious[dind,gind] 146 | m=gind 147 | # if match made store id of match for both dt and gt 148 | if m == -1: 149 | continue 150 | dtIg[tind,dind] = gtIg[m] 151 | dtm[tind,dind] = gt[m]['id'] 152 | gtm[tind,m] = d['id'] 153 | # set unmatched detections outside of area range to ignore 154 | a = np.array([[d['area']aRng[1] for d in dt]], dtype=bool) 155 | dtIgbool = np.logical_or(np.asarray(dtIg)>0, np.logical_and(np.asarray(dtm)==0, np.repeat(a,T,0))) 156 | 157 | # store results for given image and category 158 | ret_dict.append({ 159 | 'image_id': imgId, 160 | 'category_id': catId, 161 | 'aRng': list(aRng), 162 | 'maxDet': maxDet, 163 | 'dtIds': [d['id'] for d in dt], 164 | 'gtIds': [g['id'] for g in gt], 165 | 'dtMatches': np.array(dtm), 166 | 'gtMatches': np.array(gtm), 167 | 'dtScores': [d['score'] for d in dt], 168 | 'gtIgnore': np.array(gtIg), 169 | 'dtIgnore': dtIgbool, 170 | 'ious' : np.array(ious) if valid_ioumat else [], 171 | }) 172 | return ret_dict 173 | -------------------------------------------------------------------------------- /cocoapi/common/maskApi.c: -------------------------------------------------------------------------------- 1 | /************************************************************************** 2 | * Microsoft COCO Toolbox. version 2.0 3 | * Data, paper, and tutorials available at: http://mscoco.org/ 4 | * Code written by Piotr Dollar and Tsung-Yi Lin, 2015. 5 | * Licensed under the Simplified BSD License [see coco/license.txt] 6 | **************************************************************************/ 7 | #include "maskApi.h" 8 | #include 9 | #include 10 | 11 | uint umin( uint a, uint b ) { return (ab) ? 
a : b; } 13 | 14 | void rleInit( RLE *R, siz h, siz w, siz m, uint *cnts ) { 15 | R->h=h; R->w=w; R->m=m; R->cnts=(m==0)?0:malloc(sizeof(uint)*m); 16 | siz j; if(cnts) for(j=0; jcnts[j]=cnts[j]; 17 | } 18 | 19 | void rleFree( RLE *R ) { 20 | free(R->cnts); R->cnts=0; 21 | } 22 | 23 | void rlesInit( RLE **R, siz n ) { 24 | siz i; *R = (RLE*) malloc(sizeof(RLE)*n); 25 | for(i=0; i0 ) { 61 | c=umin(ca,cb); cc+=c; ct=0; 62 | ca-=c; if(!ca && a0) { 83 | crowd=iscrowd!=NULL && iscrowd[g]; 84 | if(dt[d].h!=gt[g].h || dt[d].w!=gt[g].w) { o[g*m+d]=-1; continue; } 85 | siz ka, kb, a, b; uint c, ca, cb, ct, i, u; int va, vb; 86 | ca=dt[d].cnts[0]; ka=dt[d].m; va=vb=0; 87 | cb=gt[g].cnts[0]; kb=gt[g].m; a=b=1; i=u=0; ct=1; 88 | while( ct>0 ) { 89 | c=umin(ca,cb); if(va||vb) { u+=c; if(va&&vb) i+=c; } ct=0; 90 | ca-=c; if(!ca && athr) keep[j]=0; 105 | } 106 | } 107 | } 108 | 109 | void bbIou( BB dt, BB gt, siz m, siz n, byte *iscrowd, double *o ) { 110 | double h, w, i, u, ga, da; siz g, d; int crowd; 111 | for( g=0; gthr) keep[j]=0; 129 | } 130 | } 131 | } 132 | 133 | void rleToBbox( const RLE *R, BB bb, siz n ) { 134 | siz i; for( i=0; id?1:c=dy && xs>xe) || (dxye); 174 | if(flip) { t=xs; xs=xe; xe=t; t=ys; ys=ye; ye=t; } 175 | s = dx>=dy ? 
(double)(ye-ys)/dx : (double)(xe-xs)/dy; 176 | if(dx>=dy) for( d=0; d<=dx; d++ ) { 177 | t=flip?dx-d:d; u[m]=t+xs; v[m]=(int)(ys+s*t+.5); m++; 178 | } else for( d=0; d<=dy; d++ ) { 179 | t=flip?dy-d:d; v[m]=t+ys; u[m]=(int)(xs+s*t+.5); m++; 180 | } 181 | } 182 | /* get points along y-boundary and downsample */ 183 | free(x); free(y); k=m; m=0; double xd, yd; 184 | x=malloc(sizeof(int)*k); y=malloc(sizeof(int)*k); 185 | for( j=1; jw-1 ) continue; 188 | yd=(double)(v[j]h) yd=h; yd=ceil(yd); 190 | x[m]=(int) xd; y[m]=(int) yd; m++; 191 | } 192 | /* compute rle encoding given y-boundary points */ 193 | k=m; a=malloc(sizeof(uint)*(k+1)); 194 | for( j=0; j0) b[m++]=a[j++]; else { 200 | j++; if(jm, p=0; long x; int more; 207 | char *s=malloc(sizeof(char)*m*6); 208 | for( i=0; icnts[i]; if(i>2) x-=(long) R->cnts[i-2]; more=1; 210 | while( more ) { 211 | char c=x & 0x1f; x >>= 5; more=(c & 0x10) ? x!=-1 : x!=0; 212 | if(more) c |= 0x20; c+=48; s[p++]=c; 213 | } 214 | } 215 | s[p]=0; return s; 216 | } 217 | 218 | void rleFrString( RLE *R, char *s, siz h, siz w ) { 219 | siz m=0, p=0, k; long x; int more; uint *cnts; 220 | while( s[m] ) m++; cnts=malloc(sizeof(uint)*m); m=0; 221 | while( s[p] ) { 222 | x=0; k=0; more=1; 223 | while( more ) { 224 | char c=s[p]-48; x |= (c & 0x1f) << 5*k; 225 | more = c & 0x20; p++; k++; 226 | if(!more && (c & 0x10)) x |= -1 << 5*k; 227 | } 228 | if(m>2) x+=(long) cnts[m-2]; cnts[m++]=(uint) x; 229 | } 230 | rleInit(R,h,w,m,cnts); free(cnts); 231 | } 232 | -------------------------------------------------------------------------------- /cocoapi/MatlabAPI/private/gasonMex.cpp: -------------------------------------------------------------------------------- 1 | /************************************************************************** 2 | * Microsoft COCO Toolbox. version 2.0 3 | * Data, paper, and tutorials available at: http://mscoco.org/ 4 | * Code written by Piotr Dollar and Tsung-Yi Lin, 2015. 
5 | * Licensed under the Simplified BSD License [see coco/license.txt] 6 | **************************************************************************/ 7 | #include "gason.h" 8 | #include "mex.h" 9 | #include "string.h" 10 | #include "math.h" 11 | #include 12 | #include 13 | #include 14 | typedef std::ostringstream ostrm; 15 | typedef unsigned long siz; 16 | typedef unsigned short ushort; 17 | 18 | siz length( const JsonValue &a ) { 19 | // get number of elements in JSON_ARRAY or JSON_OBJECT 20 | siz k=0; auto n=a.toNode(); while(n) { k++; n=n->next; } return k; 21 | } 22 | 23 | bool isRegularObjArray( const JsonValue &a ) { 24 | // check if all JSON_OBJECTs in JSON_ARRAY have the same fields 25 | JsonValue o=a.toNode()->value; siz k, n; const char **keys; 26 | n=length(o); keys=new const char*[n]; 27 | k=0; for(auto j:o) keys[k++]=j->key; 28 | for( auto i:a ) { 29 | if(length(i->value)!=n) return false; k=0; 30 | for(auto j:i->value) if(strcmp(j->key,keys[k++])) return false; 31 | } 32 | delete [] keys; return true; 33 | } 34 | 35 | mxArray* json( const JsonValue &o ) { 36 | // convert JsonValue to Matlab mxArray 37 | siz k, m, n; mxArray *M; const char **keys; 38 | switch( o.getTag() ) { 39 | case JSON_NUMBER: 40 | return mxCreateDoubleScalar(o.toNumber()); 41 | case JSON_STRING: 42 | return mxCreateString(o.toString()); 43 | case JSON_ARRAY: { 44 | if(!o.toNode()) return mxCreateDoubleMatrix(1,0,mxREAL); 45 | JsonValue o0=o.toNode()->value; JsonTag tag=o0.getTag(); 46 | n=length(o); bool isRegular=true; 47 | for(auto i:o) isRegular=isRegular && i->value.getTag()==tag; 48 | if( isRegular && tag==JSON_OBJECT && isRegularObjArray(o) ) { 49 | m=length(o0); keys=new const char*[m]; 50 | k=0; for(auto j:o0) keys[k++]=j->key; 51 | M = mxCreateStructMatrix(1,n,m,keys); 52 | k=0; for(auto i:o) { m=0; for(auto j:i->value) 53 | mxSetFieldByNumber(M,k,m++,json(j->value)); k++; } 54 | delete [] keys; return M; 55 | } else if( isRegular && tag==JSON_NUMBER ) { 56 | M = 
mxCreateDoubleMatrix(1,n,mxREAL); double *p=mxGetPr(M); 57 | k=0; for(auto i:o) p[k++]=i->value.toNumber(); return M; 58 | } else { 59 | M = mxCreateCellMatrix(1,n); 60 | k=0; for(auto i:o) mxSetCell(M,k++,json(i->value)); 61 | return M; 62 | } 63 | } 64 | case JSON_OBJECT: 65 | if(!o.toNode()) return mxCreateStructMatrix(1,0,0,NULL); 66 | n=length(o); keys=new const char*[n]; 67 | k=0; for(auto i:o) keys[k++]=i->key; 68 | M = mxCreateStructMatrix(1,1,n,keys); k=0; 69 | for(auto i:o) mxSetFieldByNumber(M,0,k++,json(i->value)); 70 | delete [] keys; return M; 71 | case JSON_TRUE: 72 | return mxCreateDoubleScalar(1); 73 | case JSON_FALSE: 74 | return mxCreateDoubleScalar(0); 75 | case JSON_NULL: 76 | return mxCreateDoubleMatrix(0,0,mxREAL); 77 | default: return NULL; 78 | } 79 | } 80 | 81 | template ostrm& json( ostrm &S, T *A, siz n ) { 82 | // convert numeric array to JSON string with casting 83 | if(n==0) { S<<"[]"; return S; } if(n==1) { S< ostrm& json( ostrm &S, T *A, siz n ) { 89 | // convert numeric array to JSON string without casting 90 | return json(S,A,n); 91 | } 92 | 93 | ostrm& json( ostrm &S, const char *A ) { 94 | // convert char array to JSON string (handle escape characters) 95 | #define RPL(a,b) case a: { S << b; A++; break; } 96 | S << "\""; while( *A>0 ) switch( *A ) { 97 | RPL('"',"\\\""); RPL('\\',"\\\\"); RPL('/',"\\/"); RPL('\b',"\\b"); 98 | RPL('\f',"\\f"); RPL('\n',"\\n"); RPL('\r',"\\r"); RPL('\t',"\\t"); 99 | default: S << *A; A++; 100 | } 101 | S << "\""; return S; 102 | } 103 | 104 | ostrm& json( ostrm& S, const JsonValue *o ) { 105 | // convert JsonValue to JSON string 106 | switch( o->getTag() ) { 107 | case JSON_NUMBER: S << o->toNumber(); return S; 108 | case JSON_TRUE: S << "true"; return S; 109 | case JSON_FALSE: S << "false"; return S; 110 | case JSON_NULL: S << "null"; return S; 111 | case JSON_STRING: return json(S,o->toString()); 112 | case JSON_ARRAY: 113 | S << "["; for(auto i:*o) { 114 | json(S,&i->value) << (i->next ? 
"," : ""); } 115 | S << "]"; return S; 116 | case JSON_OBJECT: 117 | S << "{"; for(auto i:*o) { 118 | json(S,i->key) << ":"; 119 | json(S,&i->value) << (i->next ? "," : ""); } 120 | S << "}"; return S; 121 | default: return S; 122 | } 123 | } 124 | 125 | ostrm& json( ostrm& S, const mxArray *M ) { 126 | // convert Matlab mxArray to JSON string 127 | siz i, j, m, n=mxGetNumberOfElements(M); 128 | void *A=mxGetData(M); ostrm *nms; 129 | switch( mxGetClassID(M) ) { 130 | case mxDOUBLE_CLASS: return json(S,(double*) A,n); 131 | case mxSINGLE_CLASS: return json(S,(float*) A,n); 132 | case mxINT64_CLASS: return json(S,(int64_t*) A,n); 133 | case mxUINT64_CLASS: return json(S,(uint64_t*) A,n); 134 | case mxINT32_CLASS: return json(S,(int32_t*) A,n); 135 | case mxUINT32_CLASS: return json(S,(uint32_t*) A,n); 136 | case mxINT16_CLASS: return json(S,(int16_t*) A,n); 137 | case mxUINT16_CLASS: return json(S,(uint16_t*) A,n); 138 | case mxINT8_CLASS: return json(S,(int8_t*) A,n); 139 | case mxUINT8_CLASS: return json(S,(uint8_t*) A,n); 140 | case mxLOGICAL_CLASS: return json(S,(uint8_t*) A,n); 141 | case mxCHAR_CLASS: return json(S,mxArrayToString(M)); 142 | case mxCELL_CLASS: 143 | S << "["; for(i=0; i0) json(S,mxGetCell(M,n-1)); S << "]"; return S; 145 | case mxSTRUCT_CLASS: 146 | if(n==0) { S<<"{}"; return S; } m=mxGetNumberOfFields(M); 147 | if(m==0) { S<<"["; for(i=0; i1) S<<"["; nms=new ostrm[m]; 149 | for(j=0; j1) S<<"]"; delete [] nms; return S; 156 | default: 157 | mexErrMsgTxt( "Unknown type." 
); return S; 158 | } 159 | } 160 | 161 | mxArray* mxCreateStringRobust( const char* str ) { 162 | // convert char* to Matlab string (robust version of mxCreateString) 163 | mxArray *M; ushort *c; mwSize n[2]={1,strlen(str)}; 164 | M=mxCreateCharArray(2,n); c=(ushort*) mxGetData(M); 165 | for( siz i=0; i1 ) mexErrMsgTxt("One output expected."); 182 | 183 | if(!strcmp(action,"convert")) { 184 | if( nr!=1 ) mexErrMsgTxt("One input expected."); 185 | if( mxGetClassID(pr[0])==mxCHAR_CLASS ) { 186 | // object = mexFunction( string ) 187 | char *str = mxArrayToStringRobust(pr[0]); 188 | int status = jsonParse(str, &endptr, &val, allocator); 189 | if( status != JSON_OK) mexErrMsgTxt(jsonStrError(status)); 190 | pl[0] = json(val); mxFree(str); 191 | } else { 192 | // string = mexFunction( object ) 193 | ostrm S; S << std::setprecision(12); json(S,pr[0]); 194 | pl[0]=mxCreateStringRobust(S.str().c_str()); 195 | } 196 | 197 | } else if(!strcmp(action,"split")) { 198 | // strings = mexFunction( string, k ) 199 | if( nr!=2 ) mexErrMsgTxt("Two input expected."); 200 | char *str = mxArrayToStringRobust(pr[0]); 201 | int status = jsonParse(str, &endptr, &val, allocator); 202 | if( status != JSON_OK) mexErrMsgTxt(jsonStrError(status)); 203 | if( val.getTag()!=JSON_ARRAY ) mexErrMsgTxt("Array expected"); 204 | siz i=0, t=0, n=length(val), k=(siz) mxGetScalar(pr[1]); 205 | k=(k>n)?n:(k<1)?1:k; k=ceil(n/ceil(double(n)/k)); 206 | pl[0]=mxCreateCellMatrix(1,k); ostrm S; S<value); t--; if(!o->next) t=0; S << (t ? "," : "]"); 210 | if(!t) mxSetCell(pl[0],i++,mxCreateStringRobust(S.str().c_str())); 211 | } 212 | 213 | } else if(!strcmp(action,"merge")) { 214 | // string = mexFunction( strings ) 215 | if( nr!=1 ) mexErrMsgTxt("One input expected."); 216 | if(!mxIsCell(pr[0])) mexErrMsgTxt("Cell array expected."); 217 | siz n = mxGetNumberOfElements(pr[0]); 218 | ostrm S; S << std::setprecision(12); S << "["; 219 | for( siz i=0; ivalue) << (j->next ? 
"," : ""); 225 | mxFree(str); if(i 4 | 5 | #define JSON_ZONE_SIZE 4096 6 | #define JSON_STACK_SIZE 32 7 | 8 | const char *jsonStrError(int err) { 9 | switch (err) { 10 | #define XX(no, str) \ 11 | case JSON_##no: \ 12 | return str; 13 | JSON_ERRNO_MAP(XX) 14 | #undef XX 15 | default: 16 | return "unknown"; 17 | } 18 | } 19 | 20 | void *JsonAllocator::allocate(size_t size) { 21 | size = (size + 7) & ~7; 22 | 23 | if (head && head->used + size <= JSON_ZONE_SIZE) { 24 | char *p = (char *)head + head->used; 25 | head->used += size; 26 | return p; 27 | } 28 | 29 | size_t allocSize = sizeof(Zone) + size; 30 | Zone *zone = (Zone *)malloc(allocSize <= JSON_ZONE_SIZE ? JSON_ZONE_SIZE : allocSize); 31 | if (zone == nullptr) 32 | return nullptr; 33 | zone->used = allocSize; 34 | if (allocSize <= JSON_ZONE_SIZE || head == nullptr) { 35 | zone->next = head; 36 | head = zone; 37 | } else { 38 | zone->next = head->next; 39 | head->next = zone; 40 | } 41 | return (char *)zone + sizeof(Zone); 42 | } 43 | 44 | void JsonAllocator::deallocate() { 45 | while (head) { 46 | Zone *next = head->next; 47 | free(head); 48 | head = next; 49 | } 50 | } 51 | 52 | static inline bool isspace(char c) { 53 | return c == ' ' || (c >= '\t' && c <= '\r'); 54 | } 55 | 56 | static inline bool isdelim(char c) { 57 | return c == ',' || c == ':' || c == ']' || c == '}' || isspace(c) || !c; 58 | } 59 | 60 | static inline bool isdigit(char c) { 61 | return c >= '0' && c <= '9'; 62 | } 63 | 64 | static inline bool isxdigit(char c) { 65 | return (c >= '0' && c <= '9') || ((c & ~' ') >= 'A' && (c & ~' ') <= 'F'); 66 | } 67 | 68 | static inline int char2int(char c) { 69 | if (c <= '9') 70 | return c - '0'; 71 | return (c & ~' ') - 'A' + 10; 72 | } 73 | 74 | static double string2double(char *s, char **endptr) { 75 | char ch = *s; 76 | if (ch == '-') 77 | ++s; 78 | 79 | double result = 0; 80 | while (isdigit(*s)) 81 | result = (result * 10) + (*s++ - '0'); 82 | 83 | if (*s == '.') { 84 | ++s; 85 | 86 | double 
fraction = 1; 87 | while (isdigit(*s)) { 88 | fraction *= 0.1; 89 | result += (*s++ - '0') * fraction; 90 | } 91 | } 92 | 93 | if (*s == 'e' || *s == 'E') { 94 | ++s; 95 | 96 | double base = 10; 97 | if (*s == '+') 98 | ++s; 99 | else if (*s == '-') { 100 | ++s; 101 | base = 0.1; 102 | } 103 | 104 | unsigned int exponent = 0; 105 | while (isdigit(*s)) 106 | exponent = (exponent * 10) + (*s++ - '0'); 107 | 108 | double power = 1; 109 | for (; exponent; exponent >>= 1, base *= base) 110 | if (exponent & 1) 111 | power *= base; 112 | 113 | result *= power; 114 | } 115 | 116 | *endptr = s; 117 | return ch == '-' ? -result : result; 118 | } 119 | 120 | static inline JsonNode *insertAfter(JsonNode *tail, JsonNode *node) { 121 | if (!tail) 122 | return node->next = node; 123 | node->next = tail->next; 124 | tail->next = node; 125 | return node; 126 | } 127 | 128 | static inline JsonValue listToValue(JsonTag tag, JsonNode *tail) { 129 | if (tail) { 130 | auto head = tail->next; 131 | tail->next = nullptr; 132 | return JsonValue(tag, head); 133 | } 134 | return JsonValue(tag, nullptr); 135 | } 136 | 137 | int jsonParse(char *s, char **endptr, JsonValue *value, JsonAllocator &allocator) { 138 | JsonNode *tails[JSON_STACK_SIZE]; 139 | JsonTag tags[JSON_STACK_SIZE]; 140 | char *keys[JSON_STACK_SIZE]; 141 | JsonValue o; 142 | int pos = -1; 143 | bool separator = true; 144 | JsonNode *node; 145 | *endptr = s; 146 | 147 | while (*s) { 148 | while (isspace(*s)) { 149 | ++s; 150 | if (!*s) break; 151 | } 152 | *endptr = s++; 153 | switch (**endptr) { 154 | case '-': 155 | if (!isdigit(*s) && *s != '.') { 156 | *endptr = s; 157 | return JSON_BAD_NUMBER; 158 | } 159 | case '0': 160 | case '1': 161 | case '2': 162 | case '3': 163 | case '4': 164 | case '5': 165 | case '6': 166 | case '7': 167 | case '8': 168 | case '9': 169 | o = JsonValue(string2double(*endptr, &s)); 170 | if (!isdelim(*s)) { 171 | *endptr = s; 172 | return JSON_BAD_NUMBER; 173 | } 174 | break; 175 | case '"': 176 | 
o = JsonValue(JSON_STRING, s); 177 | for (char *it = s; *s; ++it, ++s) { 178 | int c = *it = *s; 179 | if (c == '\\') { 180 | c = *++s; 181 | switch (c) { 182 | case '\\': 183 | case '"': 184 | case '/': 185 | *it = c; 186 | break; 187 | case 'b': 188 | *it = '\b'; 189 | break; 190 | case 'f': 191 | *it = '\f'; 192 | break; 193 | case 'n': 194 | *it = '\n'; 195 | break; 196 | case 'r': 197 | *it = '\r'; 198 | break; 199 | case 't': 200 | *it = '\t'; 201 | break; 202 | case 'u': 203 | c = 0; 204 | for (int i = 0; i < 4; ++i) { 205 | if (isxdigit(*++s)) { 206 | c = c * 16 + char2int(*s); 207 | } else { 208 | *endptr = s; 209 | return JSON_BAD_STRING; 210 | } 211 | } 212 | if (c < 0x80) { 213 | *it = c; 214 | } else if (c < 0x800) { 215 | *it++ = 0xC0 | (c >> 6); 216 | *it = 0x80 | (c & 0x3F); 217 | } else { 218 | *it++ = 0xE0 | (c >> 12); 219 | *it++ = 0x80 | ((c >> 6) & 0x3F); 220 | *it = 0x80 | (c & 0x3F); 221 | } 222 | break; 223 | default: 224 | *endptr = s; 225 | return JSON_BAD_STRING; 226 | } 227 | } else if ((unsigned int)c < ' ' || c == '\x7F') { 228 | *endptr = s; 229 | return JSON_BAD_STRING; 230 | } else if (c == '"') { 231 | *it = 0; 232 | ++s; 233 | break; 234 | } 235 | } 236 | if (!isdelim(*s)) { 237 | *endptr = s; 238 | return JSON_BAD_STRING; 239 | } 240 | break; 241 | case 't': 242 | if (!(s[0] == 'r' && s[1] == 'u' && s[2] == 'e' && isdelim(s[3]))) 243 | return JSON_BAD_IDENTIFIER; 244 | o = JsonValue(JSON_TRUE); 245 | s += 3; 246 | break; 247 | case 'f': 248 | if (!(s[0] == 'a' && s[1] == 'l' && s[2] == 's' && s[3] == 'e' && isdelim(s[4]))) 249 | return JSON_BAD_IDENTIFIER; 250 | o = JsonValue(JSON_FALSE); 251 | s += 4; 252 | break; 253 | case 'n': 254 | if (!(s[0] == 'u' && s[1] == 'l' && s[2] == 'l' && isdelim(s[3]))) 255 | return JSON_BAD_IDENTIFIER; 256 | o = JsonValue(JSON_NULL); 257 | s += 3; 258 | break; 259 | case ']': 260 | if (pos == -1) 261 | return JSON_STACK_UNDERFLOW; 262 | if (tags[pos] != JSON_ARRAY) 263 | return 
JSON_MISMATCH_BRACKET; 264 | o = listToValue(JSON_ARRAY, tails[pos--]); 265 | break; 266 | case '}': 267 | if (pos == -1) 268 | return JSON_STACK_UNDERFLOW; 269 | if (tags[pos] != JSON_OBJECT) 270 | return JSON_MISMATCH_BRACKET; 271 | if (keys[pos] != nullptr) 272 | return JSON_UNEXPECTED_CHARACTER; 273 | o = listToValue(JSON_OBJECT, tails[pos--]); 274 | break; 275 | case '[': 276 | if (++pos == JSON_STACK_SIZE) 277 | return JSON_STACK_OVERFLOW; 278 | tails[pos] = nullptr; 279 | tags[pos] = JSON_ARRAY; 280 | keys[pos] = nullptr; 281 | separator = true; 282 | continue; 283 | case '{': 284 | if (++pos == JSON_STACK_SIZE) 285 | return JSON_STACK_OVERFLOW; 286 | tails[pos] = nullptr; 287 | tags[pos] = JSON_OBJECT; 288 | keys[pos] = nullptr; 289 | separator = true; 290 | continue; 291 | case ':': 292 | if (separator || keys[pos] == nullptr) 293 | return JSON_UNEXPECTED_CHARACTER; 294 | separator = true; 295 | continue; 296 | case ',': 297 | if (separator || keys[pos] != nullptr) 298 | return JSON_UNEXPECTED_CHARACTER; 299 | separator = true; 300 | continue; 301 | case '\0': 302 | continue; 303 | default: 304 | return JSON_UNEXPECTED_CHARACTER; 305 | } 306 | 307 | separator = false; 308 | 309 | if (pos == -1) { 310 | *endptr = s; 311 | *value = o; 312 | return JSON_OK; 313 | } 314 | 315 | if (tags[pos] == JSON_OBJECT) { 316 | if (!keys[pos]) { 317 | if (o.getTag() != JSON_STRING) 318 | return JSON_UNQUOTED_KEY; 319 | keys[pos] = o.toString(); 320 | continue; 321 | } 322 | if ((node = (JsonNode *) allocator.allocate(sizeof(JsonNode))) == nullptr) 323 | return JSON_ALLOCATION_FAILURE; 324 | tails[pos] = insertAfter(tails[pos], node); 325 | tails[pos]->key = keys[pos]; 326 | keys[pos] = nullptr; 327 | } else { 328 | if ((node = (JsonNode *) allocator.allocate(sizeof(JsonNode) - sizeof(char *))) == nullptr) 329 | return JSON_ALLOCATION_FAILURE; 330 | tails[pos] = insertAfter(tails[pos], node); 331 | } 332 | tails[pos]->value = o; 333 | } 334 | return JSON_BREAKING_BAD; 335 | 
} 336 | -------------------------------------------------------------------------------- /cocoapi/LuaAPI/MaskApi.lua: -------------------------------------------------------------------------------- 1 | --[[---------------------------------------------------------------------------- 2 | 3 | Interface for manipulating masks stored in RLE format. 4 | 5 | For an overview of RLE please see http://mscoco.org/dataset/#download. 6 | Additionally, more detailed information can be found in the Matlab MaskApi.m: 7 | https://github.com/pdollar/coco/blob/master/MatlabAPI/MaskApi.m 8 | 9 | The following API functions are defined: 10 | encode - Encode binary masks using RLE. 11 | decode - Decode binary masks encoded via RLE. 12 | merge - Compute union or intersection of encoded masks. 13 | iou - Compute intersection over union between masks. 14 | nms - Compute non-maximum suppression between ordered masks. 15 | area - Compute area of encoded masks. 16 | toBbox - Get bounding boxes surrounding encoded masks. 17 | frBbox - Convert bounding boxes to encoded masks. 18 | frPoly - Convert polygon to encoded mask. 19 | drawCirc - Draw circle into image (alters input). 20 | drawLine - Draw line into image (alters input). 21 | drawMasks - Draw masks into image (alters input). 22 | 23 | Usage: 24 | Rs = MaskApi.encode( masks ) 25 | masks = MaskApi.decode( Rs ) 26 | R = MaskApi.merge( Rs, [intersect=false] ) 27 | o = MaskApi.iou( dt, gt, [iscrowd=false] ) 28 | keep = MaskApi.nms( dt, thr ) 29 | a = MaskApi.area( Rs ) 30 | bbs = MaskApi.toBbox( Rs ) 31 | Rs = MaskApi.frBbox( bbs, h, w ) 32 | R = MaskApi.frPoly( poly, h, w ) 33 | MaskApi.drawCirc( img, x, y, rad, clr ) 34 | MaskApi.drawLine( img, x0, y0, x1, y1, rad, clr ) 35 | MaskApi.drawMasks( img, masks, [maxn=n], [alpha=.4], [clrs] ) 36 | For detailed usage information please see cocoDemo.lua. 
37 | 38 | In the API the following formats are used: 39 | R,Rs - [table] Run-length encoding of binary mask(s) 40 | masks - [nxhxw] Binary mask(s) 41 | bbs - [nx4] Bounding box(es) stored as [x y w h] 42 | poly - Polygon stored as {[x1 y1 x2 y2...],[x1 y1 ...],...} 43 | dt,gt - May be either bounding boxes or encoded masks 44 | Both poly and bbs are 0-indexed (bbox=[0 0 1 1] encloses first pixel). 45 | 46 | Common Objects in COntext (COCO) Toolbox. version 3.0 47 | Data, paper, and tutorials available at: http://mscoco.org/ 48 | Code written by Pedro O. Pinheiro and Piotr Dollar, 2016. 49 | Licensed under the Simplified BSD License [see coco/license.txt] 50 | 51 | ------------------------------------------------------------------------------]] 52 | 53 | local ffi = require 'ffi' 54 | local coco = require 'coco.env' 55 | 56 | coco.MaskApi = {} 57 | local MaskApi = coco.MaskApi 58 | 59 | coco.libmaskapi = ffi.load(package.searchpath('libmaskapi',package.cpath)) 60 | local libmaskapi = coco.libmaskapi 61 | 62 | -------------------------------------------------------------------------------- 63 | 64 | MaskApi.encode = function( masks ) 65 | local n, h, w = masks:size(1), masks:size(2), masks:size(3) 66 | masks = masks:type('torch.ByteTensor'):transpose(2,3) 67 | local data = masks:contiguous():data() 68 | local Qs = MaskApi._rlesInit(n) 69 | libmaskapi.rleEncode(Qs[0],data,h,w,n) 70 | return MaskApi._rlesToLua(Qs,n) 71 | end 72 | 73 | MaskApi.decode = function( Rs ) 74 | local Qs, n, h, w = MaskApi._rlesFrLua(Rs) 75 | local masks = torch.ByteTensor(n,w,h):zero():contiguous() 76 | libmaskapi.rleDecode(Qs,masks:data(),n) 77 | MaskApi._rlesFree(Qs,n) 78 | return masks:transpose(2,3) 79 | end 80 | 81 | MaskApi.merge = function( Rs, intersect ) 82 | intersect = intersect or 0 83 | local Qs, n, h, w = MaskApi._rlesFrLua(Rs) 84 | local Q = MaskApi._rlesInit(1) 85 | libmaskapi.rleMerge(Qs,Q,n,intersect) 86 | MaskApi._rlesFree(Qs,n) 87 | return MaskApi._rlesToLua(Q,1)[1] 88 | 
end 89 | 90 | MaskApi.iou = function( dt, gt, iscrowd ) 91 | if not iscrowd then iscrowd = NULL else 92 | iscrowd = iscrowd:type('torch.ByteTensor'):contiguous():data() 93 | end 94 | if torch.isTensor(gt) and torch.isTensor(dt) then 95 | local nDt, k = dt:size(1), dt:size(2); assert(k==4) 96 | local nGt, k = gt:size(1), gt:size(2); assert(k==4) 97 | local dDt = dt:type('torch.DoubleTensor'):contiguous():data() 98 | local dGt = gt:type('torch.DoubleTensor'):contiguous():data() 99 | local o = torch.DoubleTensor(nGt,nDt):contiguous() 100 | libmaskapi.bbIou(dDt,dGt,nDt,nGt,iscrowd,o:data()) 101 | return o:transpose(1,2) 102 | else 103 | local qDt, nDt = MaskApi._rlesFrLua(dt) 104 | local qGt, nGt = MaskApi._rlesFrLua(gt) 105 | local o = torch.DoubleTensor(nGt,nDt):contiguous() 106 | libmaskapi.rleIou(qDt,qGt,nDt,nGt,iscrowd,o:data()) 107 | MaskApi._rlesFree(qDt,nDt); MaskApi._rlesFree(qGt,nGt) 108 | return o:transpose(1,2) 109 | end 110 | end 111 | 112 | MaskApi.nms = function( dt, thr ) 113 | if torch.isTensor(dt) then 114 | local n, k = dt:size(1), dt:size(2); assert(k==4) 115 | local Q = dt:type('torch.DoubleTensor'):contiguous():data() 116 | local kp = torch.IntTensor(n):contiguous() 117 | libmaskapi.bbNms(Q,n,kp:data(),thr) 118 | return kp 119 | else 120 | local Q, n = MaskApi._rlesFrLua(dt) 121 | local kp = torch.IntTensor(n):contiguous() 122 | libmaskapi.rleNms(Q,n,kp:data(),thr) 123 | MaskApi._rlesFree(Q,n) 124 | return kp 125 | end 126 | end 127 | 128 | MaskApi.area = function( Rs ) 129 | local Qs, n, h, w = MaskApi._rlesFrLua(Rs) 130 | local a = torch.IntTensor(n):contiguous() 131 | libmaskapi.rleArea(Qs,n,a:data()) 132 | MaskApi._rlesFree(Qs,n) 133 | return a 134 | end 135 | 136 | MaskApi.toBbox = function( Rs ) 137 | local Qs, n, h, w = MaskApi._rlesFrLua(Rs) 138 | local bb = torch.DoubleTensor(n,4):contiguous() 139 | libmaskapi.rleToBbox(Qs,bb:data(),n) 140 | MaskApi._rlesFree(Qs,n) 141 | return bb 142 | end 143 | 144 | MaskApi.frBbox = function( bbs, h, w 
) 145 | if bbs:dim()==1 then bbs=bbs:view(1,bbs:size(1)) end 146 | local n, k = bbs:size(1), bbs:size(2); assert(k==4) 147 | local data = bbs:type('torch.DoubleTensor'):contiguous():data() 148 | local Qs = MaskApi._rlesInit(n) 149 | libmaskapi.rleFrBbox(Qs[0],data,h,w,n) 150 | return MaskApi._rlesToLua(Qs,n) 151 | end 152 | 153 | MaskApi.frPoly = function( poly, h, w ) 154 | local n = #poly 155 | local Qs, Q = MaskApi._rlesInit(n), MaskApi._rlesInit(1) 156 | for i,p in pairs(poly) do 157 | local xy = p:type('torch.DoubleTensor'):contiguous():data() 158 | libmaskapi.rleFrPoly(Qs[i-1],xy,p:size(1)/2,h,w) 159 | end 160 | libmaskapi.rleMerge(Qs,Q[0],n,0) 161 | MaskApi._rlesFree(Qs,n) 162 | return MaskApi._rlesToLua(Q,1)[1] 163 | end 164 | 165 | -------------------------------------------------------------------------------- 166 | 167 | MaskApi.drawCirc = function( img, x, y, rad, clr ) 168 | assert(img:isContiguous() and img:dim()==3) 169 | local k, h, w, data = img:size(1), img:size(2), img:size(3), img:data() 170 | for dx=-rad,rad do for dy=-rad,rad do 171 | local xi, yi = torch.round(x+dx), torch.round(y+dy) 172 | if dx*dx+dy*dy<=rad*rad and xi>=0 and yi>=0 and xi=0 and yi>=0 and xi0) 59 | local isStr = torch.type(T[1])=='string' 60 | assert(isStr or torch.isTensor(T[1])) 61 | local c=function(s) return torch.CharTensor(torch.CharStorage():string(s)) end 62 | if isStr then local S=T; T={}; for i=1,n do T[i]=c(S[i]) end end 63 | local ms, idx = torch.LongTensor(n), torch.LongTensor(n+1) 64 | for i=1,n do ms[i]=T[i]:numel() end 65 | idx[1]=1; idx:narrow(1,2,n):copy(ms); idx=idx:cumsum() 66 | local type = string.sub(torch.type(T[1]),7,-1) 67 | local data = torch[type](idx[n+1]-1) 68 | if isStr then type='string' end 69 | for i=1,n do if ms[i]>0 then data:sub(idx[i],idx[i+1]-1):copy(T[i]) end end 70 | if ms:eq(ms[1]):all() and ms[1]>0 then data=data:view(n,ms[1]); idx=nil end 71 | self.data, self.idx, self.type = data, idx, type 72 | end 73 | 74 | function 
TensorTable:__index__( i ) 75 | if torch.type(i)~='number' then return false end 76 | local d, idx, type = self.data, self.idx, self.type 77 | if idx and idx[i]==idx[i+1] then 78 | if type=='string' then d='' else d=torch[type]() end 79 | else 80 | if idx then d=d:sub(idx[i],idx[i+1]-1) else d=d[i] end 81 | if type=='string' then d=d:clone():storage():string() end 82 | end 83 | return d, true 84 | end 85 | 86 | -------------------------------------------------------------------------------- 87 | 88 | --[[ CocoSeg is an efficient data structure for storing COCO segmentations. ]] 89 | 90 | function CocoSeg:__init( segs ) 91 | local polys, pIdx, sizes, rles, p, isStr = {}, {}, {}, {}, 0, 0 92 | for i,seg in pairs(segs) do if seg.size then isStr=seg.counts break end end 93 | isStr = torch.type(isStr)=='string' 94 | for i,seg in pairs(segs) do 95 | pIdx[i], sizes[i] = {}, {} 96 | if seg.size then 97 | sizes[i],rles[i] = seg.size,seg.counts 98 | else 99 | if isStr then rles[i]='' else rles[i]={} end 100 | for j=1,#seg do p=p+1; pIdx[i][j],polys[p] = p,seg[j] end 101 | end 102 | pIdx[i],sizes[i] = torch.LongTensor(pIdx[i]),torch.IntTensor(sizes[i]) 103 | if not isStr then rles[i]=torch.IntTensor(rles[i]) end 104 | end 105 | for i=1,p do polys[i]=torch.DoubleTensor(polys[i]) end 106 | self.polys, self.pIdx = coco.TensorTable(polys), coco.TensorTable(pIdx) 107 | self.sizes, self.rles = coco.TensorTable(sizes), coco.TensorTable(rles) 108 | end 109 | 110 | function CocoSeg:__index__( i ) 111 | if torch.type(i)~='number' then return false end 112 | if self.sizes[i]:numel()>0 then 113 | return {size=self.sizes[i],counts=self.rles[i]}, true 114 | else 115 | local ids, polys = self.pIdx[i], {} 116 | for i=1,ids:numel() do polys[i]=self.polys[ids[i]] end 117 | return polys, true 118 | end 119 | end 120 | 121 | -------------------------------------------------------------------------------- 122 | 123 | --[[ CocoApi is the API to the COCO dataset, see main comment for details. 
]] 124 | 125 | function CocoApi:__init( annFile ) 126 | assert( string.sub(annFile,-4,-1)=='json' and paths.filep(annFile) ) 127 | local torchFile = string.sub(annFile,1,-6) .. '.t7' 128 | if not paths.filep(torchFile) then self:__convert(annFile,torchFile) end 129 | local data = torch.load(torchFile) 130 | self.data, self.inds = data, {} 131 | for k,v in pairs({images='img',categories='cat',annotations='ann'}) do 132 | local M = {}; self.inds[v..'IdsMap']=M 133 | if data[k] then for i=1,data[k].id:size(1) do M[data[k].id[i]]=i end end 134 | end 135 | end 136 | 137 | function CocoApi:__convert( annFile, torchFile ) 138 | print('convert: '..annFile..' --> .t7 [please be patient]') 139 | local tic = torch.tic() 140 | -- load data and decode json 141 | local data = torch.CharStorage(annFile):string() 142 | data = json.decode(data); collectgarbage() 143 | -- transpose and flatten each field in the coco data struct 144 | local convert = {images=true, categories=true, annotations=true} 145 | for field, d in pairs(data) do if convert[field] then 146 | print('converting: '..field) 147 | local n, out = #d, {} 148 | if n==0 then d,n={d},1 end 149 | for k,v in pairs(d[1]) do 150 | local t, isReg = torch.type(v), true 151 | for i=1,n do isReg=isReg and torch.type(d[i][k])==t end 152 | if t=='number' and isReg then 153 | out[k] = torch.DoubleTensor(n) 154 | for i=1,n do out[k][i]=d[i][k] end 155 | elseif t=='string' and isReg then 156 | out[k]={}; for i=1,n do out[k][i]=d[i][k] end 157 | out[k] = coco.TensorTable(out[k]) 158 | elseif t=='table' and isReg and torch.type(v[1])=='number' then 159 | out[k]={}; for i=1,n do out[k][i]=torch.DoubleTensor(d[i][k]) end 160 | out[k] = coco.TensorTable(out[k]) 161 | if not out[k].idx then out[k]=out[k].data end 162 | else 163 | out[k]={}; for i=1,n do out[k][i]=d[i][k] end 164 | if k=='segmentation' then out[k] = coco.CocoSeg(out[k]) end 165 | end 166 | collectgarbage() 167 | end 168 | if out.id then out.idx=torch.range(1,out.id:size(1)) 
end 169 | data[field] = out 170 | collectgarbage() 171 | end end 172 | -- create mapping from cat/img index to anns indices for that cat/img 173 | print('convert: building indices') 174 | local makeMap = function( type, type_id ) 175 | if not data[type] or not data.annotations then return nil end 176 | local invmap, n = {}, data[type].id:size(1) 177 | for i=1,n do invmap[data[type].id[i]]=i end 178 | local map = {}; for i=1,n do map[i]={} end 179 | data.annotations[type_id..'x'] = data.annotations[type_id]:clone() 180 | for i=1,data.annotations.id:size(1) do 181 | local id = invmap[data.annotations[type_id][i]] 182 | data.annotations[type_id..'x'][i] = id 183 | table.insert(map[id],data.annotations.id[i]) 184 | end 185 | for i=1,n do map[i]=torch.LongTensor(map[i]) end 186 | return coco.TensorTable(map) 187 | end 188 | data.annIdsPerImg = makeMap('images','image_id') 189 | data.annIdsPerCat = makeMap('categories','category_id') 190 | -- save to disk 191 | torch.save( torchFile, data ) 192 | print(('convert: complete [%.2f s]'):format(torch.toc(tic))) 193 | end 194 | 195 | function CocoApi:getAnnIds( filters ) 196 | if not filters then filters = {} end 197 | if filters.imgId then 198 | return self.data.annIdsPerImg[self.inds.imgIdsMap[filters.imgId]] or {} 199 | elseif filters.catId then 200 | return self.data.annIdsPerCat[self.inds.catIdsMap[filters.catId]] or {} 201 | else 202 | return self.data.annotations.id 203 | end 204 | end 205 | 206 | function CocoApi:getCatIds() 207 | return self.data.categories.id 208 | end 209 | 210 | function CocoApi:getImgIds() 211 | return self.data.images.id 212 | end 213 | 214 | function CocoApi:loadAnns( ids ) 215 | return self:__load(self.data.annotations,self.inds.annIdsMap,ids) 216 | end 217 | 218 | function CocoApi:loadCats( ids ) 219 | return self:__load(self.data.categories,self.inds.catIdsMap,ids) 220 | end 221 | 222 | function CocoApi:loadImgs( ids ) 223 | return self:__load(self.data.images,self.inds.imgIdsMap,ids) 224 | 
end 225 | 226 | function CocoApi:showAnns( img, anns ) 227 | local n, h, w = #anns, img:size(2), img:size(3) 228 | local MaskApi, clrs = coco.MaskApi, torch.rand(n,3)*.6+.4 229 | local O = img:clone():contiguous():float() 230 | if n==0 then anns,n={anns},1 end 231 | if anns[1].keypoints then for i=1,n do if anns[i].iscrowd==0 then 232 | local sk, kp, j, k = self:loadCats(anns[i].category_id)[1].skeleton 233 | kp=anns[i].keypoints; k=kp:size(1); j=torch.range(1,k,3):long(); k=k/3; 234 | local x,y,v = kp:index(1,j), kp:index(1,j+1), kp:index(1,j+2) 235 | for _,s in pairs(sk) do if v[s[1]]>0 and v[s[2]]>0 then 236 | MaskApi.drawLine(O,x[s[1]],y[s[1]],x[s[2]],y[s[2]],.75,clrs[i]) 237 | end end 238 | for j=1,k do if v[j]==1 then MaskApi.drawCirc(O,x[j],y[j],4,{0,0,0}) end end 239 | for j=1,k do if v[j]>0 then MaskApi.drawCirc(O,x[j],y[j],3,clrs[i]) end end 240 | end end end 241 | if anns[1].segmentation or anns[1].bbox then 242 | local Rs, alpha = {}, anns[1].keypoints and .25 or .4 243 | for i=1,n do 244 | Rs[i]=anns[i].segmentation 245 | if Rs[i] and #Rs[i]>0 then Rs[i]=MaskApi.frPoly(Rs[i],h,w) end 246 | if not Rs[i] then Rs[i]=MaskApi.frBbox(anns[i].bbox,h,w)[1] end 247 | end 248 | MaskApi.drawMasks(O,MaskApi.decode(Rs),nil,alpha,clrs) 249 | end 250 | return O 251 | end 252 | 253 | function CocoApi:__load( data, map, ids ) 254 | if not torch.isTensor(ids) then ids=torch.LongTensor({ids}) end 255 | local out, idx = {}, nil 256 | for i=1,ids:numel() do 257 | out[i], idx = {}, map[ids[i]] 258 | for k,v in pairs(data) do out[i][k]=v[idx] end 259 | end 260 | return out 261 | end 262 | -------------------------------------------------------------------------------- /detectron2/evaluation/namingerror_evaluator.py: -------------------------------------------------------------------------------- 1 | import contextlib 2 | import copy 3 | import io 4 | import itertools 5 | import json 6 | import logging 7 | import numpy as np 8 | import os 9 | import pickle 10 | from 
collections import OrderedDict, defaultdict 11 | import pycocotools.mask as mask_util 12 | from sklearn.metrics import confusion_matrix 13 | import torch 14 | from fvcore.common.file_io import PathManager 15 | from pycocotools.coco import COCO 16 | from tabulate import tabulate 17 | 18 | import detectron2.utils.comm as comm 19 | from detectron2.data import MetadataCatalog 20 | from detectron2.data.datasets.coco import convert_to_coco_json 21 | from pycocotools.cocoeval import NamingCOCOEval 22 | from detectron2.structures import Boxes, BoxMode, pairwise_iou 23 | from detectron2.utils.logger import create_small_table 24 | from .coco_evaluation import instances_to_coco_json 25 | 26 | from .evaluator import DatasetEvaluator 27 | 28 | 29 | class NamingErrorEvaluator(DatasetEvaluator): 30 | ''' 31 | This evaluator computes the Naming error (class agnostic matching in COCOEval followed by FP calculation) 32 | 33 | Auxiliary naming-mismatch errors (accuracy, etc.) are also provided for completeness 34 | ''' 35 | def __init__(self, dataset_name, cfg, distributed, output_dir=None): 36 | """ 37 | Args: 38 | dataset_name (str): name of the dataset to be evaluated. 39 | It must have either the following corresponding metadata: 40 | 41 | "json_file": the path to the COCO format annotation 42 | 43 | Or it must be in detectron2's standard dataset format 44 | so it can be converted to COCO format automatically. 45 | cfg (CfgNode): config instance 46 | distributed (True): if True, will collect results from all ranks and run evaluation 47 | in the main process. 48 | Otherwise, will evaluate the results in the current process. 49 | output_dir (str): optional, an output directory to dump all 50 | results predicted on the dataset. The dump contains two files: 51 | 52 | 1. "instance_predictions.pth" a file in torch serialization 53 | format that contains all the raw original predictions. 54 | 2. "coco_instances_results.json" a json file in COCO's result 55 | format. 
56 | """ 57 | self._tasks = ('segm',) 58 | self._distributed = distributed 59 | self._output_dir = output_dir 60 | 61 | self._cpu_device = torch.device("cpu") 62 | self._logger = logging.getLogger(__name__) 63 | 64 | self._metadata = MetadataCatalog.get(dataset_name) 65 | if not hasattr(self._metadata, "json_file"): 66 | self._logger.info( 67 | f"'{dataset_name}' is not registered by `register_coco_instances`." 68 | " Therefore trying to convert it to COCO format ..." 69 | ) 70 | 71 | cache_path = os.path.join(output_dir, f"{dataset_name}_coco_format.json") 72 | self._metadata.json_file = cache_path 73 | convert_to_coco_json(dataset_name, cache_path) 74 | 75 | json_file = PathManager.get_local_path(self._metadata.json_file) 76 | with contextlib.redirect_stdout(io.StringIO()): 77 | self._coco_api = COCO(json_file) 78 | 79 | self._kpt_oks_sigmas = cfg.TEST.KEYPOINT_OKS_SIGMAS 80 | # Test set json files do not contain annotations (evaluation must be 81 | # performed using the COCO evaluation server). 82 | self._do_evaluation = "annotations" in self._coco_api.dataset 83 | 84 | def reset(self): 85 | self._predictions = [] 86 | 87 | def process(self, inputs, outputs): 88 | """ 89 | Args: 90 | inputs: the inputs to a COCO model (e.g., GeneralizedRCNN). 91 | It is a list of dict. Each dict corresponds to an image and 92 | contains keys like "height", "width", "file_name", "image_id". 93 | outputs: the outputs of a COCO model. It is a list of dicts with key 94 | "instances" that contains :class:`Instances`. 
95 | """ 96 | for input, output in zip(inputs, outputs): 97 | prediction = {"image_id": input["image_id"]} 98 | if "instances" in output: 99 | instances = output["instances"].to(self._cpu_device) 100 | prediction["instances"] = instances_to_coco_json(instances, input["image_id"]) 101 | if "proposals" in output: 102 | prediction["proposals"] = output["proposals"].to(self._cpu_device) 103 | self._predictions.append(prediction) 104 | 105 | def evaluate(self): 106 | # Gather up all the predictions, then we will intercept the matched images from the 107 | # `COCOEval` class 108 | ## Gather up all predictions 109 | if self._distributed: 110 | comm.synchronize() 111 | predictions = comm.gather(self._predictions, dst=0) 112 | predictions = list(itertools.chain(*predictions)) 113 | 114 | if not comm.is_main_process(): 115 | return {} 116 | else: 117 | predictions = self._predictions 118 | 119 | if len(predictions) == 0: 120 | self._logger.warning("[COCOEvaluator] Did not receive valid predictions.") 121 | return {} 122 | 123 | self._results = OrderedDict() 124 | if "instances" in predictions[0]: 125 | self._eval_predictions(set(self._tasks), predictions) 126 | # Copy so the caller can do whatever with results 127 | return copy.deepcopy(self._results) 128 | 129 | def _eval_predictions(self, tasks, predictions): 130 | """ 131 | Evaluate predictions on the given tasks. 132 | Fill self._results with the metrics of the tasks. 
133 | """ 134 | self._logger.info("Preparing results for COCO format ...") 135 | coco_results = list(itertools.chain(*[x["instances"] for x in predictions])) 136 | 137 | # unmap the category ids for COCO 138 | if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"): 139 | reverse_id_mapping = { 140 | v: k for k, v in self._metadata.thing_dataset_id_to_contiguous_id.items() 141 | } 142 | for result in coco_results: 143 | category_id = result["category_id"] 144 | assert ( 145 | category_id in reverse_id_mapping 146 | ), "A prediction has category_id={}, which is not available in the dataset.".format( 147 | category_id 148 | ) 149 | result["category_id"] = reverse_id_mapping[category_id] 150 | 151 | if not self._do_evaluation: 152 | self._logger.info("Annotations are not available for evaluation.") 153 | return 154 | 155 | self._logger.info("Evaluating predictions ...") 156 | for task in sorted(tasks): 157 | coco_eval = ( 158 | _evaluate_predictions_on_coco( 159 | self._coco_api, coco_results, task, kpt_oks_sigmas=self._kpt_oks_sigmas 160 | ) 161 | if len(coco_results) > 0 162 | else None # cocoapi does not handle empty results very well 163 | ) 164 | res = self._derive_coco_results( 165 | coco_eval, task, class_names=self._metadata.get("thing_classes") 166 | ) 167 | self._results['ne-' + task] = res 168 | 169 | def _derive_coco_results(self, coco_eval, iou_type, class_names=None): 170 | """ 171 | Use the COCOeval matches to derive tp, fp, fn => precision, recall for each image/category 172 | Next, for each (class, IoU threshold, object size), aggregate the F1-score (average them). 173 | 174 | Derive the desired score numbers from COCOeval. 175 | 176 | Args: 177 | coco_eval (None or COCOEval): None represents no predictions from model. 178 | iou_type (str): 179 | class_names (None or list[str]): if provided, will use it to predict 180 | per-category AP. 
181 | 182 | Returns: 183 | a dict of {metric name: score} 184 | """ 185 | 186 | # Final metric 187 | metrics = ['accuracy'] 188 | 189 | # Return nans if there are no predictions! 190 | if coco_eval is None: 191 | self._logger.warn("No predictions from the model!") 192 | return {metric: float("nan") for metric in metrics} 193 | 194 | # Calculate precision recall for each class, area, and iou threshold 195 | CM = confusion_matrix = coco_eval.confusion_matrix # [dt, gt] 196 | tp = np.diag(CM) 197 | tpsum = tp.sum() 198 | total = CM.sum() 199 | eps = np.spacing(2) 200 | 201 | num_gt = CM.sum(0) + eps 202 | num_dt = CM.sum(1) + eps 203 | pre = tp/num_dt 204 | rec = tp/num_gt 205 | f1 = 2*pre*rec/(pre + rec + eps) 206 | f1 = f1.mean() 207 | 208 | # naming error for dt 209 | CM_dt = coco_eval.confusion_matrix_dt 210 | tp_dt = np.diag(CM_dt).sum() 211 | total_dt = CM_dt.sum() 212 | 213 | tdt = np.diag(CM_dt) 214 | gdt = CM_dt.sum(0) + eps 215 | ddt = CM_dt.sum(1) + eps 216 | pre_dt = tdt / ddt 217 | rec_dt = tdt / gdt 218 | f1_dt = 2 * pre_dt * rec_dt / (pre_dt + rec_dt + eps) 219 | f1_dt = f1_dt.mean() 220 | # calculate fpr 221 | fpdt = ddt - tdt 222 | fndt = gdt - tdt 223 | tndt = total_dt - (tdt + fndt + fpdt) 224 | fpr_dt = fpdt / (fpdt + tndt + eps) 225 | # calculate fp/gt (a metric that computes #fps per gt for each gt_class) 226 | fp_per_cls = CM_dt.sum(0) - tdt 227 | gtcount = np.maximum(1, coco_eval.gt_count) 228 | fp_per_cls = (fp_per_cls/gtcount).mean() 229 | 230 | results = dict() 231 | results['accuracy'] = np.around(100 * tpsum/total, 2) 232 | results['naming_f1'] = np.around(100 * f1, 2) 233 | # additional results 234 | results['accuracy_dt'] = np.around(100 * tp_dt / total_dt, 2) 235 | results['naming_f1_dt'] = np.around(100 * f1_dt, 2) 236 | results['naming_fpr_dt'] = np.around(100 * fpr_dt.mean(), 2) 237 | results['fp_per_cls_dt'] = np.around(fp_per_cls, 2) 238 | 239 | # actual fp / obj 240 | fpcounter = coco_eval.fpcounter 241 | fpcount = 
defaultdict(lambda: []) # for each class, keep list of fps detected 242 | for (_, _, gtCls), v in fpcounter.items(): 243 | fpcount[gtCls].append(v) 244 | 245 | for k, v in fpcount.items(): 246 | fpcount[k] = np.mean(v) 247 | 248 | fp_per_obj = np.mean(list(fpcount.values())) 249 | results['naming_error'] = results['fp_per_obj_dt'] = np.around(fp_per_obj, 4) 250 | 251 | # # Final dict of results 252 | self._logger.info( 253 | "Evaluation results for {}: \n".format('Naming Error') + create_small_table(results) 254 | ) 255 | if not np.isfinite(sum(results.values())): 256 | self._logger.info("Some metrics cannot be computed and is shown as NaN.") 257 | 258 | return results 259 | 260 | 261 | def _evaluate_predictions_on_coco(coco_gt, coco_results, iou_type, kpt_oks_sigmas=None): 262 | """ 263 | Evaluate the coco results using COCOEval API. 264 | We will use the `evalImgs` datastructure to calculate precision/recall per image 265 | """ 266 | assert len(coco_results) > 0 267 | 268 | if iou_type == "segm": 269 | coco_results = copy.deepcopy(coco_results) 270 | # When evaluating mask AP, if the results contain bbox, cocoapi will 271 | # use the box area as the area of the instance, instead of the mask area. 272 | # This leads to a different definition of small/medium/large. 273 | # We remove the bbox field to let mask AP use mask area. 
274 | for c in coco_results: 275 | c.pop("bbox", None) 276 | else: 277 | raise ValueError(f"iou_type {iou_type} not supported") 278 | 279 | coco_dt = coco_gt.loadRes(coco_results) 280 | coco_eval = NamingCOCOEval(coco_gt, coco_dt, iou_type) 281 | coco_eval.evaluate() 282 | return coco_eval 283 | -------------------------------------------------------------------------------- /detectron2/evaluation/tpmqscore_evaluator.py: -------------------------------------------------------------------------------- 1 | import contextlib 2 | import copy 3 | import io 4 | import itertools 5 | import json 6 | import logging 7 | import numpy as np 8 | import os 9 | import pickle 10 | from collections import OrderedDict, defaultdict 11 | from pycocotools import coco 12 | import pycocotools.mask as mask_util 13 | import torch 14 | from fvcore.common.file_io import PathManager 15 | from pycocotools.coco import COCO 16 | from tabulate import tabulate 17 | 18 | import detectron2.utils.comm as comm 19 | from detectron2.data import MetadataCatalog 20 | from detectron2.data.datasets.coco import convert_to_coco_json 21 | #from detectron2.evaluation.fast_eval_api import COCOeval_opt as COCOeval 22 | from pycocotools.cocoeval import COCOeval 23 | from detectron2.structures import Boxes, BoxMode, pairwise_iou 24 | from detectron2.utils.logger import create_small_table 25 | from .coco_evaluation import instances_to_coco_json 26 | from pycocotools import mask as maskUtils 27 | from .f_boundary import db_eval_boundary 28 | from .evaluator import DatasetEvaluator 29 | 30 | 31 | class TPMQScoreEvaluator(DatasetEvaluator): 32 | ''' 33 | This evaluator computes the F1-score at the boundary of true positive detected images with the set of ground truths 34 | ''' 35 | def __init__(self, dataset_name, cfg, distributed, output_dir=None): 36 | """ 37 | Args: 38 | dataset_name (str): name of the dataset to be evaluated. 
39 | It must have either the following corresponding metadata: 40 | 41 | "json_file": the path to the COCO format annotation 42 | 43 | Or it must be in detectron2's standard dataset format 44 | so it can be converted to COCO format automatically. 45 | cfg (CfgNode): config instance 46 | distributed (True): if True, will collect results from all ranks and run evaluation 47 | in the main process. 48 | Otherwise, will evaluate the results in the current process. 49 | output_dir (str): optional, an output directory to dump all 50 | results predicted on the dataset. The dump contains two files: 51 | 52 | 1. "instance_predictions.pth" a file in torch serialization 53 | format that contains all the raw original predictions. 54 | 2. "coco_instances_results.json" a json file in COCO's result 55 | format. 56 | """ 57 | self._tasks = ('segm',) 58 | self._distributed = distributed 59 | self._output_dir = output_dir 60 | 61 | self._cpu_device = torch.device("cpu") 62 | self._logger = logging.getLogger(__name__) 63 | 64 | self._metadata = MetadataCatalog.get(dataset_name) 65 | if not hasattr(self._metadata, "json_file"): 66 | self._logger.info( 67 | f"'{dataset_name}' is not registered by `register_coco_instances`." 68 | " Therefore trying to convert it to COCO format ..." 69 | ) 70 | 71 | cache_path = os.path.join(output_dir, f"{dataset_name}_coco_format.json") 72 | self._metadata.json_file = cache_path 73 | convert_to_coco_json(dataset_name, cache_path) 74 | 75 | json_file = PathManager.get_local_path(self._metadata.json_file) 76 | with contextlib.redirect_stdout(io.StringIO()): 77 | self._coco_api = COCO(json_file) 78 | 79 | self._kpt_oks_sigmas = cfg.TEST.KEYPOINT_OKS_SIGMAS 80 | self._do_evaluation = "annotations" in self._coco_api.dataset 81 | 82 | 83 | def reset(self): 84 | self._predictions = [] 85 | 86 | def process(self, inputs, outputs): 87 | """ 88 | Args: 89 | inputs: the inputs to a COCO model (e.g., GeneralizedRCNN). 90 | It is a list of dict. 
Each dict corresponds to an image and 91 | contains keys like "height", "width", "file_name", "image_id". 92 | outputs: the outputs of a COCO model. It is a list of dicts with key 93 | "instances" that contains :class:`Instances`. 94 | """ 95 | for input, output in zip(inputs, outputs): 96 | prediction = {"image_id": input["image_id"]} 97 | 98 | if "instances" in output: 99 | instances = output["instances"].to(self._cpu_device) 100 | prediction["instances"] = instances_to_coco_json(instances, input["image_id"]) 101 | if "proposals" in output: 102 | prediction["proposals"] = output["proposals"].to(self._cpu_device) 103 | self._predictions.append(prediction) 104 | 105 | 106 | def evaluate(self): 107 | ## Gather up all predictions 108 | if self._distributed: 109 | comm.synchronize() 110 | predictions = comm.gather(self._predictions, dst=0) 111 | predictions = list(itertools.chain(*predictions)) 112 | 113 | if not comm.is_main_process(): 114 | return {} 115 | else: 116 | predictions = self._predictions 117 | 118 | if len(predictions) == 0: 119 | self._logger.warning("[COCOEvaluator] Did not receive valid predictions.") 120 | return {} 121 | 122 | self._results = OrderedDict() 123 | if "instances" in predictions[0]: 124 | self._eval_predictions(set(self._tasks), predictions) 125 | # Copy so the caller can do whatever with results 126 | return copy.deepcopy(self._results) 127 | 128 | 129 | def _eval_predictions(self, tasks, predictions): 130 | """ 131 | Evaluate predictions on the given tasks. 132 | Fill self._results with the metrics of the tasks. 
133 | """ 134 | self._logger.info("Preparing results for COCO format ...") 135 | coco_results = list(itertools.chain(*[x["instances"] for x in predictions])) 136 | 137 | # unmap the category ids for COCO 138 | if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"): 139 | reverse_id_mapping = { 140 | v: k for k, v in self._metadata.thing_dataset_id_to_contiguous_id.items() 141 | } 142 | for result in coco_results: 143 | category_id = result["category_id"] 144 | assert ( 145 | category_id in reverse_id_mapping 146 | ), "A prediction has category_id={}, which is not available in the dataset.".format( 147 | category_id 148 | ) 149 | result["category_id"] = reverse_id_mapping[category_id] 150 | 151 | if not self._do_evaluation: 152 | self._logger.info("Annotations are not available for evaluation.") 153 | return 154 | 155 | self._logger.info("Evaluating predictions ...") 156 | for task in sorted(tasks): 157 | coco_eval = ( 158 | _evaluate_predictions_on_coco( 159 | self._coco_api, coco_results, task, kpt_oks_sigmas=self._kpt_oks_sigmas 160 | ) 161 | if len(coco_results) > 0 162 | else None # cocoapi does not handle empty results very well 163 | ) 164 | 165 | res = self._derive_coco_results( 166 | coco_eval, task, class_names=self._metadata.get("thing_classes") 167 | ) 168 | self._results['tpmq-' + task] = res 169 | 170 | 171 | def _derive_coco_results(self, coco_eval, iou_type, class_names=None): 172 | """ 173 | Use the COCOeval matches to derive tp, fp, fn => precision, recall for each image/category 174 | Next, for each (class, IoU threshold, object size), aggregate the F1-score (average them). 175 | 176 | Derive the desired score numbers from COCOeval. 177 | 178 | Args: 179 | coco_eval (None or COCOEval): None represents no predictions from model. 180 | iou_type (str): 181 | class_names (None or list[str]): if provided, will use it to predict 182 | per-category AP. 
183 | 184 | Returns: 185 | a dict of {metric name: score} 186 | """ 187 | 188 | # Final metric 189 | metrics = ['TPMQ(IoU=0.5)'] 190 | 191 | gt_masks = coco_eval._gts 192 | dt_masks = coco_eval._dts 193 | # Return nans if there are no predictions! 194 | if coco_eval is None: 195 | self._logger.warn("No predictions from the model!") 196 | return {metric: float("nan") for metric in metrics} 197 | 198 | iou = defaultdict(list) 199 | # Calculate precision recall for each class, area, and iou threshold 200 | for evalImg in coco_eval.evalImgs: 201 | # Skip if maxDets is not the best value, or if evalImg is None (there was no detection) 202 | if evalImg is None: 203 | continue 204 | if evalImg['maxDet'] != coco_eval.params.maxDets[-1]: 205 | continue 206 | imgId = evalImg['image_id'] 207 | catId = evalImg['category_id'] 208 | gt_mask = gt_masks[imgId,catId] 209 | dt_mask = dt_masks[imgId,catId] 210 | cate_class, area, dtmatches, gtmatches, dtIg, gtIds, gtIg = [evalImg[x] for x in ['category_id', 'aRng', 'dtMatches', 'gtMatches','dtIgnore', 'gtIds', 'gtIgnore']] 211 | area = tuple(area) 212 | if len(gtIds) == 0: # no matches at any iou thresh 213 | continue 214 | iou_perimg = 0 215 | cnt = 0 216 | for dtid,dtm in enumerate(dtmatches[0]): # 0th index is iou thr 0.5 217 | if dtIg[0][dtid]: 218 | continue 219 | for gtm in gt_mask: 220 | if gtm['id'] == dtm: 221 | gt_bin_mask = maskUtils.decode(gtm['segmentation']) 222 | dt_bin_mask = maskUtils.decode(dt_mask[dtid]['segmentation']) 223 | iou_calc = db_eval_boundary(dt_bin_mask, gt_bin_mask, bound_th=1.0) 224 | iou_perimg += iou_calc 225 | cnt +=1 226 | if cnt==0: 227 | pass 228 | else: 229 | iou[area].append(iou_perimg/cnt) 230 | 231 | area_to_key = [(tuple(area), st) for area, st in zip(coco_eval.params.areaRng, \ 232 | ['all', 'small', 'medium', 'large'])] 233 | area_to_key = dict(area_to_key) 234 | 235 | # Calculate individual f1-scores 236 | results = { 237 | 'small': [], # small, med, large f1 at all thresholds 238 | 
'medium': [], 239 | 'large':[], 240 | 'all': [] 241 | } 242 | for key, val in iou.items(): 243 | areakey = key 244 | results[area_to_key[areakey]].append(val) 245 | 246 | # Average these quantities 247 | for k, v in results.items(): 248 | results[k] = np.around(np.mean(v), 4) 249 | 250 | # Final dict of results 251 | results = {f'TPMQ_{k}': v for k, v in results.items()} 252 | self._logger.info( 253 | "Evaluation results for {}: \n".format('TPMQ-score') + create_small_table(results) 254 | ) 255 | if not np.isfinite(sum(results.values())): 256 | self._logger.info("Some metrics cannot be computed and is shown as NaN.") 257 | return results 258 | 259 | def _evaluate_predictions_on_coco(coco_gt, coco_results, iou_type, kpt_oks_sigmas=None): 260 | """ 261 | Evaluate the coco results using COCOEval API. 262 | We will use the `evalImgs` datastructure to calculate precision/recall per image 263 | """ 264 | assert len(coco_results) > 0 265 | 266 | if iou_type == "segm": 267 | coco_results = copy.deepcopy(coco_results) 268 | # When evaluating mask AP, if the results contain bbox, cocoapi will 269 | # use the box area as the area of the instance, instead of the mask area. 270 | # This leads to a different definition of small/medium/large. 271 | # We remove the bbox field to let mask AP use mask area. 
272 | for c in coco_results: 273 | c.pop("bbox", None) 274 | else: 275 | raise ValueError(f"iou_type {iou_type} not supported") 276 | 277 | coco_dt = coco_gt.loadRes(coco_results) 278 | coco_eval = COCOeval(coco_gt, coco_dt, iou_type) 279 | 280 | coco_eval.evaluate() 281 | 282 | return coco_eval 283 | -------------------------------------------------------------------------------- /cocoapi/PythonAPI/pycocotools/_mask.pyx: -------------------------------------------------------------------------------- 1 | # distutils: language = c 2 | # distutils: sources = ../common/maskApi.c 3 | 4 | #************************************************************************** 5 | # Microsoft COCO Toolbox. version 2.0 6 | # Data, paper, and tutorials available at: http://mscoco.org/ 7 | # Code written by Piotr Dollar and Tsung-Yi Lin, 2015. 8 | # Licensed under the Simplified BSD License [see coco/license.txt] 9 | #************************************************************************** 10 | 11 | __author__ = 'tsungyi' 12 | 13 | import sys 14 | PYTHON_VERSION = sys.version_info[0] 15 | 16 | # import both Python-level and C-level symbols of Numpy 17 | # the API uses Numpy to interface C and Python 18 | import numpy as np 19 | cimport numpy as np 20 | from libc.stdlib cimport malloc, free 21 | 22 | # intialized Numpy. must do. 
23 | np.import_array() 24 | 25 | # import numpy C function 26 | # we use PyArray_ENABLEFLAGS to make Numpy ndarray responsible to memory management 27 | cdef extern from "numpy/arrayobject.h": 28 | void PyArray_ENABLEFLAGS(np.ndarray arr, int flags) 29 | 30 | # Declare the prototype of the C functions in MaskApi.h 31 | cdef extern from "maskApi.h": 32 | ctypedef unsigned int uint 33 | ctypedef unsigned long siz 34 | ctypedef unsigned char byte 35 | ctypedef double* BB 36 | ctypedef struct RLE: 37 | siz h, 38 | siz w, 39 | siz m, 40 | uint* cnts, 41 | void rlesInit( RLE **R, siz n ) 42 | void rleEncode( RLE *R, const byte *M, siz h, siz w, siz n ) 43 | void rleDecode( const RLE *R, byte *mask, siz n ) 44 | void rleMerge( const RLE *R, RLE *M, siz n, int intersect ) 45 | void rleArea( const RLE *R, siz n, uint *a ) 46 | void rleIou( RLE *dt, RLE *gt, siz m, siz n, byte *iscrowd, double *o ) 47 | void bbIou( BB dt, BB gt, siz m, siz n, byte *iscrowd, double *o ) 48 | void rleToBbox( const RLE *R, BB bb, siz n ) 49 | void rleFrBbox( RLE *R, const BB bb, siz h, siz w, siz n ) 50 | void rleFrPoly( RLE *R, const double *xy, siz k, siz h, siz w ) 51 | char* rleToString( const RLE *R ) 52 | void rleFrString( RLE *R, char *s, siz h, siz w ) 53 | 54 | # python class to wrap RLE array in C 55 | # the class handles the memory allocation and deallocation 56 | cdef class RLEs: 57 | cdef RLE *_R 58 | cdef siz _n 59 | 60 | def __cinit__(self, siz n =0): 61 | rlesInit(&self._R, n) 62 | self._n = n 63 | 64 | # free the RLE array here 65 | def __dealloc__(self): 66 | if self._R is not NULL: 67 | for i in range(self._n): 68 | free(self._R[i].cnts) 69 | free(self._R) 70 | def __getattr__(self, key): 71 | if key == 'n': 72 | return self._n 73 | raise AttributeError(key) 74 | 75 | # python class to wrap Mask array in C 76 | # the class handles the memory allocation and deallocation 77 | cdef class Masks: 78 | cdef byte *_mask 79 | cdef siz _h 80 | cdef siz _w 81 | cdef siz _n 82 | 83 | 
def __cinit__(self, h, w, n): 84 | self._mask = malloc(h*w*n* sizeof(byte)) 85 | self._h = h 86 | self._w = w 87 | self._n = n 88 | # def __dealloc__(self): 89 | # the memory management of _mask has been passed to np.ndarray 90 | # it doesn't need to be freed here 91 | 92 | # called when passing into np.array() and return an np.ndarray in column-major order 93 | def __array__(self): 94 | cdef np.npy_intp shape[1] 95 | shape[0] = self._h*self._w*self._n 96 | # Create a 1D array, and reshape it to fortran/Matlab column-major array 97 | ndarray = np.PyArray_SimpleNewFromData(1, shape, np.NPY_UINT8, self._mask).reshape((self._h, self._w, self._n), order='F') 98 | # The _mask allocated by Masks is now handled by ndarray 99 | PyArray_ENABLEFLAGS(ndarray, np.NPY_OWNDATA) 100 | return ndarray 101 | 102 | # internal conversion from Python RLEs object to compressed RLE format 103 | def _toString(RLEs Rs): 104 | cdef siz n = Rs.n 105 | cdef bytes py_string 106 | cdef char* c_string 107 | objs = [] 108 | for i in range(n): 109 | c_string = rleToString( &Rs._R[i] ) 110 | py_string = c_string 111 | objs.append({ 112 | 'size': [Rs._R[i].h, Rs._R[i].w], 113 | 'counts': py_string 114 | }) 115 | free(c_string) 116 | return objs 117 | 118 | # internal conversion from compressed RLE format to Python RLEs object 119 | def _frString(rleObjs): 120 | cdef siz n = len(rleObjs) 121 | Rs = RLEs(n) 122 | cdef bytes py_string 123 | cdef char* c_string 124 | for i, obj in enumerate(rleObjs): 125 | if PYTHON_VERSION == 2: 126 | py_string = str(obj['counts']).encode('utf8') 127 | elif PYTHON_VERSION == 3: 128 | py_string = str.encode(obj['counts']) if type(obj['counts']) == str else obj['counts'] 129 | else: 130 | raise Exception('Python version must be 2 or 3') 131 | c_string = py_string 132 | rleFrString( &Rs._R[i], c_string, obj['size'][0], obj['size'][1] ) 133 | return Rs 134 | 135 | # encode mask to RLEs objects 136 | # list of RLE string can be generated by RLEs member function 137 | def 
encode(np.ndarray[np.uint8_t, ndim=3, mode='fortran'] mask): 138 | h, w, n = mask.shape[0], mask.shape[1], mask.shape[2] 139 | cdef RLEs Rs = RLEs(n) 140 | rleEncode(Rs._R,mask.data,h,w,n) 141 | objs = _toString(Rs) 142 | return objs 143 | 144 | # decode mask from compressed list of RLE string or RLEs object 145 | def decode(rleObjs): 146 | cdef RLEs Rs = _frString(rleObjs) 147 | h, w, n = Rs._R[0].h, Rs._R[0].w, Rs._n 148 | masks = Masks(h, w, n) 149 | rleDecode(Rs._R, masks._mask, n); 150 | return np.array(masks) 151 | 152 | def merge(rleObjs, intersect=0): 153 | cdef RLEs Rs = _frString(rleObjs) 154 | cdef RLEs R = RLEs(1) 155 | rleMerge(Rs._R, R._R, Rs._n, intersect) 156 | obj = _toString(R)[0] 157 | return obj 158 | 159 | def area(rleObjs): 160 | cdef RLEs Rs = _frString(rleObjs) 161 | cdef uint* _a = malloc(Rs._n* sizeof(uint)) 162 | rleArea(Rs._R, Rs._n, _a) 163 | cdef np.npy_intp shape[1] 164 | shape[0] = Rs._n 165 | a = np.array((Rs._n, ), dtype=np.uint8) 166 | a = np.PyArray_SimpleNewFromData(1, shape, np.NPY_UINT32, _a) 167 | PyArray_ENABLEFLAGS(a, np.NPY_OWNDATA) 168 | return a 169 | 170 | # iou computation. support function overload (RLEs-RLEs and bbox-bbox). 
171 | def iou(dt, gt, pyiscrowd): 172 | def _preproc(objs): 173 | if len(objs) == 0: 174 | return objs 175 | if type(objs) == np.ndarray: 176 | if len(objs.shape) == 1: 177 | objs = objs.reshape((objs[0], 1)) 178 | # check if it's Nx4 bbox 179 | if not len(objs.shape) == 2 or not objs.shape[1] == 4: 180 | raise Exception('numpy ndarray input is only for *bounding boxes* and should have Nx4 dimension') 181 | objs = objs.astype(np.double) 182 | elif type(objs) == list: 183 | # check if list is in box format and convert it to np.ndarray 184 | isbox = np.all(np.array([(len(obj)==4) and ((type(obj)==list) or (type(obj)==np.ndarray)) for obj in objs])) 185 | isrle = np.all(np.array([type(obj) == dict for obj in objs])) 186 | if isbox: 187 | objs = np.array(objs, dtype=np.double) 188 | if len(objs.shape) == 1: 189 | objs = objs.reshape((1,objs.shape[0])) 190 | elif isrle: 191 | objs = _frString(objs) 192 | else: 193 | raise Exception('list input can be bounding box (Nx4) or RLEs ([RLE])') 194 | else: 195 | raise Exception('unrecognized type. 
The following type: RLEs (rle), np.ndarray (box), and list (box) are supported.') 196 | return objs 197 | def _rleIou(RLEs dt, RLEs gt, np.ndarray[np.uint8_t, ndim=1] iscrowd, siz m, siz n, np.ndarray[np.double_t, ndim=1] _iou): 198 | rleIou( dt._R, gt._R, m, n, iscrowd.data, _iou.data ) 199 | def _bbIou(np.ndarray[np.double_t, ndim=2] dt, np.ndarray[np.double_t, ndim=2] gt, np.ndarray[np.uint8_t, ndim=1] iscrowd, siz m, siz n, np.ndarray[np.double_t, ndim=1] _iou): 200 | bbIou( dt.data, gt.data, m, n, iscrowd.data, _iou.data ) 201 | def _len(obj): 202 | cdef siz N = 0 203 | if type(obj) == RLEs: 204 | N = obj.n 205 | elif len(obj)==0: 206 | pass 207 | elif type(obj) == np.ndarray: 208 | N = obj.shape[0] 209 | return N 210 | # convert iscrowd to numpy array 211 | cdef np.ndarray[np.uint8_t, ndim=1] iscrowd = np.array(pyiscrowd, dtype=np.uint8) 212 | # simple type checking 213 | cdef siz m, n 214 | dt = _preproc(dt) 215 | gt = _preproc(gt) 216 | m = _len(dt) 217 | n = _len(gt) 218 | if m == 0 or n == 0: 219 | return [] 220 | if not type(dt) == type(gt): 221 | raise Exception('The dt and gt should have the same data type, either RLEs, list or np.ndarray') 222 | 223 | # define local variables 224 | cdef double* _iou = 0 225 | cdef np.npy_intp shape[1] 226 | # check type and assign iou function 227 | if type(dt) == RLEs: 228 | _iouFun = _rleIou 229 | elif type(dt) == np.ndarray: 230 | _iouFun = _bbIou 231 | else: 232 | raise Exception('input data type not allowed.') 233 | _iou = malloc(m*n* sizeof(double)) 234 | iou = np.zeros((m*n, ), dtype=np.double) 235 | shape[0] = m*n 236 | iou = np.PyArray_SimpleNewFromData(1, shape, np.NPY_DOUBLE, _iou) 237 | PyArray_ENABLEFLAGS(iou, np.NPY_OWNDATA) 238 | _iouFun(dt, gt, iscrowd, m, n, iou) 239 | return iou.reshape((m,n), order='F') 240 | 241 | def toBbox( rleObjs ): 242 | cdef RLEs Rs = _frString(rleObjs) 243 | cdef siz n = Rs.n 244 | cdef BB _bb = malloc(4*n* sizeof(double)) 245 | rleToBbox( Rs._R, _bb, n ) 246 | cdef 
np.npy_intp shape[1] 247 | shape[0] = 4*n 248 | bb = np.array((1,4*n), dtype=np.double) 249 | bb = np.PyArray_SimpleNewFromData(1, shape, np.NPY_DOUBLE, _bb).reshape((n, 4)) 250 | PyArray_ENABLEFLAGS(bb, np.NPY_OWNDATA) 251 | return bb 252 | 253 | def frBbox(np.ndarray[np.double_t, ndim=2] bb, siz h, siz w ): 254 | cdef siz n = bb.shape[0] 255 | Rs = RLEs(n) 256 | rleFrBbox( Rs._R, bb.data, h, w, n ) 257 | objs = _toString(Rs) 258 | return objs 259 | 260 | def frPoly( poly, siz h, siz w ): 261 | cdef np.ndarray[np.double_t, ndim=1] np_poly 262 | n = len(poly) 263 | Rs = RLEs(n) 264 | for i, p in enumerate(poly): 265 | np_poly = np.array(p, dtype=np.double, order='F') 266 | rleFrPoly( &Rs._R[i], np_poly.data, int(len(p)/2), h, w ) 267 | objs = _toString(Rs) 268 | return objs 269 | 270 | def frUncompressedRLE(ucRles, siz h, siz w): 271 | cdef np.ndarray[np.uint32_t, ndim=1] cnts 272 | cdef RLE R 273 | cdef uint *data 274 | n = len(ucRles) 275 | objs = [] 276 | for i in range(n): 277 | Rs = RLEs(1) 278 | cnts = np.array(ucRles[i]['counts'], dtype=np.uint32) 279 | # time for malloc can be saved here but it's fine 280 | data = malloc(len(cnts)* sizeof(uint)) 281 | for j in range(len(cnts)): 282 | data[j] = cnts[j] 283 | R = RLE(ucRles[i]['size'][0], ucRles[i]['size'][1], len(cnts), data) 284 | Rs._R[0] = R 285 | objs.append(_toString(Rs)[0]) 286 | return objs 287 | 288 | def frPyObjects(pyobj, h, w): 289 | # encode rle from a list of python objects 290 | if type(pyobj) == np.ndarray: 291 | objs = frBbox(pyobj, h, w) 292 | elif type(pyobj) == list and len(pyobj[0]) == 4: 293 | objs = frBbox(pyobj, h, w) 294 | elif type(pyobj) == list and len(pyobj[0]) > 4: 295 | objs = frPoly(pyobj, h, w) 296 | elif type(pyobj) == list and type(pyobj[0]) == dict \ 297 | and 'counts' in pyobj[0] and 'size' in pyobj[0]: 298 | objs = frUncompressedRLE(pyobj, h, w) 299 | # encode rle from single python object 300 | elif type(pyobj) == list and len(pyobj) == 4: 301 | objs = frBbox([pyobj], h, 
w)[0] 302 | elif type(pyobj) == list and len(pyobj) > 4: 303 | objs = frPoly([pyobj], h, w)[0] 304 | elif type(pyobj) == dict and 'counts' in pyobj and 'size' in pyobj: 305 | objs = frUncompressedRLE([pyobj], h, w)[0] 306 | else: 307 | raise Exception('input type is not supported.') 308 | return objs 309 | -------------------------------------------------------------------------------- /detectron2/evaluation/connectiveness_evaluator.py: -------------------------------------------------------------------------------- 1 | import contextlib 2 | import copy 3 | import io 4 | import itertools 5 | import json 6 | import logging 7 | import numpy as np 8 | import os 9 | import pickle 10 | from collections import OrderedDict, defaultdict 11 | import pycocotools.mask as mask_util 12 | import torch 13 | from fvcore.common.file_io import PathManager 14 | from pycocotools.coco import COCO 15 | from tabulate import tabulate 16 | 17 | import detectron2.utils.comm as comm 18 | from detectron2.data import MetadataCatalog 19 | from detectron2.data.datasets.coco import convert_to_coco_json 20 | #from detectron2.evaluation.fast_eval_api import COCOeval_opt as COCOeval 21 | from pycocotools.cocoeval import COCOeval 22 | from detectron2.structures import Boxes, BoxMode, pairwise_iou 23 | from detectron2.utils.logger import create_small_table 24 | from detectron2.evaluation.coco_evaluation import instances_to_coco_json 25 | 26 | from detectron2.evaluation.evaluator import DatasetEvaluator 27 | 28 | 29 | class ConnectivenessEvaluator(DatasetEvaluator): 30 | ''' 31 | This evaluator computes the Duplicate Confusion of detected images with the set of itself 32 | 33 | This is to capture the amount of overlap between predictions by the model, used 34 | as an indicator of how confused it is 35 | ''' 36 | def __init__(self, dataset_name, cfg, distributed, output_dir=None): 37 | """ 38 | Args: 39 | dataset_name (str): name of the dataset to be evaluated. 
40 | It must have either the following corresponding metadata: 41 | 42 | "json_file": the path to the COCO format annotation 43 | 44 | Or it must be in detectron2's standard dataset format 45 | so it can be converted to COCO format automatically. 46 | cfg (CfgNode): config instance 47 | distributed (True): if True, will collect results from all ranks and run evaluation 48 | in the main process. 49 | Otherwise, will evaluate the results in the current process. 50 | output_dir (str): optional, an output directory to dump all 51 | results predicted on the dataset. The dump contains two files: 52 | 53 | 1. "instance_predictions.pth" a file in torch serialization 54 | format that contains all the raw original predictions. 55 | 2. "coco_instances_results.json" a json file in COCO's result 56 | format. 57 | """ 58 | self._tasks = ('segm',) 59 | self._distributed = distributed 60 | self._output_dir = output_dir 61 | 62 | self._cpu_device = torch.device("cpu") 63 | self._logger = logging.getLogger(__name__) 64 | 65 | self._metadata = MetadataCatalog.get(dataset_name) 66 | if not hasattr(self._metadata, "json_file"): 67 | self._logger.info( 68 | f"'{dataset_name}' is not registered by `register_coco_instances`." 69 | " Therefore trying to convert it to COCO format ..." 70 | ) 71 | 72 | cache_path = os.path.join(output_dir, f"{dataset_name}_coco_format.json") 73 | self._metadata.json_file = cache_path 74 | convert_to_coco_json(dataset_name, cache_path) 75 | 76 | json_file = PathManager.get_local_path(self._metadata.json_file) 77 | with contextlib.redirect_stdout(io.StringIO()): 78 | self._coco_api = COCO(json_file) 79 | 80 | self._kpt_oks_sigmas = cfg.TEST.KEYPOINT_OKS_SIGMAS 81 | # Test set json files do not contain annotations (evaluation must be 82 | # performed using the COCO evaluation server). 
83 | self._do_evaluation = "annotations" in self._coco_api.dataset 84 | 85 | def reset(self): 86 | self._predictions = [] 87 | 88 | def process(self, inputs, outputs): 89 | """ 90 | Args: 91 | inputs: the inputs to a COCO model (e.g., GeneralizedRCNN). 92 | It is a list of dict. Each dict corresponds to an image and 93 | contains keys like "height", "width", "file_name", "image_id". 94 | outputs: the outputs of a COCO model. It is a list of dicts with key 95 | "instances" that contains :class:`Instances`. 96 | """ 97 | for input, output in zip(inputs, outputs): 98 | prediction = {"image_id": input["image_id"]} 99 | 100 | if "instances" in output: 101 | instances = output["instances"].to(self._cpu_device) 102 | prediction["instances"] = instances_to_coco_json(instances, input["image_id"]) 103 | if "proposals" in output: 104 | prediction["proposals"] = output["proposals"].to(self._cpu_device) 105 | self._predictions.append(prediction) 106 | 107 | 108 | def evaluate(self): 109 | # Gather up all the predictions, then we will intercept the matched images from the 110 | # `COCOEval` class 111 | ## Gather up all predictions 112 | if self._distributed: 113 | comm.synchronize() 114 | predictions = comm.gather(self._predictions, dst=0) 115 | predictions = list(itertools.chain(*predictions)) 116 | 117 | if not comm.is_main_process(): 118 | return {} 119 | else: 120 | predictions = self._predictions 121 | 122 | if len(predictions) == 0: 123 | self._logger.warning("[COCOEvaluator] Did not receive valid predictions.") 124 | return {} 125 | 126 | self._results = OrderedDict() 127 | if "instances" in predictions[0]: 128 | self._eval_predictions(set(self._tasks), predictions) 129 | # Copy so the caller can do whatever with results 130 | return copy.deepcopy(self._results) 131 | 132 | def _eval_predictions(self, tasks, predictions): 133 | """ 134 | Evaluate predictions on the given tasks. 135 | Fill self._results with the metrics of the tasks. 
136 | """ 137 | self._logger.info("Preparing results for COCO format ...") 138 | coco_results = list(itertools.chain(*[x["instances"] for x in predictions])) 139 | 140 | # unmap the category ids for COCO 141 | if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"): 142 | reverse_id_mapping = { 143 | v: k for k, v in self._metadata.thing_dataset_id_to_contiguous_id.items() 144 | } 145 | for result in coco_results: 146 | category_id = result["category_id"] 147 | assert ( 148 | category_id in reverse_id_mapping 149 | ), "A prediction has category_id={}, which is not available in the dataset.".format( 150 | category_id 151 | ) 152 | result["category_id"] = reverse_id_mapping[category_id] 153 | 154 | if not self._do_evaluation: 155 | self._logger.info("Annotations are not available for evaluation.") 156 | return 157 | 158 | self._logger.info("Evaluating predictions ...") 159 | for task in sorted(tasks): 160 | coco_eval = ( 161 | _evaluate_predictions_on_coco( 162 | self._coco_api, coco_results, task, kpt_oks_sigmas=self._kpt_oks_sigmas 163 | ) 164 | if len(coco_results) > 0 165 | else None # cocoapi does not handle empty results very well 166 | ) 167 | 168 | res = self._derive_coco_results( 169 | coco_eval, task, class_names=self._metadata.get("thing_classes") 170 | ) 171 | self._results['cc-' + task] = res 172 | 173 | 174 | def _derive_coco_results(self, coco_eval, iou_type, class_names=None): 175 | """ 176 | Use the COCOeval matches to derive tp, fp, fn => precision, recall for each image/category 177 | Next, for each (class, IoU threshold, object size), aggregate the F1-score (average them). 178 | 179 | Derive the desired score numbers from COCOeval. 180 | 181 | Args: 182 | coco_eval (None or COCOEval): None represents no predictions from model. 183 | iou_type (str): 184 | class_names (None or list[str]): if provided, will use it to predict 185 | per-category AP. 
186 | 187 | Returns: 188 | a dict of {metric name: score} 189 | """ 190 | 191 | # Final metric 192 | metrics = ["cc_all", "cc_0.5", "cc_0.75"] 193 | 194 | # Return nans if there are no predictions! 195 | if coco_eval is None: 196 | self._logger.warn("No predictions from the model!") 197 | return {metric: 0. for metric in metrics} 198 | 199 | # get ious with dt 200 | import copy 201 | results_coco = copy.deepcopy(coco_eval.cocoDt) 202 | self_coco_eval = COCOeval(results_coco, results_coco) 203 | self_coco_eval.evaluate() 204 | 205 | dt_iou_submat = defaultdict(list) 206 | for evalImg in self_coco_eval.evalImgs: 207 | # Skip if maxDets is not the best value, or if evalImg is None (there was no detection), or if area is not all 208 | if evalImg is None: 209 | continue 210 | if evalImg['maxDet'] != self_coco_eval.params.maxDets[-1]: 211 | continue 212 | if evalImg['aRng'][0] != 0 or evalImg['aRng'][1] != 10000000000.: 213 | continue 214 | cate_class, area, dt, dtIg, dtScores, dtIds, gtIds, gtIg, ious = [evalImg[x] for x in ['category_id', 'aRng', 'dtMatches', 'dtIgnore', 'dtScores', 'dtIds', 'gtIds', 'gtIgnore', 'ious']] 215 | num_gt = len(gtIds) - np.sum(gtIg) 216 | if num_gt == 0: 217 | continue 218 | dt_iou_submat[evalImg['image_id']].append({'category_id': cate_class, 'dtIds': dtIds, 'gtIds': gtIds, 'dtScores': dtScores, 'ious': ious}) 219 | 220 | counting_confusion = [] 221 | from tqdm import tqdm 222 | for img_idx in tqdm(dt_iou_submat): 223 | counting_confusion.append([]) 224 | scores = [dis['dtScores'] for dis in dt_iou_submat[img_idx]] 225 | self_ious = [dis['ious'] for dis in dt_iou_submat[img_idx]] 226 | 227 | for iou_thresh in list(np.linspace(0.05, 0.95, 10)) + [0.5]: 228 | class_confusions = [] 229 | for class_idx in range(len(scores)): 230 | connections = scores[class_idx] * (self_ious[class_idx] > iou_thresh) 231 | connections = np.minimum(connections, connections.T) 232 | 233 | old_connections = np.zeros_like(connections) 234 | while np.abs(connections 
- old_connections).sum() > 0.1: 235 | old_connections = connections 236 | stacked_conns = np.tile(connections, [len(connections), 1, 1]) 237 | connections = np.minimum(stacked_conns, stacked_conns.T).max(1) 238 | 239 | confusions = [_calc_counting_confusion(scores[class_idx], connections, ct) for ct in np.linspace(0.05, 0.95, 10)] 240 | class_confusions.append(confusions) 241 | class_confusions = np.array(class_confusions).sum(0) 242 | class_confusions[:, 1] = np.maximum(class_confusions[:, 1], np.ones(len(class_confusions))) 243 | counting_confusion[-1].append((class_confusions[:, 0] / class_confusions[:, 1]).mean()) 244 | 245 | counting_confusion = np.array(counting_confusion) 246 | F = 1000 247 | cc_50 = np.around(F * counting_confusion[:, -1].mean(), 2) 248 | cc_75 = np.around(F * counting_confusion[:, 7].mean(), 2) 249 | cc_all = np.around(F * counting_confusion[:, :-1].mean(), 2) 250 | results = {'cc_all': cc_all, 'cc_0.5': cc_50, 'cc_0.75': cc_75} 251 | 252 | self._logger.info( 253 | "Evaluation results for {}: \n".format('Counting Confusion') + create_small_table(results) 254 | ) 255 | print(results) 256 | 257 | return results 258 | 259 | def _calc_counting_confusion(scores, connectivity, conf_thresh): 260 | valid_preds = np.arange(len(scores))[scores > conf_thresh] 261 | error = np.zeros((len(valid_preds), len(valid_preds))) 262 | n_valid = len(valid_preds) 263 | for i in range(n_valid): 264 | for j in range(n_valid): 265 | if i == j: 266 | continue 267 | x, y = valid_preds[i], valid_preds[j] 268 | error[i, j] = scores[y] / scores[x] * connectivity[x, y] 269 | return error.sum(), n_valid 270 | 271 | 272 | def _evaluate_predictions_on_coco(coco_gt, coco_results, iou_type, kpt_oks_sigmas=None): 273 | """ 274 | Evaluate the coco results using COCOEval API. 
275 | We will use the `evalImgs` datastructure to calculate precision/recall per image 276 | """ 277 | assert len(coco_results) > 0 278 | 279 | if iou_type == "segm": 280 | coco_results = copy.deepcopy(coco_results) 281 | for c in coco_results: 282 | c.pop("bbox", None) 283 | else: 284 | raise ValueError(f"iou_type {iou_type} not supported") 285 | 286 | coco_dt = coco_gt.loadRes(coco_results) 287 | coco_eval = COCOeval(coco_gt, coco_dt, iou_type) 288 | 289 | coco_eval.evaluate() 290 | return coco_eval 291 | -------------------------------------------------------------------------------- /detectron2/evaluation/f1score_evaluator.py: -------------------------------------------------------------------------------- 1 | import contextlib 2 | import copy 3 | import io 4 | import itertools 5 | import json 6 | import logging 7 | import numpy as np 8 | import os 9 | import pickle 10 | from collections import OrderedDict, defaultdict 11 | import pycocotools.mask as mask_util 12 | import torch 13 | from fvcore.common.file_io import PathManager 14 | from pycocotools.coco import COCO 15 | from tabulate import tabulate 16 | 17 | import detectron2.utils.comm as comm 18 | from detectron2.data import MetadataCatalog 19 | from detectron2.data.datasets.coco import convert_to_coco_json 20 | #from detectron2.evaluation.fast_eval_api import COCOeval_opt as COCOeval 21 | from pycocotools.cocoeval import COCOeval 22 | from detectron2.structures import Boxes, BoxMode, pairwise_iou 23 | from detectron2.utils.logger import create_small_table 24 | from .coco_evaluation import instances_to_coco_json 25 | from .evaluator import DatasetEvaluator 26 | 27 | 28 | class F1ScoreEvaluator(DatasetEvaluator): 29 | ''' 30 | This evaluator computes the F1-score of detected images with the set of ground truths 31 | 32 | This makes sure that a good detector has a high precision and recall 33 | This value is computed at different IoU thresholds (to average the effects of segmentation performance) 34 | ''' 35 
| def __init__(self, dataset_name, cfg, distributed, output_dir=None): 36 | """ 37 | Args: 38 | dataset_name (str): name of the dataset to be evaluated. 39 | It must have either the following corresponding metadata: 40 | 41 | "json_file": the path to the COCO format annotation 42 | 43 | Or it must be in detectron2's standard dataset format 44 | so it can be converted to COCO format automatically. 45 | cfg (CfgNode): config instance 46 | distributed (True): if True, will collect results from all ranks and run evaluation 47 | in the main process. 48 | Otherwise, will evaluate the results in the current process. 49 | output_dir (str): optional, an output directory to dump all 50 | results predicted on the dataset. The dump contains two files: 51 | 52 | 1. "instance_predictions.pth" a file in torch serialization 53 | format that contains all the raw original predictions. 54 | 2. "coco_instances_results.json" a json file in COCO's result 55 | format. 56 | """ 57 | self._tasks = ('segm',) 58 | self._distributed = distributed 59 | self._output_dir = output_dir 60 | 61 | self._cpu_device = torch.device("cpu") 62 | self._logger = logging.getLogger(__name__) 63 | 64 | self._metadata = MetadataCatalog.get(dataset_name) 65 | if not hasattr(self._metadata, "json_file"): 66 | self._logger.info( 67 | f"'{dataset_name}' is not registered by `register_coco_instances`." 68 | " Therefore trying to convert it to COCO format ..." 69 | ) 70 | 71 | cache_path = os.path.join(output_dir, f"{dataset_name}_coco_format.json") 72 | self._metadata.json_file = cache_path 73 | convert_to_coco_json(dataset_name, cache_path) 74 | 75 | json_file = PathManager.get_local_path(self._metadata.json_file) 76 | with contextlib.redirect_stdout(io.StringIO()): 77 | self._coco_api = COCO(json_file) 78 | 79 | self._kpt_oks_sigmas = cfg.TEST.KEYPOINT_OKS_SIGMAS 80 | # Test set json files do not contain annotations (evaluation must be 81 | # performed using the COCO evaluation server). 
82 | self._do_evaluation = "annotations" in self._coco_api.dataset 83 | 84 | 85 | def reset(self): 86 | self._predictions = [] 87 | 88 | def process(self, inputs, outputs): 89 | """ 90 | Args: 91 | inputs: the inputs to a COCO model (e.g., GeneralizedRCNN). 92 | It is a list of dict. Each dict corresponds to an image and 93 | contains keys like "height", "width", "file_name", "image_id". 94 | outputs: the outputs of a COCO model. It is a list of dicts with key 95 | "instances" that contains :class:`Instances`. 96 | """ 97 | for input, output in zip(inputs, outputs): 98 | prediction = {"image_id": input["image_id"]} 99 | 100 | if "instances" in output: 101 | instances = output["instances"].to(self._cpu_device) 102 | prediction["instances"] = instances_to_coco_json(instances, input["image_id"]) 103 | if "proposals" in output: 104 | prediction["proposals"] = output["proposals"].to(self._cpu_device) 105 | self._predictions.append(prediction) 106 | 107 | 108 | def evaluate(self): 109 | # Gather up all the predictions, then we will intercept the matched images from the 110 | # `COCOEval` class 111 | ## Gather up all predictions 112 | if self._distributed: 113 | comm.synchronize() 114 | predictions = comm.gather(self._predictions, dst=0) 115 | predictions = list(itertools.chain(*predictions)) 116 | if not comm.is_main_process(): 117 | return {} 118 | else: 119 | predictions = self._predictions 120 | 121 | if len(predictions) == 0: 122 | self._logger.warning("[COCOEvaluator] Did not receive valid predictions.") 123 | return {} 124 | 125 | self._results = OrderedDict() 126 | if "instances" in predictions[0]: 127 | self._eval_predictions(set(self._tasks), predictions) 128 | # Copy so the caller can do whatever with results 129 | return copy.deepcopy(self._results) 130 | 131 | 132 | def _eval_predictions(self, tasks, predictions): 133 | """ 134 | Evaluate predictions on the given tasks. 135 | Fill self._results with the metrics of the tasks. 
136 | """ 137 | self._logger.info("Preparing results for COCO format ...") 138 | coco_results = list(itertools.chain(*[x["instances"] for x in predictions])) 139 | 140 | # unmap the category ids for COCO 141 | if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"): 142 | reverse_id_mapping = { 143 | v: k for k, v in self._metadata.thing_dataset_id_to_contiguous_id.items() 144 | } 145 | for result in coco_results: 146 | category_id = result["category_id"] 147 | assert ( 148 | category_id in reverse_id_mapping 149 | ), "A prediction has category_id={}, which is not available in the dataset.".format( 150 | category_id 151 | ) 152 | result["category_id"] = reverse_id_mapping[category_id] 153 | 154 | if not self._do_evaluation: 155 | self._logger.info("Annotations are not available for evaluation.") 156 | return 157 | 158 | self._logger.info("Evaluating predictions ...") 159 | for task in sorted(tasks): 160 | coco_eval = ( 161 | _evaluate_predictions_on_coco( 162 | self._coco_api, coco_results, task, kpt_oks_sigmas=self._kpt_oks_sigmas 163 | ) 164 | if len(coco_results) > 0 165 | else None # cocoapi does not handle empty results very well 166 | ) 167 | 168 | res = self._derive_coco_results( 169 | coco_eval, task, class_names=self._metadata.get("thing_classes") 170 | ) 171 | self._results['f1-' + task] = res 172 | 173 | 174 | def _derive_coco_results(self, coco_eval, iou_type, class_names=None): 175 | """ 176 | Use the COCOeval matches to derive tp, fp, fn => precision, recall for each image/category 177 | Next, for each (class, IoU threshold, object size), aggregate the F1-score (average them). 178 | 179 | Derive the desired score numbers from COCOeval. 180 | 181 | Args: 182 | coco_eval (None or COCOEval): None represents no predictions from model. 183 | iou_type (str): 184 | class_names (None or list[str]): if provided, will use it to predict 185 | per-category AP. 
186 | 187 | Returns: 188 | a dict of {metric name: score} 189 | """ 190 | 191 | # Final metric 192 | metrics = ["F1-all", "F1-0.5", "F1-0.75", "F1-small", "F1-medium", "F1-large"] 193 | 194 | # Return nans if there are no predictions! 195 | if coco_eval is None: 196 | self._logger.warn("No predictions from the model!") 197 | return {metric: float("nan") for metric in metrics} 198 | 199 | prc_dict = defaultdict(list) 200 | rec_dict = defaultdict(list) 201 | # Calculate precision recall for each class, area, and iou threshold 202 | for evalImg in coco_eval.evalImgs: 203 | # Skip if maxDets is not the best value, or if evalImg is None (there was no detection) 204 | if evalImg is None: 205 | continue 206 | if evalImg['maxDet'] != coco_eval.params.maxDets[-1]: 207 | continue 208 | # Extract detected and ground truths 209 | # int, [a1, a2], [TxD], [TxD], [G], [G] 210 | cate_class, area, dt, dtIg, gtIds, gtIg = [evalImg[x] for x in ['category_id', 'aRng', 'dtMatches', 'dtIgnore', 'gtIds', 'gtIgnore']] 211 | # [TxG] 212 | gtm = evalImg['gtMatches'] 213 | gtm = (gtm > 0) 214 | # get area 215 | area = tuple(area) 216 | # Get iou thresholds too 217 | thrs = coco_eval.params.iouThrs 218 | # Total number of predictions 219 | num_gt = len(gtIds) - np.sum(gtIg) 220 | T, D = dt.shape 221 | #if D == 0 or num_gt == 0: 222 | if num_gt == 0: 223 | continue 224 | # There are some detections, see if there are matches and they are not ignored 225 | # Compute total true positives and false positives for the image (for each threshold) 226 | # Size of tp, fp = [T,] 227 | tp = np.logical_and( dt , np.logical_not(dtIg) ).sum(1) * 1.0 228 | fp = np.logical_and( np.logical_not(dt), np.logical_not(dtIg) ).sum(1) * 1.0 229 | fn = np.logical_and( np.logical_not(gtm), np.logical_not(gtIg)[np.newaxis, :]).sum(1) * 1.0 230 | for i in range(T): 231 | prc_dict[(cate_class, i, area)].append(tp[i] / (tp[i] + fp[i] + np.spacing(1))) 232 | rec_dict[(cate_class, i, area)].append(tp[i] / (num_gt + 
np.spacing(1))) 233 | 234 | # Initialize f1-scores 235 | f1_scores = defaultdict(list) 236 | area_to_key = [(tuple(area), st) for area, st in zip(coco_eval.params.areaRng, \ 237 | ['all', 'small', 'medium', 'large'])] 238 | area_to_key = dict(area_to_key) 239 | 240 | # Calculated the precision and recall for all these images, now calculate the F1-score averaged 241 | # over each of these parameters 242 | for key in prc_dict.keys(): 243 | cate_cls, iou_thr, area = key 244 | areakey = area_to_key[area] 245 | pr = np.array(prc_dict[key]) 246 | rc = np.array(rec_dict[key]) 247 | assert len(pr) == len(rc) 248 | f1 = 2*pr*rc/(pr + rc + np.spacing(1)) 249 | if len(f1) == 0: 250 | continue 251 | f1_scores[(iou_thr, areakey)].append(f1.mean()) 252 | 253 | # Compute average F1score for given iou threshold and area 254 | for key, val in f1_scores.items(): 255 | f1_scores[key] = np.mean(val) 256 | 257 | # Calculate individual f1-scores 258 | results = { 259 | 'all': [], 260 | '0.5': [], 261 | '0.75': [], 262 | 'small': [], # small, med, large f1 at all thresholds 263 | 'medium': [], 264 | 'large':[] 265 | } 266 | for key, f1_mean in f1_scores.items(): 267 | iou_thr, areakey = key 268 | results[areakey].append(f1_mean) 269 | if areakey == 'all': 270 | iou_thr = coco_eval.params.iouThrs[iou_thr] 271 | if str(iou_thr) in ['0.5', '0.75']: 272 | results[str(iou_thr)].append(f1_mean) 273 | 274 | # Average these quantities 275 | for k, v in results.items(): 276 | results[k] = np.around(np.mean(v), 4) 277 | 278 | # Final dict of results 279 | results = {f'F1_{k}': v for k, v in results.items()} 280 | self._logger.info( 281 | "Evaluation results for {}: \n".format('F1-score') + create_small_table(results) 282 | ) 283 | if not np.isfinite(sum(results.values())): 284 | self._logger.info("Some metrics cannot be computed and is shown as NaN.") 285 | return results 286 | 287 | 288 | 289 | def _evaluate_predictions_on_coco(coco_gt, coco_results, iou_type, kpt_oks_sigmas=None): 290 | """ 291 
| Evaluate the coco results using COCOEval API. 292 | We will use the `evalImgs` datastructure to calculate precision/recall per image 293 | """ 294 | assert len(coco_results) > 0 295 | 296 | if iou_type == "segm": 297 | coco_results = copy.deepcopy(coco_results) 298 | # When evaluating mask AP, if the results contain bbox, cocoapi will 299 | # use the box area as the area of the instance, instead of the mask area. 300 | # This leads to a different definition of small/medium/large. 301 | # We remove the bbox field to let mask AP use mask area. 302 | for c in coco_results: 303 | c.pop("bbox", None) 304 | else: 305 | raise ValueError(f"iou_type {iou_type} not supported") 306 | 307 | coco_dt = coco_gt.loadRes(coco_results) 308 | coco_eval = COCOeval(coco_gt, coco_dt, iou_type) 309 | coco_eval.evaluate() 310 | 311 | return coco_eval 312 | -------------------------------------------------------------------------------- /cocoapi/MatlabAPI/CocoApi.m: -------------------------------------------------------------------------------- 1 | classdef CocoApi 2 | % Interface for accessing the Microsoft COCO dataset. 3 | % 4 | % Microsoft COCO is a large image dataset designed for object detection, 5 | % segmentation, and caption generation. CocoApi.m is a Matlab API that 6 | % assists in loading, parsing and visualizing the annotations in COCO. 7 | % Please visit http://mscoco.org/ for more information on COCO, including 8 | % for the data, paper, and tutorials. The exact format of the annotations 9 | % is also described on the COCO website. For example usage of the CocoApi 10 | % please see cocoDemo.m. In addition to this API, please download both 11 | % the COCO images and annotations in order to run the demo. 12 | % 13 | % An alternative to using the API is to load the annotations directly 14 | % into a Matlab struct. This can be achieved via: 15 | % data = gason(fileread(annFile)); 16 | % Using the API provides additional utility functions. 
Note that this API 17 | % supports both *instance* and *caption* annotations. In the case of 18 | % captions not all functions are defined (e.g. categories are undefined). 19 | % 20 | % The following API functions are defined: 21 | % CocoApi - Load COCO annotation file and prepare data structures. 22 | % getAnnIds - Get ann ids that satisfy given filter conditions. 23 | % getCatIds - Get cat ids that satisfy given filter conditions. 24 | % getImgIds - Get img ids that satisfy given filter conditions. 25 | % loadAnns - Load anns with the specified ids. 26 | % loadCats - Load cats with the specified ids. 27 | % loadImgs - Load imgs with the specified ids. 28 | % showAnns - Display the specified annotations. 29 | % loadRes - Load algorithm results and create API for accessing them. 30 | % Throughout the API "ann"=annotation, "cat"=category, and "img"=image. 31 | % Help on each functions can be accessed by: "help CocoApi>function". 32 | % 33 | % See also CocoApi>CocoApi, CocoApi>getAnnIds, CocoApi>getCatIds, 34 | % CocoApi>getImgIds, CocoApi>loadAnns, CocoApi>loadCats, 35 | % CocoApi>loadImgs, CocoApi>showAnns, CocoApi>loadRes 36 | % 37 | % Microsoft COCO Toolbox. version 2.0 38 | % Data, paper, and tutorials available at: http://mscoco.org/ 39 | % Code written by Piotr Dollar and Tsung-Yi Lin, 2015. 40 | % Licensed under the Simplified BSD License [see coco/license.txt] 41 | 42 | properties 43 | data % COCO annotation data structure 44 | inds % data structures for fast indexing 45 | end 46 | 47 | methods 48 | function coco = CocoApi( annFile ) 49 | % Load COCO annotation file and prepare data structures. 50 | % 51 | % USAGE 52 | % coco = CocoApi( annFile ) 53 | % 54 | % INPUTS 55 | % annFile - COCO annotation filename 56 | % 57 | % OUTPUTS 58 | % coco - initialized coco object 59 | fprintf('Loading and preparing annotations... 
'); clk=clock; 60 | if(isstruct(annFile)), coco.data=annFile; else 61 | coco.data=gason(fileread(annFile)); end 62 | is.imgIds = [coco.data.images.id]'; 63 | is.imgIdsMap = makeMap(is.imgIds); 64 | if( isfield(coco.data,'annotations') ) 65 | ann=coco.data.annotations; o=[ann.image_id]; 66 | if(isfield(ann,'category_id')), o=o*1e10+[ann.category_id]; end 67 | [~,o]=sort(o); ann=ann(o); coco.data.annotations=ann; 68 | s={'category_id','area','iscrowd','id','image_id'}; 69 | t={'annCatIds','annAreas','annIscrowd','annIds','annImgIds'}; 70 | for f=1:5, if(isfield(ann,s{f})), is.(t{f})=[ann.(s{f})]'; end; end 71 | is.annIdsMap = makeMap(is.annIds); 72 | is.imgAnnIdsMap = makeMultiMap(is.imgIds,... 73 | is.imgIdsMap,is.annImgIds,is.annIds,0); 74 | end 75 | if( isfield(coco.data,'categories') ) 76 | is.catIds = [coco.data.categories.id]'; 77 | is.catIdsMap = makeMap(is.catIds); 78 | if(isfield(is,'annCatIds')), is.catImgIdsMap = makeMultiMap(... 79 | is.catIds,is.catIdsMap,is.annCatIds,is.annImgIds,1); end 80 | end 81 | coco.inds=is; fprintf('DONE (t=%0.2fs).\n',etime(clock,clk)); 82 | 83 | function map = makeMap( keys ) 84 | % Make map from key to integer id associated with key. 85 | if(isempty(keys)), map=containers.Map(); return; end 86 | map=containers.Map(keys,1:length(keys)); 87 | end 88 | 89 | function map = makeMultiMap( keys, keysMap, keysAll, valsAll, sqz ) 90 | % Make map from keys to set of vals associated with each key. 
91 | js=values(keysMap,num2cell(keysAll)); js=[js{:}]; 92 | m=length(js); n=length(keys); k=zeros(1,n); 93 | for i=1:m, j=js(i); k(j)=k(j)+1; end; vs=zeros(n,max(k)); k(:)=0; 94 | for i=1:m, j=js(i); k(j)=k(j)+1; vs(j,k(j))=valsAll(i); end 95 | map = containers.Map('KeyType','double','ValueType','any'); 96 | if(sqz), for j=1:n, map(keys(j))=unique(vs(j,1:k(j))); end 97 | else for j=1:n, map(keys(j))=vs(j,1:k(j)); end; end 98 | end 99 | end 100 | 101 | function ids = getAnnIds( coco, varargin ) 102 | % Get ann ids that satisfy given filter conditions. 103 | % 104 | % USAGE 105 | % ids = coco.getAnnIds( params ) 106 | % 107 | % INPUTS 108 | % params - filtering parameters (struct or name/value pairs) 109 | % setting any filter to [] skips that filter 110 | % .imgIds - [] get anns for given imgs 111 | % .catIds - [] get anns for given cats 112 | % .areaRng - [] get anns for given area range (e.g. [0 inf]) 113 | % .iscrowd - [] get anns for given crowd label (0 or 1) 114 | % 115 | % OUTPUTS 116 | % ids - integer array of ann ids 117 | def = {'imgIds',[],'catIds',[],'areaRng',[],'iscrowd',[]}; 118 | [imgIds,catIds,ar,iscrowd] = getPrmDflt(varargin,def,1); 119 | if( length(imgIds)==1 ) 120 | t = coco.loadAnns(coco.inds.imgAnnIdsMap(imgIds)); 121 | if(~isempty(catIds)), t = t(ismember([t.category_id],catIds)); end 122 | if(~isempty(ar)), a=[t.area]; t = t(a>=ar(1) & a<=ar(2)); end 123 | if(~isempty(iscrowd)), t = t([t.iscrowd]==iscrowd); end 124 | ids = [t.id]; 125 | else 126 | ids=coco.inds.annIds; K = true(length(ids),1); t = coco.inds; 127 | if(~isempty(imgIds)), K = K & ismember(t.annImgIds,imgIds); end 128 | if(~isempty(catIds)), K = K & ismember(t.annCatIds,catIds); end 129 | if(~isempty(ar)), a=t.annAreas; K = K & a>=ar(1) & a<=ar(2); end 130 | if(~isempty(iscrowd)), K = K & t.annIscrowd==iscrowd; end 131 | ids=ids(K); 132 | end 133 | end 134 | 135 | function ids = getCatIds( coco, varargin ) 136 | % Get cat ids that satisfy given filter conditions. 
137 | % 138 | % USAGE 139 | % ids = coco.getCatIds( params ) 140 | % 141 | % INPUTS 142 | % params - filtering parameters (struct or name/value pairs) 143 | % setting any filter to [] skips that filter 144 | % .catNms - [] get cats for given cat names 145 | % .supNms - [] get cats for given supercategory names 146 | % .catIds - [] get cats for given cat ids 147 | % 148 | % OUTPUTS 149 | % ids - integer array of cat ids 150 | if(~isfield(coco.data,'categories')), ids=[]; return; end 151 | def={'catNms',[],'supNms',[],'catIds',[]}; t=coco.data.categories; 152 | [catNms,supNms,catIds] = getPrmDflt(varargin,def,1); 153 | if(~isempty(catNms)), t = t(ismember({t.name},catNms)); end 154 | if(~isempty(supNms)), t = t(ismember({t.supercategory},supNms)); end 155 | if(~isempty(catIds)), t = t(ismember([t.id],catIds)); end 156 | ids = [t.id]; 157 | end 158 | 159 | function ids = getImgIds( coco, varargin ) 160 | % Get img ids that satisfy given filter conditions. 161 | % 162 | % USAGE 163 | % ids = coco.getImgIds( params ) 164 | % 165 | % INPUTS 166 | % params - filtering parameters (struct or name/value pairs) 167 | % setting any filter to [] skips that filter 168 | % .imgIds - [] get imgs for given ids 169 | % .catIds - [] get imgs with all given cats 170 | % 171 | % OUTPUTS 172 | % ids - integer array of img ids 173 | def={'imgIds',[],'catIds',[]}; ids=coco.inds.imgIds; 174 | [imgIds,catIds] = getPrmDflt(varargin,def,1); 175 | if(~isempty(imgIds)), ids=intersect(ids,imgIds); end 176 | if(isempty(catIds)), return; end 177 | t=values(coco.inds.catImgIdsMap,num2cell(catIds)); 178 | for i=1:length(t), ids=intersect(ids,t{i}); end 179 | end 180 | 181 | function anns = loadAnns( coco, ids ) 182 | % Load anns with the specified ids. 
183 | % 184 | % USAGE 185 | % anns = coco.loadAnns( ids ) 186 | % 187 | % INPUTS 188 | % ids - integer ids specifying anns 189 | % 190 | % OUTPUTS 191 | % anns - loaded ann objects 192 | ids = values(coco.inds.annIdsMap,num2cell(ids)); 193 | anns = coco.data.annotations([ids{:}]); 194 | end 195 | 196 | function cats = loadCats( coco, ids ) 197 | % Load cats with the specified ids. 198 | % 199 | % USAGE 200 | % cats = coco.loadCats( ids ) 201 | % 202 | % INPUTS 203 | % ids - integer ids specifying cats 204 | % 205 | % OUTPUTS 206 | % cats - loaded cat objects 207 | if(~isfield(coco.data,'categories')), cats=[]; return; end 208 | ids = values(coco.inds.catIdsMap,num2cell(ids)); 209 | cats = coco.data.categories([ids{:}]); 210 | end 211 | 212 | function imgs = loadImgs( coco, ids ) 213 | % Load imgs with the specified ids. 214 | % 215 | % USAGE 216 | % imgs = coco.loadImgs( ids ) 217 | % 218 | % INPUTS 219 | % ids - integer ids specifying imgs 220 | % 221 | % OUTPUTS 222 | % imgs - loaded img objects 223 | ids = values(coco.inds.imgIdsMap,num2cell(ids)); 224 | imgs = coco.data.images([ids{:}]); 225 | end 226 | 227 | function hs = showAnns( coco, anns ) 228 | % Display the specified annotations. 
229 | % 230 | % USAGE 231 | % hs = coco.showAnns( anns ) 232 | % 233 | % INPUTS 234 | % anns - annotations to display 235 | % 236 | % OUTPUTS 237 | % hs - handles to segment graphic objects 238 | n=length(anns); if(n==0), return; end 239 | r=.4:.2:1; [r,g,b]=ndgrid(r,r,r); cs=[r(:) g(:) b(:)]; 240 | cs=cs(randperm(size(cs,1)),:); cs=repmat(cs,100,1); 241 | if( isfield( anns,'keypoints') ) 242 | for i=1:n 243 | a=anns(i); if(isfield(a,'iscrowd') && a.iscrowd), continue; end 244 | seg={}; if(isfield(a,'segmentation')), seg=a.segmentation; end 245 | k=a.keypoints; x=k(1:3:end)+1; y=k(2:3:end)+1; v=k(3:3:end); 246 | k=coco.loadCats(a.category_id); k=k.skeleton; c=cs(i,:); hold on 247 | p={'FaceAlpha',.25,'LineWidth',2,'EdgeColor',c}; % polygon 248 | for j=seg, xy=j{1}+.5; fill(xy(1:2:end),xy(2:2:end),c,p{:}); end 249 | p={'Color',c,'LineWidth',3}; % skeleton 250 | for j=k, s=j{1}; if(all(v(s)>0)), line(x(s),y(s),p{:}); end; end 251 | p={'MarkerSize',8,'MarkerFaceColor',c,'MarkerEdgeColor'}; % pnts 252 | plot(x(v>0),y(v>0),'o',p{:},'k'); 253 | plot(x(v>1),y(v>1),'o',p{:},c); hold off; 254 | end 255 | elseif( any(isfield(anns,{'segmentation','bbox'})) ) 256 | if(~isfield(anns,'iscrowd')), [anns(:).iscrowd]=deal(0); end 257 | if(~isfield(anns,'segmentation')), S={anns.bbox}; %#ok 258 | for i=1:n, x=S{i}(1); w=S{i}(3); y=S{i}(2); h=S{i}(4); 259 | anns(i).segmentation={[x,y,x,y+h,x+w,y+h,x+w,y]}; end; end 260 | S={anns.segmentation}; hs=zeros(10000,1); k=0; hold on; 261 | pFill={'FaceAlpha',.4,'LineWidth',3}; 262 | for i=1:n 263 | if(anns(i).iscrowd), C=[.01 .65 .40]; else C=rand(1,3); end 264 | if(isstruct(S{i})), M=double(MaskApi.decode(S{i})); k=k+1; 265 | hs(k)=imagesc(cat(3,M*C(1),M*C(2),M*C(3)),'Alphadata',M*.5); 266 | else for j=1:length(S{i}), P=S{i}{j}+.5; k=k+1; 267 | hs(k)=fill(P(1:2:end),P(2:2:end),C,pFill{:}); end 268 | end 269 | end 270 | hs=hs(1:k); hold off; 271 | elseif( isfield(anns,'caption') ) 272 | S={anns.caption}; 273 | for i=1:n, S{i}=[int2str(i) ') 
' S{i} '\newline']; end 274 | S=[S{:}]; title(S,'FontSize',12); 275 | end 276 | end 277 | 278 | function cocoRes = loadRes( coco, resFile ) 279 | % Load algorithm results and create API for accessing them. 280 | % 281 | % The API for accessing and viewing algorithm results is identical to 282 | % the CocoApi for the ground truth. The single difference is that the 283 | % ground truth results are replaced by the algorithm results. 284 | % 285 | % USAGE 286 | % cocoRes = coco.loadRes( resFile ) 287 | % 288 | % INPUTS 289 | % resFile - COCO results filename 290 | % 291 | % OUTPUTS 292 | % cocoRes - initialized results API 293 | fprintf('Loading and preparing results... '); clk=clock; 294 | cdata=coco.data; R=gason(fileread(resFile)); m=length(R); 295 | valid=ismember([R.image_id],[cdata.images.id]); 296 | if(~all(valid)), error('Results provided for invalid images.'); end 297 | t={'segmentation','bbox','keypoints','caption'}; t=t{isfield(R,t)}; 298 | if(strcmp(t,'caption')) 299 | for i=1:m, R(i).id=i; end; imgs=cdata.images; 300 | cdata.images=imgs(ismember([imgs.id],[R.image_id])); 301 | else 302 | assert(all(isfield(R,{'category_id','score',t}))); 303 | s=cat(1,R.(t)); if(strcmp(t,'bbox')), a=s(:,3).*s(:,4); end 304 | if(strcmp(t,'segmentation')), a=MaskApi.area(s); end 305 | if(strcmp(t,'keypoints')), x=s(:,1:3:end)'; y=s(:,2:3:end)'; 306 | a=(max(x)-min(x)).*(max(y)-min(y)); end 307 | for i=1:m, R(i).area=a(i); R(i).id=i; end 308 | end 309 | fprintf('DONE (t=%0.2fs).\n',etime(clock,clk)); 310 | cdata.annotations=R; cocoRes=CocoApi(cdata); 311 | end 312 | end 313 | 314 | end 315 | -------------------------------------------------------------------------------- /cocoapi/MatlabAPI/CocoUtils.m: -------------------------------------------------------------------------------- 1 | classdef CocoUtils 2 | % Utility functions for testing and validation of COCO code. 
3 | % 4 | % The following utility functions are defined: 5 | % convertPascalGt - Convert ground truth for PASCAL to COCO format. 6 | % convertImageNetGt - Convert ground truth for ImageNet to COCO format. 7 | % convertPascalDt - Convert detections on PASCAL to COCO format. 8 | % convertImageNetDt - Convert detections on ImageNet to COCO format. 9 | % validateOnPascal - Validate COCO eval code against PASCAL code. 10 | % validateOnImageNet - Validate COCO eval code against ImageNet code. 11 | % generateFakeDt - Generate fake detections from ground truth. 12 | % validateMaskApi - Validate MaskApi against Matlab functions. 13 | % gasonSplit - Split JSON file into multiple JSON files. 14 | % gasonMerge - Merge JSON files into single JSON file. 15 | % Help on each functions can be accessed by: "help CocoUtils>function". 16 | % 17 | % See also CocoApi MaskApi CocoEval CocoUtils>convertPascalGt 18 | % CocoUtils>convertImageNetGt CocoUtils>convertPascalDt 19 | % CocoUtils>convertImageNetDt CocoUtils>validateOnPascal 20 | % CocoUtils>validateOnImageNet CocoUtils>generateFakeDt 21 | % CocoUtils>validateMaskApi CocoUtils>gasonSplit CocoUtils>gasonMerge 22 | % 23 | % Microsoft COCO Toolbox. version 2.0 24 | % Data, paper, and tutorials available at: http://mscoco.org/ 25 | % Code written by Piotr Dollar and Tsung-Yi Lin, 2015. 26 | % Licensed under the Simplified BSD License [see coco/license.txt] 27 | 28 | methods( Static ) 29 | function convertPascalGt( dataDir, year, split, annFile ) 30 | % Convert ground truth for PASCAL to COCO format. 31 | % 32 | % USAGE 33 | % CocoUtils.convertPascalGt( dataDir, year, split, annFile ) 34 | % 35 | % INPUTS 36 | % dataDir - dir containing VOCdevkit/ 37 | % year - dataset year (e.g. '2007') 38 | % split - dataset split (e.g. 'val') 39 | % annFile - annotation file for writing results 40 | if(exist(annFile,'file')), return; end 41 | fprintf('Converting PASCAL VOC dataset... 
'); clk=tic; 42 | dev=[dataDir '/VOCdevkit/']; addpath(genpath([dev '/VOCcode'])); 43 | VOCinit; C=VOCopts.classes'; catsMap=containers.Map(C,1:length(C)); 44 | f=fopen([dev '/VOC' year '/ImageSets/Main/' split '.txt']); 45 | is=textscan(f,'%s %*s'); is=is{1}; fclose(f); n=length(is); 46 | data=CocoUtils.initData(C,n); 47 | for i=1:n, nm=[is{i} '.jpg']; 48 | f=[dev '/VOC' year '/Annotations/' is{i} '.xml']; 49 | R=PASreadrecord(f); hw=R.imgsize([2 1]); O=R.objects; 50 | id=is{i}; id(id=='_')=[]; id=str2double(id); 51 | ignore=[O.difficult]; bbs=cat(1,O.bbox); 52 | t=catsMap.values({O.class}); catIds=[t{:}]; iscrowd=ignore*0; 53 | data=CocoUtils.addData(data,nm,id,hw,catIds,ignore,iscrowd,bbs); 54 | end 55 | f=fopen(annFile,'w'); fwrite(f,gason(data)); fclose(f); 56 | fprintf('DONE (t=%0.2fs).\n',toc(clk)); 57 | end 58 | 59 | function convertImageNetGt( dataDir, year, split, annFile ) 60 | % Convert ground truth for ImageNet to COCO format. 61 | % 62 | % USAGE 63 | % CocoUtils.convertImageNetGt( dataDir, year, split, annFile ) 64 | % 65 | % INPUTS 66 | % dataDir - dir containing ILSVRC*/ folders 67 | % year - dataset year (e.g. '2013') 68 | % split - dataset split (e.g. 'val') 69 | % annFile - annotation file for writing results 70 | if(exist(annFile,'file')), return; end 71 | fprintf('Converting ImageNet dataset... 
  function convertImageNetGt( dataDir, year, split, annFile )
    % Convert ground truth for ImageNet to COCO format.
    %
    % USAGE
    %  CocoUtils.convertImageNetGt( dataDir, year, split, annFile )
    %
    % INPUTS
    %  dataDir    - dir containing ILSVRC*/ folders
    %  year       - dataset year (e.g. '2013')
    %  split      - dataset split (e.g. 'val')
    %  annFile    - annotation file for writing results
    % idempotent: if the output json already exists, do nothing
    if(exist(annFile,'file')), return; end
    fprintf('Converting ImageNet dataset... '); clk=tic;
    dev=[dataDir '/ILSVRC' year '_devkit/'];
    addpath(genpath([dev '/evaluation/']));
    t=[dev '/data/meta_det.mat'];
    % first 200 synsets are the DET categories; WNID -> 1..200 id map
    t=load(t); synsets=t.synsets(1:200); catNms={synsets.name};
    catsMap=containers.Map({synsets.WNID},1:length(catNms));
    % blacklist (val only): per-image category ids to ignore during eval
    if(~strcmp(split,'val')), blacklist=cell(1,2); else
      f=[dev '/data/' 'ILSVRC' year '_det_validation_blacklist.txt'];
      f=fopen(f); blacklist=textscan(f,'%d %s'); fclose(f);
      t=catsMap.values(blacklist{2}); blacklist{2}=[t{:}];
    end
    if(strcmp(split,'train'))
      % train image list is split across 200 per-category files
      dl=@(i) [dev '/data/det_lists/' split '_pos_' int2str(i) '.txt'];
      is=cell(1,200); for i=1:200, f=fopen(dl(i));
        is{i}=textscan(f,'%s %*s'); is{i}=is{i}{1}; fclose(f); end
      is=unique(cat(1,is{:})); n=length(is);
    else
      f=fopen([dev '/data/det_lists/' split '.txt']);
      is=textscan(f,'%s %*s'); is=is{1}; fclose(f); n=length(is);
    end
    data=CocoUtils.initData(catNms,n);
    for i=1:n
      f=[dataDir '/ILSVRC' year '_DET_bbox_' split '/' is{i} '.xml'];
      R=VOCreadxml(f); R=R.annotation; nm=[is{i} '.JPEG'];
      hw=str2double({R.size.height R.size.width});
      if(~isfield(R,'object')), catIds=[]; bbs=[]; else
        O=R.object; t=catsMap.values({O.name}); catIds=[t{:}];
        b=[O.bndbox]; bbs=str2double({b.xmin; b.ymin; b.xmax; b.ymax})';
      end
      % blacklisted categories become full-image crowd/ignore regions
      j=blacklist{2}(blacklist{1}==i); m=numel(j); b=[0 0 hw(2) hw(1)];
      catIds=[j catIds]; bbs=[repmat(b,m,1); bbs]; %#ok
      ignore=ismember(catIds,j); iscrowd=ignore*0; iscrowd(1:m)=1;
      data=CocoUtils.addData(data,nm,i,hw,catIds,ignore,iscrowd,bbs);
    end
    f=fopen(annFile,'w'); fwrite(f,gason(data)); fclose(f);
    fprintf('DONE (t=%0.2fs).\n',toc(clk));
  end
  function convertPascalDt( srcFiles, tarFile )
    % Convert detections on PASCAL to COCO format.
    %
    % USAGE
    %  CocoUtils.convertPascalDt( srcFiles, tarFile )
    %
    % INPUTS
    %  srcFiles   - source detection file(s) in PASCAL format
    %  tarFile    - target detection file in COCO format
    % idempotent: if the output json already exists, do nothing
    if(exist(tarFile,'file')), return; end; R=[];
    % one source file per category; file index i is the COCO category id
    for i=1:length(srcFiles), f=fopen(srcFiles{i},'r');
      % columns: image id, score, x0, y0, x1, y1 (1-based corners)
      R1=textscan(f,'%d %f %f %f %f %f'); fclose(f);
      % convert 1-based [x0 y0 x1 y1] to 0-based [x y w h], min size 1
      [~,~,x0,y0,x1,y1]=deal(R1{:}); b=[x0-1 y0-1 x1-x0+1 y1-y0+1];
      b(:,3:4)=max(b(:,3:4),1); b=mat2cell(b,ones(1,size(b,1)),4);
      R=[R; struct('image_id',num2cell(R1{1}),'bbox',b,...
        'category_id',i,'score',num2cell(R1{2}))]; %#ok
    end
    f=fopen(tarFile,'w'); fwrite(f,gason(R)); fclose(f);
  end

  function convertImageNetDt( srcFile, tarFile )
    % Convert detections on ImageNet to COCO format.
    %
    % USAGE
    %  CocoUtils.convertImageNetDt( srcFile, tarFile )
    %
    % INPUTS
    %  srcFile    - source detection file in ImageNet format
    %  tarFile    - target detection file in COCO format
    if(exist(tarFile,'file')), return; end; f=fopen(srcFile,'r');
    % columns: image id, category id, score, x0, y0, x1, y1
    R=textscan(f,'%d %d %f %f %f %f %f'); fclose(f);
    % convert 1-based corners to 0-based [x y w h], min size 1
    [~,~,~,x0,y0,x1,y1]=deal(R{:}); b=[x0-1 y0-1 x1-x0+1 y1-y0+1];
    b(:,3:4)=max(b(:,3:4),1); bbox=mat2cell(b,ones(1,size(b,1)),4);
    R=struct('image_id',num2cell(R{1}),'bbox',bbox,...
      'category_id',num2cell(R{2}),'score',num2cell(R{3}));
    f=fopen(tarFile,'w'); fwrite(f,gason(R)); fclose(f);
  end
  function validateOnPascal( dataDir )
    % Validate COCO eval code against PASCAL code.
    %
    % Runs the official VOC evaluation and the COCO evaluation on the same
    % detections and compares per-class AP; warns PASSED/FAILED.
    %
    % USAGE
    %  CocoUtils.validateOnPascal( dataDir )
    %
    % INPUTS
    %  dataDir    - dir containing VOCdevkit/
    % dense recall thresholds so both APs are computed on the same grid
    split='val'; year='2007'; thrs=0:.001:1; T=length(thrs);
    dev=[dataDir '/VOCdevkit/']; addpath(genpath([dev '/VOCcode/']));
    % VOCinit must run from inside the devkit dir; restore cwd afterwards
    d=pwd; cd(dev); VOCinit; cd(d); O=VOCopts; O.testset=split;
    O.detrespath=[O.detrespath(1:end-10) split '_%s.txt'];
    catNms=O.classes; K=length(catNms); ap=zeros(K,1);
    % reference AP: max-precision-over-recall average from VOCevaldet
    for i=1:K, [R,P]=VOCevaldet(O,'comp3',catNms{i},0); R1=[R; inf];
      P1=[P; 0]; for t=1:T, ap(i)=ap(i)+max(P1(R1>=thrs(t)))/T; end; end
    srcFile=[dev '/results/VOC' year '/Main/comp3_det_' split];
    resFile=[srcFile '.json']; annFile=[dev '/VOC2007/' split '.json'];
    sfs=cell(1,K); for i=1:K, sfs{i}=[srcFile '_' catNms{i} '.txt']; end
    CocoUtils.convertPascalGt(dataDir,year,split,annFile);
    CocoUtils.convertPascalDt(sfs,resFile);
    D=CocoApi(annFile); R=D.loadRes(resFile); E=CocoEval(D,R);
    % single IoU=.5, all areas, unlimited dets to mirror the VOC protocol
    p=E.params; p.recThrs=thrs; p.iouThrs=.5; p.areaRng=[0 inf];
    p.useSegm=0; p.maxDets=inf; E.params=p; E.evaluate(); E.accumulate();
    apCoco=squeeze(mean(E.eval.precision,2)); deltas=abs(apCoco-ap);
    fprintf('AP delta: mean=%.2e median=%.2e max=%.2e\n',...
      mean(deltas),median(deltas),max(deltas))
    if(max(deltas)>1e-2), msg='FAILED'; else msg='PASSED'; end
    warning(['Eval code *' msg '* validation!']);
  end
  function validateOnImageNet( dataDir )
    % Validate COCO eval code against ImageNet code.
    %
    % Runs the official ILSVRC eval_detection and the COCO evaluation on
    % the devkit demo detections and compares per-class AP.
    %
    % USAGE
    %  CocoUtils.validateOnImageNet( dataDir )
    %
    % INPUTS
    %  dataDir    - dir containing ILSVRC*/ folders
    warning(['Set pixelTolerance=0 in line 30 of eval_detection.m '...
      '(and delete cache) otherwise AP will differ by >1e-4!']);
    year='2013'; dev=[dataDir '/ILSVRC' year '_devkit/'];
    % argument list for eval_detection: dets, gt dir, meta, list, blacklist, cache
    fs = { [dev 'evaluation/demo.val.pred.det.txt']
      [dataDir '/ILSVRC' year '_DET_bbox_val/']
      [dev 'data/meta_det.mat']
      [dev 'data/det_lists/val.txt']
      [dev 'data/ILSVRC' year '_det_validation_blacklist.txt']
      [dev 'data/ILSVRC' year '_det_validation_cache.mat'] };
    addpath(genpath([dev 'evaluation/']));
    ap=eval_detection(fs{:})';
    resFile=[fs{1}(1:end-3) 'json'];
    annFile=[dev 'data/ILSVRC' year '_val.json'];
    CocoUtils.convertImageNetDt(fs{1},resFile);
    CocoUtils.convertImageNetGt(dataDir,year,'val',annFile)
    D=CocoApi(annFile); R=D.loadRes(resFile); E=CocoEval(D,R);
    % single IoU=.5, all areas, unlimited dets to mirror the ILSVRC protocol
    p=E.params; p.recThrs=0:.0001:1; p.iouThrs=.5; p.areaRng=[0 inf];
    p.useSegm=0; p.maxDets=inf; E.params=p; E.evaluate(); E.accumulate();
    apCoco=squeeze(mean(E.eval.precision,2)); deltas=abs(apCoco-ap);
    fprintf('AP delta: mean=%.2e median=%.2e max=%.2e\n',...
      mean(deltas),median(deltas),max(deltas))
    if(max(deltas)>1e-4), msg='FAILED'; else msg='PASSED'; end
    warning(['Eval code *' msg '* validation!']);
  end
  function validateMaskApi( coco )
    % Validate MaskApi against Matlab functions.
    %
    % Rasterizes the first 1000 non-crowd polygon annotations both with the
    % compiled MaskApi and with Matlab's poly2mask, then compares the RLE
    % encodings and IoU values; warns PASSED/FAILED.
    %
    % USAGE
    %  CocoUtils.validateMaskApi( coco )
    %
    % INPUTS
    %  coco       - instance of CocoApi containing ground truth
    S=coco.data.annotations; S=S(~[S.iscrowd]); S={S.segmentation};
    h=1000; n=1000; Z=cell(1,n); A=Z; B=Z; M=Z; IB=zeros(1,n);
    fprintf('Running MaskApi implementations... '); clk=tic;
    for i=1:n, A{i}=MaskApi.frPoly(S{i},h,h); end
    % IoU of the first mask against all masks, computed in C
    Ia=MaskApi.iou(A{1},[A{:}]);
    fprintf('DONE (t=%0.2fs).\n',toc(clk));
    fprintf('Running Matlab implementations... '); clk=tic;
    % +.5 aligns the polygon coordinate convention with poly2mask's grid
    for i=1:n, M1=0; for j=1:length(S{i}), x=S{i}{j}+.5;
        M1=M1+poly2mask(x(1:2:end),x(2:2:end),h,h); end
      M{i}=uint8(M1>0); B{i}=MaskApi.encode(M{i});
      % reference IoU via explicit pixel intersection/union
      IB(i)=sum(sum(M{1}&M{i}))/sum(sum(M{1}|M{i}));
    end
    fprintf('DONE (t=%0.2fs).\n',toc(clk));
    if(isequal(A,B)&&isequal(Ia,IB)),
      msg='PASSED'; else msg='FAILED'; end
    warning(['MaskApi *' msg '* validation!']);
  end
  function gasonSplit( name, k )
    % Split JSON file into multiple JSON files.
    %
    % Splits file 'name.json' into multiple files 'name-*.json'. Only
    % works for JSON arrays. Memory efficient. Inverted by gasonMerge().
    %
    % USAGE
    %  CocoUtils.gasonSplit( name, k )
    %
    % INPUTS
    %  name       - file containing JSON array (w/o '.json' ext)
    %  k          - number of files to split JSON into
    % gasonMex may return fewer parts than requested; use actual count
    s=gasonMex('split',fileread([name '.json']),k); k=length(s);
    for i=1:k, f=fopen(sprintf('%s-%06i.json',name,i),'w');
      fwrite(f,s{i}); fclose(f); end
  end

  function gasonMerge( name )
    % Merge JSON files into single JSON file.
    %
    % Merge files 'name-*.json' into single file 'name.json'. Only works
    % for JSON arrays. Memory efficient. Inverted by gasonSplit().
    %
    % USAGE
    %  CocoUtils.gasonMerge( name )
    %
    % INPUTS
    %  name       - files containing JSON arrays (w/o '.json' ext)
    % sort by name so the zero-padded part index restores original order
    s=dir([name '-*.json']); s=sort({s.name}); k=length(s);
    p=fileparts(name); for i=1:k, s{i}=fullfile(p,s{i}); end
    for i=1:k, s{i}=fileread(s{i}); end; s=gasonMex('merge',s);
    f=fopen([name '.json'],'w'); fwrite(f,s); fclose(f);
  end
end

methods( Static, Access=private )
  function data = initData( catNms, n )
    % Helper for convert() functions: init annotations.
    % Preallocates n image slots and n*20 annotation slots; the running
    % counters nImgs/nAnns are stripped once the struct is finalized.
    m=length(catNms); ms=num2cell(1:m);
    I = struct('file_name',0,'height',0,'width',0,'id',0);
    C = struct('supercategory','none','id',ms,'name',catNms);
    A = struct('segmentation',0,'area',0,'iscrowd',0,...
      'image_id',0,'bbox',0,'category_id',0,'id',0,'ignore',0);
    I=repmat(I,1,n); A=repmat(A,1,n*20);
    data = struct('images',I,'type','instances',...
      'annotations',A,'categories',C,'nImgs',0,'nAnns',0);
  end
  function data = addData( data,nm,id,hw,catIds,ignore,iscrowd,bbs )
    % Helper for convert() functions: add annotations.
    % Appends one image record plus one annotation per box; bbs are
    % 1-based [x0 y0 x1 y1] corners and are converted to 0-based [x y w h].
    data.nImgs=data.nImgs+1;
    data.images(data.nImgs)=struct('file_name',nm,...
      'height',hw(1),'width',hw(2),'id',id);
    for j=1:length(catIds), data.nAnns=data.nAnns+1; k=data.nAnns;
      % corners -> 0-based [x y w h]
      b=bbs(j,:); b=b-1; b(3:4)=b(3:4)-b(1:2)+1;
      x1=b(1); x2=b(1)+b(3); y1=b(2); y2=b(2)+b(4);
      % segmentation is the box outline polygon; area is the box area
      S={{[x1 y1 x1 y2 x2 y2 x2 y1]}}; a=b(3)*b(4);
      data.annotations(k)=struct('segmentation',S,'area',a,...
        'iscrowd',iscrowd(j),'image_id',id,'bbox',b,...
        'category_id',catIds(j),'id',k,'ignore',ignore(j));
    end
    % last image added: trim unused preallocated slots and drop counters
    if( data.nImgs == length(data.images) )
      data.annotations=data.annotations(1:data.nAnns);
      data=rmfield(data,{'nImgs','nAnns'});
    end
  end
end

end
__init__(self, dataset_name, cfg, distributed, output_dir=None): 34 | """ 35 | Args: 36 | dataset_name (str): name of the dataset to be evaluated. 37 | It must have either the following corresponding metadata: 38 | 39 | "json_file": the path to the COCO format annotation 40 | 41 | Or it must be in detectron2's standard dataset format 42 | so it can be converted to COCO format automatically. 43 | cfg (CfgNode): config instance 44 | distributed (True): if True, will collect results from all ranks and run evaluation 45 | in the main process. 46 | Otherwise, will evaluate the results in the current process. 47 | output_dir (str): optional, an output directory to dump all 48 | results predicted on the dataset. The dump contains two files: 49 | 50 | 1. "instance_predictions.pth" a file in torch serialization 51 | format that contains all the raw original predictions. 52 | 2. "coco_instances_results.json" a json file in COCO's result 53 | format. 54 | """ 55 | self._tasks = self._tasks_from_config(cfg) 56 | self._distributed = distributed 57 | self._output_dir = output_dir 58 | 59 | self._cpu_device = torch.device("cpu") 60 | self._logger = logging.getLogger(__name__) 61 | 62 | self._metadata = MetadataCatalog.get(dataset_name) 63 | if not hasattr(self._metadata, "json_file"): 64 | self._logger.info( 65 | f"'{dataset_name}' is not registered by `register_coco_instances`." 66 | " Therefore trying to convert it to COCO format ..." 67 | ) 68 | 69 | cache_path = os.path.join(output_dir, f"{dataset_name}_coco_format.json") 70 | self._metadata.json_file = cache_path 71 | convert_to_coco_json(dataset_name, cache_path) 72 | 73 | json_file = PathManager.get_local_path(self._metadata.json_file) 74 | with contextlib.redirect_stdout(io.StringIO()): 75 | self._coco_api = COCO(json_file) 76 | 77 | self._kpt_oks_sigmas = cfg.TEST.KEYPOINT_OKS_SIGMAS 78 | # Test set json files do not contain annotations (evaluation must be 79 | # performed using the COCO evaluation server). 
80 | self._do_evaluation = "annotations" in self._coco_api.dataset 81 | 82 | def reset(self): 83 | self._predictions = [] 84 | 85 | def _tasks_from_config(self, cfg): 86 | """ 87 | Returns: 88 | tuple[str]: tasks that can be evaluated under the given configuration. 89 | """ 90 | tasks = ("bbox",) 91 | if cfg.MODEL.MASK_ON: 92 | tasks = tasks + ("segm",) 93 | if cfg.MODEL.KEYPOINT_ON: 94 | tasks = tasks + ("keypoints",) 95 | return tasks 96 | 97 | def process(self, inputs, outputs): 98 | """ 99 | Args: 100 | inputs: the inputs to a COCO model (e.g., GeneralizedRCNN). 101 | It is a list of dict. Each dict corresponds to an image and 102 | contains keys like "height", "width", "file_name", "image_id". 103 | outputs: the outputs of a COCO model. It is a list of dicts with key 104 | "instances" that contains :class:`Instances`. 105 | """ 106 | for input, output in zip(inputs, outputs): 107 | prediction = {"image_id": input["image_id"]} 108 | # TODO this is ugly 109 | if "instances" in output: 110 | instances = output["instances"].to(self._cpu_device) 111 | prediction["instances"] = instances_to_coco_json(instances, input["image_id"]) 112 | if "proposals" in output: 113 | prediction["proposals"] = output["proposals"].to(self._cpu_device) 114 | self._predictions.append(prediction) 115 | 116 | def evaluate(self): 117 | if self._distributed: 118 | comm.synchronize() 119 | predictions = comm.gather(self._predictions, dst=0) 120 | predictions = list(itertools.chain(*predictions)) 121 | 122 | if not comm.is_main_process(): 123 | return {} 124 | else: 125 | predictions = self._predictions 126 | 127 | if len(predictions) == 0: 128 | self._logger.warning("[LRPEvaluator] Did not receive valid predictions.") 129 | return {} 130 | 131 | if self._output_dir: 132 | PathManager.mkdirs(self._output_dir) 133 | file_path = os.path.join(self._output_dir, "instances_predictions.pth") 134 | with PathManager.open(file_path, "wb") as f: 135 | torch.save(predictions, f) 136 | 137 | 
self._results = OrderedDict() 138 | if "instances" in predictions[0]: 139 | self._eval_predictions(set(self._tasks), predictions) 140 | # Copy so the caller can do whatever with results 141 | return copy.deepcopy(self._results) 142 | 143 | def _eval_predictions(self, tasks, predictions): 144 | """ 145 | Evaluate predictions on the given tasks. 146 | Fill self._results with the metrics of the tasks. 147 | """ 148 | self._logger.info("Preparing results for COCO format ...") 149 | coco_results = list(itertools.chain(*[x["instances"] for x in predictions])) 150 | 151 | # unmap the category ids for COCO 152 | if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"): 153 | reverse_id_mapping = { 154 | v: k for k, v in self._metadata.thing_dataset_id_to_contiguous_id.items() 155 | } 156 | for result in coco_results: 157 | category_id = result["category_id"] 158 | assert ( 159 | category_id in reverse_id_mapping 160 | ), "A prediction has category_id={}, which is not available in the dataset.".format( 161 | category_id 162 | ) 163 | result["category_id"] = reverse_id_mapping[category_id] 164 | 165 | if self._output_dir: 166 | file_path = os.path.join(self._output_dir, "coco_instances_results.json") 167 | self._logger.info("Saving results to {}".format(file_path)) 168 | with PathManager.open(file_path, "w") as f: 169 | f.write(json.dumps(coco_results)) 170 | f.flush() 171 | 172 | if not self._do_evaluation: 173 | self._logger.info("Annotations are not available for evaluation.") 174 | return 175 | 176 | self._logger.info("Evaluating predictions ...") 177 | for task in sorted(tasks): 178 | coco_eval = ( 179 | _evaluate_predictions_on_coco( 180 | self._coco_api, coco_results, task, kpt_oks_sigmas=self._kpt_oks_sigmas 181 | ) 182 | if len(coco_results) > 0 183 | else None # cocoapi does not handle empty results very well 184 | ) 185 | if task != 'segm': 186 | continue 187 | res = self._derive_coco_results( 188 | coco_eval, task, 
class_names=self._metadata.get("thing_classes") 189 | ) 190 | self._results['lrp-' + task] = res 191 | 192 | def _derive_coco_results(self, coco_eval, iou_type, class_names=None): 193 | """ 194 | Derive the desired score numbers from summarized COCOeval. 195 | 196 | Args: 197 | coco_eval (None or COCOEval): None represents no predictions from model. 198 | iou_type (str): 199 | class_names (None or list[str]): if provided, will use it to predict 200 | per-category AP. 201 | 202 | Returns: 203 | a dict of {metric name: score} 204 | """ 205 | 206 | metrics = { 207 | "segm": ['LRP', 'LRP-loc', 'LRP-FP', 'LRP-FN'], 208 | }[iou_type] 209 | 210 | if coco_eval is None: 211 | self._logger.warn("No predictions from the model!") 212 | return {metric: float("nan") for metric in metrics} 213 | 214 | # the standard metrics 215 | results = dict() 216 | # calculate TP, FP, FN, q(gi, dgi), final results for each class 217 | tpcalc = dict() 218 | fpcalc = dict() 219 | fncalc = dict() 220 | qcalc = dict() 221 | # per class result (lrp, lrploc, lrpfp, lrpfn) 222 | lrp_class, lrp_loc_class, lrp_fp_class, lrp_fn_class = [dict() for _ in range(4)] 223 | 224 | # dict to keep area keys intact 225 | area_to_key = [(tuple(area), st) for area, st in zip(coco_eval.params.areaRng, \ 226 | ['all', 'small', 'medium', 'large'])] 227 | area_to_key = dict(area_to_key) 228 | 229 | # get all classes 230 | all_cats = dict() 231 | # Calculate precision recall for each class, area, and iou threshold 232 | for evalImg in coco_eval.evalImgs: 233 | # Skip if maxDets is not the best value, or if evalImg is None (there was no detection) 234 | if evalImg is None: 235 | continue 236 | if evalImg['maxDet'] != coco_eval.params.maxDets[-1]: 237 | continue 238 | # Extract detected and ground truths 239 | # int, [a1, a2], [TxD], [TxD], [G], [G] 240 | # int, [dontcare], [D], [D], [G], [G] (after taking dt[0] and dtIg[0]) 241 | cate_class, area, dt, dtIg, gtIds, gtIg = [evalImg[x] for x in ['category_id', 'aRng', 
'dtMatches', 'dtIgnore', 'gtIds', 'gtIgnore']] 242 | all_cats[cate_class] = 1 243 | # get area key 244 | areakey = area_to_key[tuple(area)] 245 | # ignore if not all areas are considered 246 | if areakey != 'all': 247 | continue 248 | # get image id, and iou matrix 249 | ious = evalImg['ious'] 250 | dt = dt[0] 251 | dtIg = dtIg[0] 252 | # [G] 253 | gtm = evalImg['gtMatches'][0] 254 | # Total number of predictions 255 | num_gt = len(gtIds) - np.sum(gtIg) 256 | # total number of detections 257 | #num_dt = dt.shape - sum(dtIg) 258 | # There are some detections, see if there are matches and they are not ignored 259 | # Compute total true positives and false positives for the image (for each threshold) 260 | tp = np.logical_and( dt , np.logical_not(dtIg) ).sum() * 1.0 261 | fp = np.logical_and( np.logical_not(dt), np.logical_not(dtIg) ).sum() * 1.0 262 | fn1 = np.logical_and( np.logical_not(gtm), np.logical_not(gtIg)).sum() * 1.0 263 | fn = num_gt - tp 264 | assert np.abs(fn1 - fn) < 1e-5, 'false negatives do not match, fn = {}, fn1 = {}'.format(fn, fn1) 265 | # add to these values 266 | tpcalc[cate_class] = tpcalc.get(cate_class, 0) + tp 267 | fpcalc[cate_class] = fpcalc.get(cate_class, 0) + fp 268 | fncalc[cate_class] = fncalc.get(cate_class, 0) + fn 269 | # get pairs of tps 270 | tpd_id, tpg_id = [], [] 271 | for dind, gtid in enumerate(dt): # dt contains a set of ids of the corresponding gt 272 | if dtIg[dind]: 273 | continue 274 | # find where gtid is 275 | gtind = np.where([x==gtid for x in gtIds])[0] 276 | if len(gtind) == 0: 277 | continue 278 | gtind = gtind[0] 279 | if gtIg[gtind]: 280 | continue 281 | tpd_id.append(dind) 282 | tpg_id.append(gtind) 283 | # append 284 | assert np.abs(len(tpd_id) - tp) < 1e-5, 'number of true positives doesnt match with matching, tpds={}, tp={}'.format(len(tpd_id), tp) 285 | if tp > 0: 286 | neg_iou = (1-ious[tpd_id, tpg_id]).sum() 287 | qcalc[cate_class] = qcalc.get(cate_class, 0) + neg_iou 288 | 289 | # per class result (lrp, 
lrploc, lrpfp, lrpfn) 290 | for catid in all_cats.keys(): 291 | tp = tpcalc.get(catid, 0) 292 | fp = fpcalc.get(catid, 0) 293 | fn = fncalc.get(catid, 0) 294 | Z = tp + fp + fn 295 | # skip if theres nothing for this class 296 | if Z <= 0: 297 | continue 298 | q = qcalc.get(catid, 0) # this 'q' is actually sum_ (1 - q) 299 | # calc metrics 300 | _lrp = 1.0/Z * (2 * q + fp + fn) 301 | _lrp_loc = 1/tp * q 302 | _lrp_fp = fp / (tp + fp) 303 | _lrp_fn = fn / (tp + fn) 304 | # append to class 305 | lrp_class[catid] = _lrp 306 | if not np.isnan(_lrp_loc): 307 | lrp_loc_class[catid] = _lrp_loc 308 | if not np.isnan(_lrp_fp): 309 | lrp_fp_class[catid] = _lrp_fp 310 | if not np.isnan(_lrp_fn): 311 | lrp_fn_class[catid] = _lrp_fn 312 | 313 | # get final values 314 | results['lrp'] = np.around(100 * np.mean(list(lrp_class.values())), 4) 315 | results['lrp_loc'] = np.around(100 * np.mean(list(lrp_loc_class.values())), 4) 316 | results['lrp_fp'] = np.around(100 * np.mean(list(lrp_fp_class.values())), 4) 317 | results['lrp_fn'] = np.around(100 * np.mean(list(lrp_fn_class.values())), 4) 318 | 319 | self._logger.info( 320 | "Evaluation results for {}: \n".format('LRP-' + iou_type) + create_small_table(results) 321 | ) 322 | if not np.isfinite(sum(results.values())): 323 | self._logger.info("Some metrics cannot be computed and is shown as NaN.") 324 | 325 | return results 326 | 327 | 328 | def instances_to_coco_json(instances, img_id): 329 | """ 330 | Dump an "Instances" object to a COCO-format json that's used for evaluation. 331 | 332 | Args: 333 | instances (Instances): 334 | img_id (int): the image id 335 | 336 | Returns: 337 | list[dict]: list of json annotations in COCO format. 
def instances_to_coco_json(instances, img_id):
    """
    Dump an "Instances" object to a COCO-format json that's used for evaluation.

    Args:
        instances (Instances): predictions for one image.
        img_id (int): the image id

    Returns:
        list[dict]: list of json annotations in COCO format.
    """
    if len(instances) == 0:
        return []

    # COCO results use absolute XYWH boxes; predictions are absolute XYXY.
    box_list = BoxMode.convert(
        instances.pred_boxes.tensor.numpy(), BoxMode.XYXY_ABS, BoxMode.XYWH_ABS
    ).tolist()
    score_list = instances.scores.tolist()
    class_list = instances.pred_classes.tolist()

    rles = None
    if instances.has("pred_masks"):
        # use RLE to encode the masks, because they are too large and takes memory
        # since this evaluator stores outputs of the entire dataset
        rles = []
        for mask in instances.pred_masks:
            rle = mask_util.encode(np.array(mask[:, :, None], order="F", dtype="uint8"))[0]
            # "counts" is an array encoded by mask_util as a byte-stream. Python3's
            # json writer which always produces strings cannot serialize a bytestream
            # unless you decode it. Thankfully, utf-8 works out (which is also what
            # the pycocotools/_mask.pyx does).
            rle["counts"] = rle["counts"].decode("utf-8")
            rles.append(rle)

    kpts = instances.pred_keypoints if instances.has("pred_keypoints") else None

    results = []
    for idx, (box, cls, score) in enumerate(zip(box_list, class_list, score_list)):
        result = {
            "image_id": img_id,
            "category_id": cls,
            "bbox": box,
            "score": score,
        }
        if rles is not None:
            result["segmentation"] = rles[idx]
        if kpts is not None:
            # In COCO annotations,
            # keypoints coordinates are pixel indices.
            # However our predictions are floating point coordinates.
            # Therefore we subtract 0.5 to be consistent with the annotation format.
            # This is the inverse of data loading logic in `datasets/coco.py`.
            kpts[idx][:, :2] -= 0.5
            result["keypoints"] = kpts[idx].flatten().tolist()
        results.append(result)
    return results
def _evaluate_predictions_on_coco(coco_gt, coco_results, iou_type, kpt_oks_sigmas=None):
    """
    Evaluate the coco results using COCOEval API.

    Args:
        coco_gt (COCO): ground-truth API object.
        coco_results (list[dict]): non-empty predictions in COCO results format.
        iou_type (str): "bbox", "segm" or "keypoints".
        kpt_oks_sigmas: optional per-keypoint OKS sigmas overriding the defaults.

    Returns:
        COCOeval: the evaluated, accumulated and summarized evaluator.
    """
    assert len(coco_results) > 0

    if iou_type == "segm":
        # When evaluating mask AP, if the results contain bbox, cocoapi will
        # use the box area as the area of the instance, instead of the mask area.
        # This leads to a different definition of small/medium/large.
        # We remove the bbox field to let mask AP use mask area.
        coco_results = copy.deepcopy(coco_results)
        for entry in coco_results:
            entry.pop("bbox", None)

    coco_dt = coco_gt.loadRes(coco_results)
    evaluator = COCOeval(coco_gt, coco_dt, iou_type)

    if iou_type == "keypoints":
        # Use the COCO default keypoint OKS sigmas unless overrides are specified
        if kpt_oks_sigmas:
            assert hasattr(evaluator.params, "kpt_oks_sigmas"), "pycocotools is too old!"
            evaluator.params.kpt_oks_sigmas = np.array(kpt_oks_sigmas)
        # COCOAPI requires every detection and every gt to have keypoints, so
        # we just take the first entry from both
        num_keypoints_dt = len(coco_results[0]["keypoints"]) // 3
        num_keypoints_gt = len(next(iter(coco_gt.anns.values()))["keypoints"]) // 3
        num_keypoints_oks = len(evaluator.params.kpt_oks_sigmas)
        assert num_keypoints_oks == num_keypoints_dt == num_keypoints_gt, (
            f"[LRPEvaluator] Prediction contain {num_keypoints_dt} keypoints. "
            f"Ground truth contains {num_keypoints_gt} keypoints. "
            f"The length of cfg.TEST.KEYPOINT_OKS_SIGMAS is {num_keypoints_oks}. "
            "They have to agree with each other. For meaning of OKS, please refer to "
            "http://cocodataset.org/#keypoints-eval."
        )

    evaluator.evaluate()
    evaluator.accumulate()
    evaluator.summarize()

    return evaluator