├── .gitignore ├── LICENSE ├── README.md ├── commons ├── __init__.py ├── config.py └── fashion_config.py ├── data ├── __init__.py ├── data_loader.py └── fashion_data_set.py ├── examples ├── example_001.jpeg └── example_002.jpeg ├── experiment.py ├── experiments ├── __init__.py └── mask_rcnn_experiment.py ├── mrcnn ├── __init__.py ├── config.py ├── model.py ├── parallel_model.py ├── utils.py └── visualize.py ├── requirements.txt └── utils ├── __init__.py └── image_utils.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | MANIFEST 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *.cover 47 | .hypothesis/ 48 | .pytest_cache/ 49 | 50 | # Translations 51 | *.mo 52 | *.pot 53 | 54 | # Django stuff: 55 | *.log 56 | local_settings.py 57 | db.sqlite3 58 | 59 | # Flask stuff: 60 | instance/ 61 | .webassets-cache 62 | 63 | # Scrapy stuff: 64 | .scrapy 65 | 66 | # Sphinx documentation 67 | docs/_build/ 68 | 69 | # PyBuilder 70 | target/ 71 | 72 | # Jupyter Notebook 73 | .ipynb_checkpoints 74 | 75 | # pyenv 76 | .python-version 77 | 78 | # celery beat schedule file 79 | celerybeat-schedule 80 | 81 | # SageMath parsed files 82 | *.sage.py 83 | 84 | # Environments 85 | .env 86 | .venv 87 | env/ 88 | venv/ 89 | ENV/ 90 | env.bak/ 91 | venv.bak/ 92 | 93 | # Spyder project settings 94 | .spyderproject 95 | .spyproject 96 | 97 | # Rope project settings 98 | .ropeproject 99 | 100 | # mkdocs documentation 101 | /site 102 | 103 | # mypy 104 | .mypy_cache/ 105 | 106 | # IDE 107 | .idea -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 Cenk Corapci 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # fashion-parser
2 | [![ForTheBadge built-with-science](http://ForTheBadge.com/images/badges/built-with-science.svg)](https://GitHub.com/cenkcorapci/)
3 |
4 | [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
5 | ![GitHub last commit](https://img.shields.io/github/last-commit/cenkcorapci/fashion-parser.svg)
6 |
7 | Fashion segmentation based on [Matterport's Mask R-CNN implementation](https://github.com/matterport/Mask_RCNN) using the
8 | [imaterialist-fashion-2019-FGVC6](https://www.kaggle.com/c/imaterialist-fashion-2019-FGVC6/overview) data set.
9 |
10 | ## Usage
11 | - Edit *commons/config.py* for workspace details, folder paths, etc.
12 | - Hyperparameters are set in *commons/fashion_config.py*, which inherits from [Matterport's config](https://github.com/matterport/Mask_RCNN/blob/master/mrcnn/config.py)
13 | - Use *experiment.py* to train and create a submission file:
14 | ```bash
15 | python experiment.py --epochs 10 --val_split 0.1
16 | ```
17 | ## Example Results
18 | ![Alt text](examples/example_001.jpeg?raw=true "Title")
19 | ![Alt text](examples/example_002.jpeg?raw=true "Title")
20 |
21 | ## TODO
22 | - [X] Switch to Keras
23 | - [X] Change to Mask R-CNN
24 | - [X] Update Readme
25 | - [ ] Serve with Flask
26 |
--------------------------------------------------------------------------------
/commons/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cenkcorapci/fashion-parser/c42b3a11eb90118e02e5a3f247264c9b6e4b4f77/commons/__init__.py
--------------------------------------------------------------------------------
/commons/config.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """Model configs.
3 | """ 4 | 5 | import logging 6 | import pathlib 7 | 8 | # Logs 9 | logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO) 10 | LOGS_PATH = '/tmp/tb_logs/' 11 | 12 | # Experiments 13 | RANDOM_STATE = 41 14 | NUM_CATS = 46 15 | IMAGE_SIZE = 512 16 | 17 | # Local files 18 | TEMP_DATA_PATH = '/tmp/' 19 | FGVC6_DATA_SET_ROOT_PATH = '/run/media/twoaday/Elements/archive/data/fgvc6-fashion/imaterialist-fashion-2019-FGVC6/' 20 | # PRE_TRAINED_FASHION_WEIGHTS = '/run/media/twoaday/Elements/archive/data/fgvc6-fashion/imaterialist-fashion-2019-FGVC6/models/dl/fashion20190607T2317/mask_rcnn_fashion_0004.h5' 21 | PRE_TRAINED_FASHION_WEIGHTS = None 22 | DL_MODELS_PATH = FGVC6_DATA_SET_ROOT_PATH + 'models/dl/' 23 | 24 | FGVC6_TRAIN_CSV_PATH = '{0}train.csv' 25 | FGVC6_SUBMISSION_CSV_PATH = '{0}submission.csv'.format(FGVC6_DATA_SET_ROOT_PATH) 26 | FGVC6_SAMPLE_SUBMISSION_CSV_PATH = '{0}sample_submission.csv'.format(FGVC6_DATA_SET_ROOT_PATH) 27 | FGVC6_LABEL_DESCRIPTIONS_PATH = '{0}label_descriptions.json'.format(FGVC6_DATA_SET_ROOT_PATH) 28 | FGVC6_TRAIN_CSV_PATH = '{0}train.csv'.format(FGVC6_DATA_SET_ROOT_PATH) 29 | FGVC6_TRAIN_IMAGES_FOLDER_PATH = '{0}train/train/'.format(FGVC6_DATA_SET_ROOT_PATH) 30 | FGVC6_TEST_IMAGES_FOLDER_PATH = '{0}test/test/'.format(FGVC6_DATA_SET_ROOT_PATH) 31 | 32 | # create directories 33 | logging.info("Checking directories...") 34 | pathlib.Path(DL_MODELS_PATH).mkdir(parents=True, exist_ok=True) 35 | logging.info("Directories are set.") 36 | -------------------------------------------------------------------------------- /commons/fashion_config.py: -------------------------------------------------------------------------------- 1 | from commons.config import IMAGE_SIZE, NUM_CATS 2 | from mrcnn.config import Config 3 | 4 | 5 | class FashionConfig(Config): 6 | NAME = "fashion_resnet_101" 7 | NUM_CLASSES = NUM_CATS + 1 # +1 for the background class 8 | 9 | GPU_COUNT = 1 10 | IMAGES_PER_GPU = 2 # a memory error occurs when IMAGES_PER_GPU is too high 11 | STEPS_PER_EPOCH = 10000000 # use all the data 12 | VALIDATION_STEPS = 100000 13 | BACKBONE = 'resnet101' 14 | 15 | IMAGE_MIN_DIM = IMAGE_SIZE 16 | IMAGE_MAX_DIM = IMAGE_SIZE 17 | IMAGE_RESIZE_MODE = 'none' 18 | 19 | RPN_ANCHOR_SCALES = (16, 32, 64, 128, 256) 20 | 21 | 22 | class InferenceConfig(FashionConfig): 23 | GPU_COUNT = 1 24 | IMAGES_PER_GPU = 1 25 | -------------------------------------------------------------------------------- /data/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cenkcorapci/fashion-parser/c42b3a11eb90118e02e5a3f247264c9b6e4b4f77/data/__init__.py -------------------------------------------------------------------------------- /data/data_loader.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | import pandas as pd 4 | 5 | from commons.config import * 6 | 7 | 8 | class DataLoader: 9 | def __init__(self): 10 | with open(FGVC6_LABEL_DESCRIPTIONS_PATH) as json_file: 11 | data = json.load(json_file) 12 | logging.info('Loading label descriptions from: \n {0}'.format(data['info'])) 13 | 14 | self.label_names = [x['name'] for x in data['categories']] 15 | self._segment_df = pd.read_csv(FGVC6_TRAIN_CSV_PATH) 16 | self._segment_df['CategoryId'] = self._segment_df['ClassId'].str.split('_').str[0] 17 | self.image_df = self._segment_df.groupby('ImageId')['EncodedPixels', 'CategoryId'].agg(lambda x: list(x)) 18 | self.size_df = 
self._segment_df.groupby('ImageId')['Height', 'Width'].mean() 19 | self.image_df = self.image_df.join(self.size_df, on='ImageId') 20 | -------------------------------------------------------------------------------- /data/fashion_data_set.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | 4 | from commons.config import IMAGE_SIZE, FGVC6_TRAIN_IMAGES_FOLDER_PATH 5 | from data.data_loader import DataLoader 6 | from mrcnn import utils 7 | from utils.image_utils import resize_image 8 | 9 | 10 | class FashionDataset(utils.Dataset): 11 | 12 | def __init__(self, df, label_names): 13 | super().__init__(self) 14 | self._label_names = label_names 15 | 16 | # Add classes 17 | for index, name in enumerate(self._label_names): 18 | self.add_class("fashion", index + 1, name) 19 | 20 | # Add images 21 | for _, row in df.iterrows(): 22 | self.add_image("fashion", 23 | image_id=row.name, 24 | path='{0}{1}'.format(FGVC6_TRAIN_IMAGES_FOLDER_PATH, row.name), 25 | labels=row['CategoryId'], 26 | annotations=row['EncodedPixels'], 27 | height=row['Height'], width=row['Width']) 28 | 29 | def image_reference(self, image_id): 30 | info = self.image_info[image_id] 31 | return info['path'], [self._label_names[int(x)] for x in info['labels']] 32 | 33 | def load_image(self, image_id): 34 | return resize_image(self.image_info[image_id]['path']) 35 | 36 | def load_mask(self, image_id): 37 | info = self.image_info[image_id] 38 | 39 | mask = np.zeros((IMAGE_SIZE, IMAGE_SIZE, len(info['annotations'])), dtype=np.uint8) 40 | labels = [] 41 | 42 | for m, (annotation, label) in enumerate(zip(info['annotations'], info['labels'])): 43 | sub_mask = np.full(info['height'] * info['width'], 0, dtype=np.uint8) 44 | annotation = [int(x) for x in annotation.split(' ')] 45 | 46 | for i, start_pixel in enumerate(annotation[::2]): 47 | sub_mask[start_pixel: start_pixel + annotation[2 * i + 1]] = 1 48 | 49 | sub_mask = sub_mask.reshape((info['height'], info['width']), order='F') 50 | sub_mask = cv2.resize(sub_mask, (IMAGE_SIZE, IMAGE_SIZE), interpolation=cv2.INTER_NEAREST) 51 | 52 | mask[:, :, m] = sub_mask 53 | labels.append(int(label) + 1) 54 | 55 | return mask, np.array(labels) 56 | 57 | 58 | if __name__ == '__main__': 59 | 60 | import random 61 | from mrcnn import visualize 62 | 63 | loader = DataLoader() 64 | dataset = FashionDataset(loader.image_df.sample(1000), loader.label_names) 65 | dataset.prepare() 66 | 67 | for i in range(6): 68 | image_id = random.choice(dataset.image_ids) 69 | print(dataset.image_reference(image_id)) 70 | 71 | image = dataset.load_image(image_id) 72 | mask, class_ids = dataset.load_mask(image_id) 73 | visualize.display_top_masks(image, mask, class_ids, dataset.class_names, limit=4) 74 | -------------------------------------------------------------------------------- /examples/example_001.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cenkcorapci/fashion-parser/c42b3a11eb90118e02e5a3f247264c9b6e4b4f77/examples/example_001.jpeg -------------------------------------------------------------------------------- /examples/example_002.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cenkcorapci/fashion-parser/c42b3a11eb90118e02e5a3f247264c9b6e4b4f77/examples/example_002.jpeg -------------------------------------------------------------------------------- /experiment.py: 
-------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | from experiments.mask_rcnn_experiment import MaskRCNNExperiment 4 | 5 | usage_docs = """ 6 | --epochs Number of epochs 7 | --val_split Set validation split(between 0 and 1) 8 | """ 9 | 10 | parser = argparse.ArgumentParser(usage=usage_docs) 11 | 12 | parser.add_argument('--epochs', type=int, default=10) 13 | parser.add_argument('--val_split', type=float, default=.1) 14 | 15 | args = parser.parse_args() 16 | 17 | experiment = MaskRCNNExperiment(nb_epochs=args.epochs, val_split=args.val_split) 18 | 19 | experiment.train_model() 20 | experiment.set_to_inference_mode() 21 | experiment.predict() 22 | experiment.visualize() 23 | 24 | -------------------------------------------------------------------------------- /experiments/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cenkcorapci/fashion-parser/c42b3a11eb90118e02e5a3f247264c9b6e4b4f77/experiments/__init__.py -------------------------------------------------------------------------------- /experiments/mask_rcnn_experiment.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | from __future__ import print_function, division 3 | 4 | import cv2 5 | import pandas as pd 6 | from imgaug import augmenters as iaa 7 | from keras.callbacks import * 8 | from sklearn.model_selection import train_test_split 9 | from tqdm import tqdm 10 | 11 | import mrcnn.model as modellib 12 | from commons.config import * 13 | from commons.fashion_config import FashionConfig, InferenceConfig 14 | from data.data_loader import DataLoader 15 | from data.fashion_data_set import FashionDataset 16 | from mrcnn import visualize 17 | from utils.image_utils import resize_image, refine_masks, to_rle 18 | 19 | 20 | class MaskRCNNExperiment: 21 | def __init__(self, 22 | val_split=0.1, 23 | nb_epochs=3, 24 | learning_rate=0.01): 25 | 26 | self._in_inference_mode = False 27 | self._nb_epochs = nb_epochs 28 | self._lr = learning_rate 29 | self._model_name = 'mask_r_cnn_fashion_resnet_101' 30 | 31 | logging.info("Getting data set") 32 | loader = DataLoader() 33 | df = loader.image_df 34 | 35 | logging.info("Splitting {0} samples for validation".format(float(len(df)) * val_split)) 36 | 37 | self._class_names = loader.label_names 38 | self._train_data_set, self._val_data_set = train_test_split(df, random_state=RANDOM_STATE, test_size=val_split) 39 | train_size = len(self._train_data_set) 40 | val_size = len(self._val_data_set) 41 | 42 | self._train_data_set = FashionDataset(self._train_data_set, self._class_names) 43 | self._val_data_set = FashionDataset(self._val_data_set, self._class_names) 44 | 45 | self._train_data_set.prepare() 46 | self._val_data_set.prepare() 47 | self._sample_df = pd.read_csv(FGVC6_SAMPLE_SUBMISSION_CSV_PATH) 48 | 49 | self._augmentation = iaa.Sequential([ 50 | iaa.Fliplr(.5), # horizontal flip 51 | iaa.Flipud(.5) # vertical flip 52 | ]) 53 | self._model_config = FashionConfig() 54 | self._model_config.STEPS_PER_EPOCH = train_size / self._model_config.IMAGES_PER_GPU 55 | self._model_config.VALIDATION_STEPS = val_size / self._model_config.IMAGES_PER_GPU 56 | # load model 57 | self._model = modellib.MaskRCNN(mode='training', 58 | config=self._model_config, 59 | model_dir=DL_MODELS_PATH) 60 | if PRE_TRAINED_FASHION_WEIGHTS is not None: 61 | self._model.load_weights(PRE_TRAINED_FASHION_WEIGHTS, 62 | by_name=True, 63 | 
exclude=['mrcnn_class_logits', 'mrcnn_bbox_fc', 'mrcnn_bbox', 'mrcnn_mask']) 64 | 65 | es_cb = EarlyStopping(monitor='val_mrcnn_mask_loss', patience=2) 66 | 67 | self._callbacks = [es_cb] 68 | 69 | def train_model(self): 70 | self._model.train(self._train_data_set, self._val_data_set, 71 | learning_rate=self._lr, 72 | epochs=self._nb_epochs, 73 | layers='heads', 74 | custom_callbacks=self._callbacks, 75 | augmentation=self._augmentation) 76 | 77 | def set_to_train_mode(self): 78 | self._model = modellib.MaskRCNN(mode='training', config=self._model_config, model_dir=DL_MODELS_PATH) 79 | self._model.load_weights(PRE_TRAINED_FASHION_WEIGHTS, 80 | by_name=True, 81 | exclude=['mrcnn_class_logits', 'mrcnn_bbox_fc', 'mrcnn_bbox', 'mrcnn_mask']) 82 | self._in_inference_mode = False 83 | 84 | def set_to_inference_mode(self, model_weights_path=PRE_TRAINED_FASHION_WEIGHTS): 85 | inf_conf = InferenceConfig() 86 | self._model = modellib.MaskRCNN(mode='inference', 87 | config=inf_conf, 88 | model_dir=DL_MODELS_PATH) 89 | self._model.load_weights(model_weights_path, 90 | by_name=True) 91 | self._in_inference_mode = True 92 | 93 | def predict(self): 94 | sub_list = [] 95 | missing_count = 0 96 | for i, row in tqdm(self._sample_df.iterrows(), total=len(self._sample_df)): 97 | image = resize_image('{0}{1}'.format(FGVC6_TEST_IMAGES_FOLDER_PATH, row['ImageId'])) 98 | result = self._model.detect([image])[0] 99 | if result['masks'].size > 0: 100 | masks, _ = refine_masks(result['masks'], result['rois']) 101 | for m in range(masks.shape[-1]): 102 | mask = masks[:, :, m].ravel(order='F') 103 | rle = to_rle(mask) 104 | label = result['class_ids'][m] - 1 105 | sub_list.append([row['ImageId'], ' '.join(list(map(str, rle))), label]) 106 | else: 107 | # The system does not allow missing ids, this is an easy way to fill them 108 | sub_list.append([row['ImageId'], '1 1', 23]) 109 | missing_count += 1 110 | 111 | submission_df = pd.DataFrame(sub_list, columns=self._sample_df.columns.values) 112 | print("Total image results: ", submission_df['ImageId'].nunique()) 113 | print("Missing Images: ", missing_count) 114 | submission_df.to_csv(FGVC6_SUBMISSION_CSV_PATH, index=False) 115 | 116 | submission_df.head() 117 | 118 | def visualize(self): 119 | for i in range(9): 120 | image_id = self._sample_df.sample()['ImageId'].values[0] 121 | image_path = str('{0}{1}'.format(FGVC6_TEST_IMAGES_FOLDER_PATH, image_id)) 122 | 123 | img = cv2.imread(image_path) 124 | img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) 125 | 126 | result = self._model.detect([resize_image(image_path)]) 127 | r = result[0] 128 | 129 | if r['masks'].size > 0: 130 | masks = np.zeros((img.shape[0], img.shape[1], r['masks'].shape[-1]), dtype=np.uint8) 131 | for m in range(r['masks'].shape[-1]): 132 | masks[:, :, m] = cv2.resize(r['masks'][:, :, m].astype('uint8'), 133 | (img.shape[1], img.shape[0]), interpolation=cv2.INTER_NEAREST) 134 | 135 | y_scale = img.shape[0] / IMAGE_SIZE 136 | x_scale = img.shape[1] / IMAGE_SIZE 137 | rois = (r['rois'] * [y_scale, x_scale, y_scale, x_scale]).astype(int) 138 | 139 | masks, rois = refine_masks(masks, rois) 140 | else: 141 | masks, rois = r['masks'], r['rois'] 142 | 143 | visualize.display_instances(img, rois, masks, r['class_ids'], 144 | ['bg'] + self._class_names, r['scores'], 145 | title=image_id, figsize=(12, 12)) 146 | 147 | 148 | if __name__ == "__main__": 149 | experiment = MaskRCNNExperiment(nb_epochs=2, val_split=.1) 150 | experiment.set_to_inference_mode() 151 | experiment.predict() 152 | experiment.visualize() 153 | 
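The submissions written by `predict()` above use the same run-length encoding that `FashionDataset.load_mask` decodes: a space-separated list of `(start_pixel, run_length)` pairs over the column-major (`order='F'`) flattening of the mask. `utils/image_utils.to_rle` is not included in this listing, so the round trip below is only a sketch of that format, mirroring the decoding loop in *data/fashion_data_set.py* rather than the project's actual helper:

```python
# Sketch of the RLE format used above; to_rle in utils/image_utils.py is not shown
# in this listing, so encode_rle is an assumed, illustrative inverse of load_mask's decoding.
import numpy as np


def decode_rle(annotation, height, width):
    """Turn a 'start run start run ...' string into a binary (height, width) mask."""
    flat = np.zeros(height * width, dtype=np.uint8)
    numbers = [int(x) for x in annotation.split(' ')]
    for start, run in zip(numbers[::2], numbers[1::2]):
        flat[start:start + run] = 1
    return flat.reshape((height, width), order='F')  # column-major, as in load_mask


def encode_rle(mask):
    """Encode a binary mask into the same 'start run ...' string."""
    flat = mask.ravel(order='F')  # predict() applies this same ravel before calling to_rle
    padded = np.concatenate([[0], flat, [0]])
    changes = np.where(padded[1:] != padded[:-1])[0]  # 0->1 and 1->0 transitions
    starts, ends = changes[::2], changes[1::2]
    return ' '.join(str(v) for v in np.stack([starts, ends - starts], axis=1).ravel())


if __name__ == '__main__':
    toy = np.zeros((4, 4), dtype=np.uint8)
    toy[1:3, 1:3] = 1
    assert np.array_equal(decode_rle(encode_rle(toy), 4, 4), toy)
```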
-------------------------------------------------------------------------------- /mrcnn/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /mrcnn/config.py: -------------------------------------------------------------------------------- 1 | """ 2 | Mask R-CNN 3 | Base Configurations class. 4 | 5 | Copyright (c) 2017 Matterport, Inc. 6 | Licensed under the MIT License (see LICENSE for details) 7 | Written by Waleed Abdulla 8 | """ 9 | 10 | import numpy as np 11 | 12 | 13 | # Base Configuration Class 14 | # Don't use this class directly. Instead, sub-class it and override 15 | # the configurations you need to change. 16 | 17 | class Config(object): 18 | """Base configuration class. For custom configurations, create a 19 | sub-class that inherits from this one and override properties 20 | that need to be changed. 21 | """ 22 | # Name the configurations. For example, 'COCO', 'Experiment 3', ...etc. 23 | # Useful if your code needs to do things differently depending on which 24 | # experiment is running. 25 | NAME = None # Override in sub-classes 26 | 27 | # NUMBER OF GPUs to use. When using only a CPU, this needs to be set to 1. 28 | GPU_COUNT = 1 29 | 30 | # Number of images to train with on each GPU. A 12GB GPU can typically 31 | # handle 2 images of 1024x1024px. 32 | # Adjust based on your GPU memory and image sizes. Use the highest 33 | # number that your GPU can handle for best performance. 34 | IMAGES_PER_GPU = 2 35 | 36 | # Number of training steps per epoch 37 | # This doesn't need to match the size of the training set. Tensorboard 38 | # updates are saved at the end of each epoch, so setting this to a 39 | # smaller number means getting more frequent TensorBoard updates. 40 | # Validation stats are also calculated at each epoch end and they 41 | # might take a while, so don't set this too small to avoid spending 42 | # a lot of time on validation stats. 43 | STEPS_PER_EPOCH = 1000 44 | 45 | # Number of validation steps to run at the end of every training epoch. 46 | # A bigger number improves accuracy of validation stats, but slows 47 | # down the training. 48 | VALIDATION_STEPS = 50 49 | 50 | # Backbone network architecture 51 | # Supported values are: resnet50, resnet101. 52 | # You can also provide a callable that should have the signature 53 | # of model.resnet_graph. If you do so, you need to supply a callable 54 | # to COMPUTE_BACKBONE_SHAPE as well 55 | BACKBONE = "resnet101" 56 | 57 | # Only useful if you supply a callable to BACKBONE. Should compute 58 | # the shape of each layer of the FPN Pyramid. 59 | # See model.compute_backbone_shapes 60 | COMPUTE_BACKBONE_SHAPE = None 61 | 62 | # The strides of each layer of the FPN Pyramid. These values 63 | # are based on a Resnet101 backbone. 
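    # For example, with the 512x512 inputs that FashionConfig in this repo feeds the
    # model, these strides correspond to FPN feature maps of roughly 128, 64, 32, 16
    # and 8 pixels per side (512 divided by 4, 8, 16, 32 and 64 respectively).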
64 | BACKBONE_STRIDES = [4, 8, 16, 32, 64] 65 | 66 | # Size of the fully-connected layers in the classification graph 67 | FPN_CLASSIF_FC_LAYERS_SIZE = 1024 68 | 69 | # Size of the top-down layers used to build the feature pyramid 70 | TOP_DOWN_PYRAMID_SIZE = 256 71 | 72 | # Number of classification classes (including background) 73 | NUM_CLASSES = 1 # Override in sub-classes 74 | 75 | # Length of square anchor side in pixels 76 | RPN_ANCHOR_SCALES = (32, 64, 128, 256, 512) 77 | 78 | # Ratios of anchors at each cell (width/height) 79 | # A value of 1 represents a square anchor, and 0.5 is a wide anchor 80 | RPN_ANCHOR_RATIOS = [0.5, 1, 2] 81 | 82 | # Anchor stride 83 | # If 1 then anchors are created for each cell in the backbone feature map. 84 | # If 2, then anchors are created for every other cell, and so on. 85 | RPN_ANCHOR_STRIDE = 1 86 | 87 | # Non-max suppression threshold to filter RPN proposals. 88 | # You can increase this during training to generate more propsals. 89 | RPN_NMS_THRESHOLD = 0.7 90 | 91 | # How many anchors per image to use for RPN training 92 | RPN_TRAIN_ANCHORS_PER_IMAGE = 256 93 | 94 | # ROIs kept after tf.nn.top_k and before non-maximum suppression 95 | PRE_NMS_LIMIT = 6000 96 | 97 | # ROIs kept after non-maximum suppression (training and inference) 98 | POST_NMS_ROIS_TRAINING = 2000 99 | POST_NMS_ROIS_INFERENCE = 1000 100 | 101 | # If enabled, resizes instance masks to a smaller size to reduce 102 | # memory load. Recommended when using high-resolution images. 103 | USE_MINI_MASK = True 104 | MINI_MASK_SHAPE = (56, 56) # (height, width) of the mini-mask 105 | 106 | # Input image resizing 107 | # Generally, use the "square" resizing mode for training and predicting 108 | # and it should work well in most cases. In this mode, images are scaled 109 | # up such that the small side is = IMAGE_MIN_DIM, but ensuring that the 110 | # scaling doesn't make the long side > IMAGE_MAX_DIM. Then the image is 111 | # padded with zeros to make it a square so multiple images can be put 112 | # in one batch. 113 | # Available resizing modes: 114 | # none: No resizing or padding. Return the image unchanged. 115 | # square: Resize and pad with zeros to get a square image 116 | # of size [max_dim, max_dim]. 117 | # pad64: Pads width and height with zeros to make them multiples of 64. 118 | # If IMAGE_MIN_DIM or IMAGE_MIN_SCALE are not None, then it scales 119 | # up before padding. IMAGE_MAX_DIM is ignored in this mode. 120 | # The multiple of 64 is needed to ensure smooth scaling of feature 121 | # maps up and down the 6 levels of the FPN pyramid (2**6=64). 122 | # crop: Picks random crops from the image. First, scales the image based 123 | # on IMAGE_MIN_DIM and IMAGE_MIN_SCALE, then picks a random crop of 124 | # size IMAGE_MIN_DIM x IMAGE_MIN_DIM. Can be used in training only. 125 | # IMAGE_MAX_DIM is not used in this mode. 126 | IMAGE_RESIZE_MODE = "square" 127 | IMAGE_MIN_DIM = 800 128 | IMAGE_MAX_DIM = 1024 129 | # Minimum scaling ratio. Checked after MIN_IMAGE_DIM and can force further 130 | # up scaling. For example, if set to 2 then images are scaled up to double 131 | # the width and height, or more, even if MIN_IMAGE_DIM doesn't require it. 132 | # However, in 'square' mode, it can be overruled by IMAGE_MAX_DIM. 133 | IMAGE_MIN_SCALE = 0 134 | # Number of color channels per image. RGB = 3, grayscale = 1, RGB-D = 4 135 | # Changing this requires other changes in the code. 
See the WIKI for more 136 | # details: https://github.com/matterport/Mask_RCNN/wiki 137 | IMAGE_CHANNEL_COUNT = 3 138 | 139 | # Image mean (RGB) 140 | MEAN_PIXEL = np.array([123.7, 116.8, 103.9]) 141 | 142 | # Number of ROIs per image to feed to classifier/mask heads 143 | # The Mask RCNN paper uses 512 but often the RPN doesn't generate 144 | # enough positive proposals to fill this and keep a positive:negative 145 | # ratio of 1:3. You can increase the number of proposals by adjusting 146 | # the RPN NMS threshold. 147 | TRAIN_ROIS_PER_IMAGE = 200 148 | 149 | # Percent of positive ROIs used to train classifier/mask heads 150 | ROI_POSITIVE_RATIO = 0.33 151 | 152 | # Pooled ROIs 153 | POOL_SIZE = 7 154 | MASK_POOL_SIZE = 14 155 | 156 | # Shape of output mask 157 | # To change this you also need to change the neural network mask branch 158 | MASK_SHAPE = [28, 28] 159 | 160 | # Maximum number of ground truth instances to use in one image 161 | MAX_GT_INSTANCES = 100 162 | 163 | # Bounding box refinement standard deviation for RPN and final detections. 164 | RPN_BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2]) 165 | BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2]) 166 | 167 | # Max number of final detections 168 | DETECTION_MAX_INSTANCES = 100 169 | 170 | # Minimum probability value to accept a detected instance 171 | # ROIs below this threshold are skipped 172 | DETECTION_MIN_CONFIDENCE = 0.7 173 | 174 | # Non-maximum suppression threshold for detection 175 | DETECTION_NMS_THRESHOLD = 0.3 176 | 177 | # Learning rate and momentum 178 | # The Mask RCNN paper uses lr=0.02, but on TensorFlow it causes 179 | # weights to explode. Likely due to differences in optimizer 180 | # implementation. 181 | LEARNING_RATE = 0.001 182 | LEARNING_MOMENTUM = 0.9 183 | 184 | # Weight decay regularization 185 | WEIGHT_DECAY = 0.0001 186 | 187 | # Loss weights for more precise optimization. 188 | # Can be used for R-CNN training setup. 189 | LOSS_WEIGHTS = { 190 | "rpn_class_loss": 1., 191 | "rpn_bbox_loss": 1., 192 | "mrcnn_class_loss": 1., 193 | "mrcnn_bbox_loss": 1., 194 | "mrcnn_mask_loss": 1. 195 | } 196 | 197 | # Use RPN ROIs or externally generated ROIs for training 198 | # Keep this True for most situations. Set to False if you want to train 199 | # the head branches on ROI generated by code rather than the ROIs from 200 | # the RPN. For example, to debug the classifier head without having to 201 | # train the RPN. 202 | USE_RPN_ROIS = True 203 | 204 | # Train or freeze batch normalization layers 205 | # None: Train BN layers. This is the normal mode 206 | # False: Freeze BN layers. Good when using a small batch size 207 | # True: (don't use). 
Set layer in training mode even when predicting 208 | TRAIN_BN = False # Defaulting to False since batch size is often small 209 | 210 | # Gradient norm clipping 211 | GRADIENT_CLIP_NORM = 5.0 212 | 213 | def __init__(self): 214 | """Set values of computed attributes.""" 215 | # Effective batch size 216 | self.BATCH_SIZE = self.IMAGES_PER_GPU * self.GPU_COUNT 217 | 218 | # Input image size 219 | if self.IMAGE_RESIZE_MODE == "crop": 220 | self.IMAGE_SHAPE = np.array([self.IMAGE_MIN_DIM, self.IMAGE_MIN_DIM, 221 | self.IMAGE_CHANNEL_COUNT]) 222 | else: 223 | self.IMAGE_SHAPE = np.array([self.IMAGE_MAX_DIM, self.IMAGE_MAX_DIM, 224 | self.IMAGE_CHANNEL_COUNT]) 225 | 226 | # Image meta data length 227 | # See compose_image_meta() for details 228 | self.IMAGE_META_SIZE = 1 + 3 + 3 + 4 + 1 + self.NUM_CLASSES 229 | 230 | def display(self): 231 | """Display Configuration values.""" 232 | print("\nConfigurations:") 233 | for a in dir(self): 234 | if not a.startswith("__") and not callable(getattr(self, a)): 235 | print("{:30} {}".format(a, getattr(self, a))) 236 | print("\n") 237 | -------------------------------------------------------------------------------- /mrcnn/parallel_model.py: -------------------------------------------------------------------------------- 1 | """ 2 | Mask R-CNN 3 | Multi-GPU Support for Keras. 4 | 5 | Copyright (c) 2017 Matterport, Inc. 6 | Licensed under the MIT License (see LICENSE for details) 7 | Written by Waleed Abdulla 8 | 9 | Ideas and a small code snippets from these sources: 10 | https://github.com/fchollet/keras/issues/2436 11 | https://medium.com/@kuza55/transparent-multi-gpu-training-on-tensorflow-with-keras-8b0016fd9012 12 | https://github.com/avolkov1/keras_experiments/blob/master/keras_exp/multigpu/ 13 | https://github.com/fchollet/keras/blob/master/keras/utils/training_utils.py 14 | """ 15 | 16 | import tensorflow as tf 17 | import keras.backend as K 18 | import keras.layers as KL 19 | import keras.models as KM 20 | 21 | 22 | class ParallelModel(KM.Model): 23 | """Subclasses the standard Keras Model and adds multi-GPU support. 24 | It works by creating a copy of the model on each GPU. Then it slices 25 | the inputs and sends a slice to each copy of the model, and then 26 | merges the outputs together and applies the loss on the combined 27 | outputs. 28 | """ 29 | 30 | def __init__(self, keras_model, gpu_count): 31 | """Class constructor. 32 | keras_model: The Keras model to parallelize 33 | gpu_count: Number of GPUs. Must be > 1 34 | """ 35 | self.inner_model = keras_model 36 | self.gpu_count = gpu_count 37 | merged_outputs = self.make_parallel() 38 | super(ParallelModel, self).__init__(inputs=self.inner_model.inputs, 39 | outputs=merged_outputs) 40 | 41 | def __getattribute__(self, attrname): 42 | """Redirect loading and saving methods to the inner model. That's where 43 | the weights are stored.""" 44 | if 'load' in attrname or 'save' in attrname: 45 | return getattr(self.inner_model, attrname) 46 | return super(ParallelModel, self).__getattribute__(attrname) 47 | 48 | def summary(self, *args, **kwargs): 49 | """Override summary() to display summaries of both, the wrapper 50 | and inner models.""" 51 | super(ParallelModel, self).summary(*args, **kwargs) 52 | self.inner_model.summary(*args, **kwargs) 53 | 54 | def make_parallel(self): 55 | """Creates a new wrapper model that consists of multiple replicas of 56 | the original model placed on different GPUs. 57 | """ 58 | # Slice inputs. 
Slice inputs on the CPU to avoid sending a copy 59 | # of the full inputs to all GPUs. Saves on bandwidth and memory. 60 | input_slices = {name: tf.split(x, self.gpu_count) 61 | for name, x in zip(self.inner_model.input_names, 62 | self.inner_model.inputs)} 63 | 64 | output_names = self.inner_model.output_names 65 | outputs_all = [] 66 | for i in range(len(self.inner_model.outputs)): 67 | outputs_all.append([]) 68 | 69 | # Run the model call() on each GPU to place the ops there 70 | for i in range(self.gpu_count): 71 | with tf.device('/gpu:%d' % i): 72 | with tf.name_scope('tower_%d' % i): 73 | # Run a slice of inputs through this replica 74 | zipped_inputs = zip(self.inner_model.input_names, 75 | self.inner_model.inputs) 76 | inputs = [ 77 | KL.Lambda(lambda s: input_slices[name][i], 78 | output_shape=lambda s: (None,) + s[1:])(tensor) 79 | for name, tensor in zipped_inputs] 80 | # Create the model replica and get the outputs 81 | outputs = self.inner_model(inputs) 82 | if not isinstance(outputs, list): 83 | outputs = [outputs] 84 | # Save the outputs for merging back together later 85 | for l, o in enumerate(outputs): 86 | outputs_all[l].append(o) 87 | 88 | # Merge outputs on CPU 89 | with tf.device('/cpu:0'): 90 | merged = [] 91 | for outputs, name in zip(outputs_all, output_names): 92 | # Concatenate or average outputs? 93 | # Outputs usually have a batch dimension and we concatenate 94 | # across it. If they don't, then the output is likely a loss 95 | # or a metric value that gets averaged across the batch. 96 | # Keras expects losses and metrics to be scalars. 97 | if K.int_shape(outputs[0]) == (): 98 | # Average 99 | m = KL.Lambda(lambda o: tf.add_n(o) / len(outputs), name=name)(outputs) 100 | else: 101 | # Concatenate 102 | m = KL.Concatenate(axis=0, name=name)(outputs) 103 | merged.append(m) 104 | return merged 105 | 106 | 107 | if __name__ == "__main__": 108 | # Testing code below. It creates a simple model to train on MNIST and 109 | # tries to run it on 2 GPUs. It saves the graph so it can be viewed 110 | # in TensorBoard. Run it as: 111 | # 112 | # python3 parallel_model.py 113 | 114 | import os 115 | import numpy as np 116 | import keras.optimizers 117 | from keras.datasets import mnist 118 | from keras.preprocessing.image import ImageDataGenerator 119 | 120 | GPU_COUNT = 2 121 | 122 | # Root directory of the project 123 | ROOT_DIR = os.path.abspath("../") 124 | 125 | # Directory to save logs and trained model 126 | MODEL_DIR = os.path.join(ROOT_DIR, "logs") 127 | 128 | def build_model(x_train, num_classes): 129 | # Reset default graph. Keras leaves old ops in the graph, 130 | # which are ignored for execution but clutter graph 131 | # visualization in TensorBoard. 
132 | tf.reset_default_graph() 133 | 134 | inputs = KL.Input(shape=x_train.shape[1:], name="input_image") 135 | x = KL.Conv2D(32, (3, 3), activation='relu', padding="same", 136 | name="conv1")(inputs) 137 | x = KL.Conv2D(64, (3, 3), activation='relu', padding="same", 138 | name="conv2")(x) 139 | x = KL.MaxPooling2D(pool_size=(2, 2), name="pool1")(x) 140 | x = KL.Flatten(name="flat1")(x) 141 | x = KL.Dense(128, activation='relu', name="dense1")(x) 142 | x = KL.Dense(num_classes, activation='softmax', name="dense2")(x) 143 | 144 | return KM.Model(inputs, x, "digit_classifier_model") 145 | 146 | # Load MNIST Data 147 | (x_train, y_train), (x_test, y_test) = mnist.load_data() 148 | x_train = np.expand_dims(x_train, -1).astype('float32') / 255 149 | x_test = np.expand_dims(x_test, -1).astype('float32') / 255 150 | 151 | print('x_train shape:', x_train.shape) 152 | print('x_test shape:', x_test.shape) 153 | 154 | # Build data generator and model 155 | datagen = ImageDataGenerator() 156 | model = build_model(x_train, 10) 157 | 158 | # Add multi-GPU support. 159 | model = ParallelModel(model, GPU_COUNT) 160 | 161 | optimizer = keras.optimizers.SGD(lr=0.01, momentum=0.9, clipnorm=5.0) 162 | 163 | model.compile(loss='sparse_categorical_crossentropy', 164 | optimizer=optimizer, metrics=['accuracy']) 165 | 166 | model.summary() 167 | 168 | # Train 169 | model.fit_generator( 170 | datagen.flow(x_train, y_train, batch_size=64), 171 | steps_per_epoch=50, epochs=10, verbose=1, 172 | validation_data=(x_test, y_test), 173 | callbacks=[keras.callbacks.TensorBoard(log_dir=MODEL_DIR, 174 | write_graph=True)] 175 | ) 176 | -------------------------------------------------------------------------------- /mrcnn/utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | Mask R-CNN 3 | Common utility functions and classes. 4 | 5 | Copyright (c) 2017 Matterport, Inc. 6 | Licensed under the MIT License (see LICENSE for details) 7 | Written by Waleed Abdulla 8 | """ 9 | 10 | import sys 11 | import os 12 | import logging 13 | import math 14 | import random 15 | import numpy as np 16 | import tensorflow as tf 17 | import scipy 18 | import skimage.color 19 | import skimage.io 20 | import skimage.transform 21 | import urllib.request 22 | import shutil 23 | import warnings 24 | from distutils.version import LooseVersion 25 | 26 | # URL from which to download the latest COCO trained weights 27 | COCO_MODEL_URL = "https://github.com/matterport/Mask_RCNN/releases/download/v2.0/mask_rcnn_coco.h5" 28 | 29 | 30 | ############################################################ 31 | # Bounding Boxes 32 | ############################################################ 33 | 34 | def extract_bboxes(mask): 35 | """Compute bounding boxes from masks. 36 | mask: [height, width, num_instances]. Mask pixels are either 1 or 0. 37 | 38 | Returns: bbox array [num_instances, (y1, x1, y2, x2)]. 39 | """ 40 | boxes = np.zeros([mask.shape[-1], 4], dtype=np.int32) 41 | for i in range(mask.shape[-1]): 42 | m = mask[:, :, i] 43 | # Bounding box. 44 | horizontal_indicies = np.where(np.any(m, axis=0))[0] 45 | vertical_indicies = np.where(np.any(m, axis=1))[0] 46 | if horizontal_indicies.shape[0]: 47 | x1, x2 = horizontal_indicies[[0, -1]] 48 | y1, y2 = vertical_indicies[[0, -1]] 49 | # x2 and y2 should not be part of the box. Increment by 1. 50 | x2 += 1 51 | y2 += 1 52 | else: 53 | # No mask for this instance. Might happen due to 54 | # resizing or cropping. 
Set bbox to zeros 55 | x1, x2, y1, y2 = 0, 0, 0, 0 56 | boxes[i] = np.array([y1, x1, y2, x2]) 57 | return boxes.astype(np.int32) 58 | 59 | 60 | def compute_iou(box, boxes, box_area, boxes_area): 61 | """Calculates IoU of the given box with the array of the given boxes. 62 | box: 1D vector [y1, x1, y2, x2] 63 | boxes: [boxes_count, (y1, x1, y2, x2)] 64 | box_area: float. the area of 'box' 65 | boxes_area: array of length boxes_count. 66 | 67 | Note: the areas are passed in rather than calculated here for 68 | efficiency. Calculate once in the caller to avoid duplicate work. 69 | """ 70 | # Calculate intersection areas 71 | y1 = np.maximum(box[0], boxes[:, 0]) 72 | y2 = np.minimum(box[2], boxes[:, 2]) 73 | x1 = np.maximum(box[1], boxes[:, 1]) 74 | x2 = np.minimum(box[3], boxes[:, 3]) 75 | intersection = np.maximum(x2 - x1, 0) * np.maximum(y2 - y1, 0) 76 | union = box_area + boxes_area[:] - intersection[:] 77 | iou = intersection / union 78 | return iou 79 | 80 | 81 | def compute_overlaps(boxes1, boxes2): 82 | """Computes IoU overlaps between two sets of boxes. 83 | boxes1, boxes2: [N, (y1, x1, y2, x2)]. 84 | 85 | For better performance, pass the largest set first and the smaller second. 86 | """ 87 | # Areas of anchors and GT boxes 88 | area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1]) 89 | area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1]) 90 | 91 | # Compute overlaps to generate matrix [boxes1 count, boxes2 count] 92 | # Each cell contains the IoU value. 93 | overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0])) 94 | for i in range(overlaps.shape[1]): 95 | box2 = boxes2[i] 96 | overlaps[:, i] = compute_iou(box2, boxes1, area2[i], area1) 97 | return overlaps 98 | 99 | 100 | def compute_overlaps_masks(masks1, masks2): 101 | """Computes IoU overlaps between two sets of masks. 102 | masks1, masks2: [Height, Width, instances] 103 | """ 104 | 105 | # If either set of masks is empty return empty result 106 | if masks1.shape[-1] == 0 or masks2.shape[-1] == 0: 107 | return np.zeros((masks1.shape[-1], masks2.shape[-1])) 108 | # flatten masks and compute their areas 109 | masks1 = np.reshape(masks1 > .5, (-1, masks1.shape[-1])).astype(np.float32) 110 | masks2 = np.reshape(masks2 > .5, (-1, masks2.shape[-1])).astype(np.float32) 111 | area1 = np.sum(masks1, axis=0) 112 | area2 = np.sum(masks2, axis=0) 113 | 114 | # intersections and union 115 | intersections = np.dot(masks1.T, masks2) 116 | union = area1[:, None] + area2[None, :] - intersections 117 | overlaps = intersections / union 118 | 119 | return overlaps 120 | 121 | 122 | def non_max_suppression(boxes, scores, threshold): 123 | """Performs non-maximum suppression and returns indices of kept boxes. 124 | boxes: [N, (y1, x1, y2, x2)]. Notice that (y2, x2) lays outside the box. 125 | scores: 1-D array of box scores. 126 | threshold: Float. IoU threshold to use for filtering. 
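    Example: for boxes [[0, 0, 10, 10], [0, 1, 10, 11]] with scores [0.9, 0.8],
    the two boxes overlap with IoU ~0.82, so with threshold=0.5 the lower-scoring
    box is suppressed and the function returns [0].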
127 | """ 128 | assert boxes.shape[0] > 0 129 | if boxes.dtype.kind != "f": 130 | boxes = boxes.astype(np.float32) 131 | 132 | # Compute box areas 133 | y1 = boxes[:, 0] 134 | x1 = boxes[:, 1] 135 | y2 = boxes[:, 2] 136 | x2 = boxes[:, 3] 137 | area = (y2 - y1) * (x2 - x1) 138 | 139 | # Get indicies of boxes sorted by scores (highest first) 140 | ixs = scores.argsort()[::-1] 141 | 142 | pick = [] 143 | while len(ixs) > 0: 144 | # Pick top box and add its index to the list 145 | i = ixs[0] 146 | pick.append(i) 147 | # Compute IoU of the picked box with the rest 148 | iou = compute_iou(boxes[i], boxes[ixs[1:]], area[i], area[ixs[1:]]) 149 | # Identify boxes with IoU over the threshold. This 150 | # returns indices into ixs[1:], so add 1 to get 151 | # indices into ixs. 152 | remove_ixs = np.where(iou > threshold)[0] + 1 153 | # Remove indices of the picked and overlapped boxes. 154 | ixs = np.delete(ixs, remove_ixs) 155 | ixs = np.delete(ixs, 0) 156 | return np.array(pick, dtype=np.int32) 157 | 158 | 159 | def apply_box_deltas(boxes, deltas): 160 | """Applies the given deltas to the given boxes. 161 | boxes: [N, (y1, x1, y2, x2)]. Note that (y2, x2) is outside the box. 162 | deltas: [N, (dy, dx, log(dh), log(dw))] 163 | """ 164 | boxes = boxes.astype(np.float32) 165 | # Convert to y, x, h, w 166 | height = boxes[:, 2] - boxes[:, 0] 167 | width = boxes[:, 3] - boxes[:, 1] 168 | center_y = boxes[:, 0] + 0.5 * height 169 | center_x = boxes[:, 1] + 0.5 * width 170 | # Apply deltas 171 | center_y += deltas[:, 0] * height 172 | center_x += deltas[:, 1] * width 173 | height *= np.exp(deltas[:, 2]) 174 | width *= np.exp(deltas[:, 3]) 175 | # Convert back to y1, x1, y2, x2 176 | y1 = center_y - 0.5 * height 177 | x1 = center_x - 0.5 * width 178 | y2 = y1 + height 179 | x2 = x1 + width 180 | return np.stack([y1, x1, y2, x2], axis=1) 181 | 182 | 183 | def box_refinement_graph(box, gt_box): 184 | """Compute refinement needed to transform box to gt_box. 185 | box and gt_box are [N, (y1, x1, y2, x2)] 186 | """ 187 | box = tf.cast(box, tf.float32) 188 | gt_box = tf.cast(gt_box, tf.float32) 189 | 190 | height = box[:, 2] - box[:, 0] 191 | width = box[:, 3] - box[:, 1] 192 | center_y = box[:, 0] + 0.5 * height 193 | center_x = box[:, 1] + 0.5 * width 194 | 195 | gt_height = gt_box[:, 2] - gt_box[:, 0] 196 | gt_width = gt_box[:, 3] - gt_box[:, 1] 197 | gt_center_y = gt_box[:, 0] + 0.5 * gt_height 198 | gt_center_x = gt_box[:, 1] + 0.5 * gt_width 199 | 200 | dy = (gt_center_y - center_y) / height 201 | dx = (gt_center_x - center_x) / width 202 | dh = tf.log(gt_height / height) 203 | dw = tf.log(gt_width / width) 204 | 205 | result = tf.stack([dy, dx, dh, dw], axis=1) 206 | return result 207 | 208 | 209 | def box_refinement(box, gt_box): 210 | """Compute refinement needed to transform box to gt_box. 211 | box and gt_box are [N, (y1, x1, y2, x2)]. (y2, x2) is 212 | assumed to be outside the box. 
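    Example: box [0, 0, 10, 10] vs. gt_box [0, 0, 20, 20] gives
    (dy, dx, dh, dw) = (0.5, 0.5, log(2), log(2)): the ground truth is centred
    half a box height down and half a box width right, and is twice as tall and wide.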
213 | """ 214 | box = box.astype(np.float32) 215 | gt_box = gt_box.astype(np.float32) 216 | 217 | height = box[:, 2] - box[:, 0] 218 | width = box[:, 3] - box[:, 1] 219 | center_y = box[:, 0] + 0.5 * height 220 | center_x = box[:, 1] + 0.5 * width 221 | 222 | gt_height = gt_box[:, 2] - gt_box[:, 0] 223 | gt_width = gt_box[:, 3] - gt_box[:, 1] 224 | gt_center_y = gt_box[:, 0] + 0.5 * gt_height 225 | gt_center_x = gt_box[:, 1] + 0.5 * gt_width 226 | 227 | dy = (gt_center_y - center_y) / height 228 | dx = (gt_center_x - center_x) / width 229 | dh = np.log(gt_height / height) 230 | dw = np.log(gt_width / width) 231 | 232 | return np.stack([dy, dx, dh, dw], axis=1) 233 | 234 | 235 | ############################################################ 236 | # Dataset 237 | ############################################################ 238 | 239 | class Dataset(object): 240 | """The base class for dataset classes. 241 | To use it, create a new class that adds functions specific to the dataset 242 | you want to use. For example: 243 | 244 | class CatsAndDogsDataset(Dataset): 245 | def load_cats_and_dogs(self): 246 | ... 247 | def load_mask(self, image_id): 248 | ... 249 | def image_reference(self, image_id): 250 | ... 251 | 252 | See COCODataset and ShapesDataset as examples. 253 | """ 254 | 255 | def __init__(self, class_map=None): 256 | self._image_ids = [] 257 | self.image_info = [] 258 | # Background is always the first class 259 | self.class_info = [{"source": "", "id": 0, "name": "BG"}] 260 | self.source_class_ids = {} 261 | 262 | def add_class(self, source, class_id, class_name): 263 | assert "." not in source, "Source name cannot contain a dot" 264 | # Does the class exist already? 265 | for info in self.class_info: 266 | if info['source'] == source and info["id"] == class_id: 267 | # source.class_id combination already available, skip 268 | return 269 | # Add the class 270 | self.class_info.append({ 271 | "source": source, 272 | "id": class_id, 273 | "name": class_name, 274 | }) 275 | 276 | def add_image(self, source, image_id, path, **kwargs): 277 | image_info = { 278 | "id": image_id, 279 | "source": source, 280 | "path": path, 281 | } 282 | image_info.update(kwargs) 283 | self.image_info.append(image_info) 284 | 285 | def image_reference(self, image_id): 286 | """Return a link to the image in its source Website or details about 287 | the image that help looking it up or debugging it. 288 | 289 | Override for your dataset, but pass to this function 290 | if you encounter images not in your dataset. 291 | """ 292 | return "" 293 | 294 | def prepare(self): 295 | """Prepares the Dataset class for use. 296 | """ 297 | 298 | def clean_name(name): 299 | """Returns a shorter version of object names for cleaner display.""" 300 | return ",".join(name.split(",")[:1]) 301 | 302 | # Build (or rebuild) everything else from the info dicts. 
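        # With FashionDataset above this produces id 0 for the background class
        # plus ids 1..46 for the iMaterialist categories, and source mappings such
        # as "fashion.3" -> 3 that map_source_class_id() resolves further down.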
303 | self.num_classes = len(self.class_info) 304 | self.class_ids = np.arange(self.num_classes) 305 | self.class_names = [clean_name(c["name"]) for c in self.class_info] 306 | self.num_images = len(self.image_info) 307 | self._image_ids = np.arange(self.num_images) 308 | 309 | # Mapping from source class and image IDs to internal IDs 310 | self.class_from_source_map = {"{}.{}".format(info['source'], info['id']): id 311 | for info, id in zip(self.class_info, self.class_ids)} 312 | self.image_from_source_map = {"{}.{}".format(info['source'], info['id']): id 313 | for info, id in zip(self.image_info, self.image_ids)} 314 | 315 | # Map sources to class_ids they support 316 | self.sources = list(set([i['source'] for i in self.class_info])) 317 | self.source_class_ids = {} 318 | # Loop over datasets 319 | for source in self.sources: 320 | self.source_class_ids[source] = [] 321 | # Find classes that belong to this dataset 322 | for i, info in enumerate(self.class_info): 323 | # Include BG class in all datasets 324 | if i == 0 or source == info['source']: 325 | self.source_class_ids[source].append(i) 326 | 327 | def map_source_class_id(self, source_class_id): 328 | """Takes a source class ID and returns the int class ID assigned to it. 329 | 330 | For example: 331 | dataset.map_source_class_id("coco.12") -> 23 332 | """ 333 | return self.class_from_source_map[source_class_id] 334 | 335 | def get_source_class_id(self, class_id, source): 336 | """Map an internal class ID to the corresponding class ID in the source dataset.""" 337 | info = self.class_info[class_id] 338 | assert info['source'] == source 339 | return info['id'] 340 | 341 | @property 342 | def image_ids(self): 343 | return self._image_ids 344 | 345 | def source_image_link(self, image_id): 346 | """Returns the path or URL to the image. 347 | Override this to return a URL to the image if it's available online for easy 348 | debugging. 349 | """ 350 | return self.image_info[image_id]["path"] 351 | 352 | def load_image(self, image_id): 353 | """Load the specified image and return a [H,W,3] Numpy array. 354 | """ 355 | # Load image 356 | image = skimage.io.imread(self.image_info[image_id]['path']) 357 | # If grayscale. Convert to RGB for consistency. 358 | if image.ndim != 3: 359 | image = skimage.color.gray2rgb(image) 360 | # If has an alpha channel, remove it for consistency 361 | if image.shape[-1] == 4: 362 | image = image[..., :3] 363 | return image 364 | 365 | def load_mask(self, image_id): 366 | """Load instance masks for the given image. 367 | 368 | Different datasets use different ways to store masks. Override this 369 | method to load instance masks and return them in the form of am 370 | array of binary masks of shape [height, width, instances]. 371 | 372 | Returns: 373 | masks: A bool array of shape [height, width, instance count] with 374 | a binary mask per instance. 375 | class_ids: a 1D array of class IDs of the instance masks. 376 | """ 377 | # Override this function to load a mask from your dataset. 378 | # Otherwise, it returns an empty mask. 379 | logging.warning("You are using the default load_mask(), maybe you need to define your own one.") 380 | mask = np.empty([0, 0, 0]) 381 | class_ids = np.empty([0], np.int32) 382 | return mask, class_ids 383 | 384 | 385 | def resize_image(image, min_dim=None, max_dim=None, min_scale=None, mode="square"): 386 | """Resizes an image keeping the aspect ratio unchanged. 
387 | 388 | min_dim: if provided, resizes the image such that it's smaller 389 | dimension == min_dim 390 | max_dim: if provided, ensures that the image longest side doesn't 391 | exceed this value. 392 | min_scale: if provided, ensure that the image is scaled up by at least 393 | this percent even if min_dim doesn't require it. 394 | mode: Resizing mode. 395 | none: No resizing. Return the image unchanged. 396 | square: Resize and pad with zeros to get a square image 397 | of size [max_dim, max_dim]. 398 | pad64: Pads width and height with zeros to make them multiples of 64. 399 | If min_dim or min_scale are provided, it scales the image up 400 | before padding. max_dim is ignored in this mode. 401 | The multiple of 64 is needed to ensure smooth scaling of feature 402 | maps up and down the 6 levels of the FPN pyramid (2**6=64). 403 | crop: Picks random crops from the image. First, scales the image based 404 | on min_dim and min_scale, then picks a random crop of 405 | size min_dim x min_dim. Can be used in training only. 406 | max_dim is not used in this mode. 407 | 408 | Returns: 409 | image: the resized image 410 | window: (y1, x1, y2, x2). If max_dim is provided, padding might 411 | be inserted in the returned image. If so, this window is the 412 | coordinates of the image part of the full image (excluding 413 | the padding). The x2, y2 pixels are not included. 414 | scale: The scale factor used to resize the image 415 | padding: Padding added to the image [(top, bottom), (left, right), (0, 0)] 416 | """ 417 | # Keep track of image dtype and return results in the same dtype 418 | image_dtype = image.dtype 419 | # Default window (y1, x1, y2, x2) and default scale == 1. 420 | h, w = image.shape[:2] 421 | window = (0, 0, h, w) 422 | scale = 1 423 | padding = [(0, 0), (0, 0), (0, 0)] 424 | crop = None 425 | 426 | if mode == "none": 427 | return image, window, scale, padding, crop 428 | 429 | # Scale? 430 | if min_dim: 431 | # Scale up but not down 432 | scale = max(1, min_dim / min(h, w)) 433 | if min_scale and scale < min_scale: 434 | scale = min_scale 435 | 436 | # Does it exceed max dim? 437 | if max_dim and mode == "square": 438 | image_max = max(h, w) 439 | if round(image_max * scale) > max_dim: 440 | scale = max_dim / image_max 441 | 442 | # Resize image using bilinear interpolation 443 | if scale != 1: 444 | image = resize(image, (round(h * scale), round(w * scale)), 445 | preserve_range=True) 446 | 447 | # Need padding or cropping? 
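    # Worked example for the "square" branch: a 600x800 image with min_dim=800 and
    # max_dim=1024 is first assigned scale = max(1, 800/600) ~ 1.333, which would push
    # the long side past 1024, so scale is clamped to 1024/800 = 1.28; the resize gives
    # 768x1024, and the padding below adds 128 zero rows at the top and bottom to reach
    # 1024x1024, with window = (128, 0, 896, 1024).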
448 | if mode == "square": 449 | # Get new height and width 450 | h, w = image.shape[:2] 451 | top_pad = (max_dim - h) // 2 452 | bottom_pad = max_dim - h - top_pad 453 | left_pad = (max_dim - w) // 2 454 | right_pad = max_dim - w - left_pad 455 | padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)] 456 | image = np.pad(image, padding, mode='constant', constant_values=0) 457 | window = (top_pad, left_pad, h + top_pad, w + left_pad) 458 | elif mode == "pad64": 459 | h, w = image.shape[:2] 460 | # Both sides must be divisible by 64 461 | assert min_dim % 64 == 0, "Minimum dimension must be a multiple of 64" 462 | # Height 463 | if h % 64 > 0: 464 | max_h = h - (h % 64) + 64 465 | top_pad = (max_h - h) // 2 466 | bottom_pad = max_h - h - top_pad 467 | else: 468 | top_pad = bottom_pad = 0 469 | # Width 470 | if w % 64 > 0: 471 | max_w = w - (w % 64) + 64 472 | left_pad = (max_w - w) // 2 473 | right_pad = max_w - w - left_pad 474 | else: 475 | left_pad = right_pad = 0 476 | padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)] 477 | image = np.pad(image, padding, mode='constant', constant_values=0) 478 | window = (top_pad, left_pad, h + top_pad, w + left_pad) 479 | elif mode == "crop": 480 | # Pick a random crop 481 | h, w = image.shape[:2] 482 | y = random.randint(0, (h - min_dim)) 483 | x = random.randint(0, (w - min_dim)) 484 | crop = (y, x, min_dim, min_dim) 485 | image = image[y:y + min_dim, x:x + min_dim] 486 | window = (0, 0, min_dim, min_dim) 487 | else: 488 | raise Exception("Mode {} not supported".format(mode)) 489 | return image.astype(image_dtype), window, scale, padding, crop 490 | 491 | 492 | def resize_mask(mask, scale, padding, crop=None): 493 | """Resizes a mask using the given scale and padding. 494 | Typically, you get the scale and padding from resize_image() to 495 | ensure both, the image and the mask, are resized consistently. 496 | 497 | scale: mask scaling factor 498 | padding: Padding to add to the mask in the form 499 | [(top, bottom), (left, right), (0, 0)] 500 | """ 501 | # Suppress warning from scipy 0.13.0, the output shape of zoom() is 502 | # calculated with round() instead of int() 503 | with warnings.catch_warnings(): 504 | warnings.simplefilter("ignore") 505 | mask = scipy.ndimage.zoom(mask, zoom=[scale, scale, 1], order=0) 506 | if crop is not None: 507 | y, x, h, w = crop 508 | mask = mask[y:y + h, x:x + w] 509 | else: 510 | mask = np.pad(mask, padding, mode='constant', constant_values=0) 511 | return mask 512 | 513 | 514 | def minimize_mask(bbox, mask, mini_shape): 515 | """Resize masks to a smaller version to reduce memory load. 516 | Mini-masks can be resized back to image scale using expand_masks() 517 | 518 | See inspect_data.ipynb notebook for more details. 519 | """ 520 | mini_mask = np.zeros(mini_shape + (mask.shape[-1],), dtype=bool) 521 | for i in range(mask.shape[-1]): 522 | # Pick slice and cast to bool in case load_mask() returned wrong dtype 523 | m = mask[:, :, i].astype(bool) 524 | y1, x1, y2, x2 = bbox[i][:4] 525 | m = m[y1:y2, x1:x2] 526 | if m.size == 0: 527 | raise Exception("Invalid bounding box with area of zero") 528 | # Resize with bilinear interpolation 529 | m = resize(m, mini_shape) 530 | mini_mask[:, :, i] = np.around(m).astype(np.bool) 531 | return mini_mask 532 | 533 | 534 | def expand_mask(bbox, mini_mask, image_shape): 535 | """Resizes mini masks back to image size. Reverses the change 536 | of minimize_mask(). 537 | 538 | See inspect_data.ipynb notebook for more details. 
539 | """ 540 | mask = np.zeros(image_shape[:2] + (mini_mask.shape[-1],), dtype=bool) 541 | for i in range(mask.shape[-1]): 542 | m = mini_mask[:, :, i] 543 | y1, x1, y2, x2 = bbox[i][:4] 544 | h = y2 - y1 545 | w = x2 - x1 546 | # Resize with bilinear interpolation 547 | m = resize(m, (h, w)) 548 | mask[y1:y2, x1:x2, i] = np.around(m).astype(np.bool) 549 | return mask 550 | 551 | 552 | # TODO: Build and use this function to reduce code duplication 553 | def mold_mask(mask, config): 554 | pass 555 | 556 | 557 | def unmold_mask(mask, bbox, image_shape): 558 | """Converts a mask generated by the neural network to a format similar 559 | to its original shape. 560 | mask: [height, width] of type float. A small, typically 28x28 mask. 561 | bbox: [y1, x1, y2, x2]. The box to fit the mask in. 562 | 563 | Returns a binary mask with the same size as the original image. 564 | """ 565 | threshold = 0.5 566 | y1, x1, y2, x2 = bbox 567 | mask = resize(mask, (y2 - y1, x2 - x1)) 568 | mask = np.where(mask >= threshold, 1, 0).astype(np.bool) 569 | 570 | # Put the mask in the right location. 571 | full_mask = np.zeros(image_shape[:2], dtype=np.bool) 572 | full_mask[y1:y2, x1:x2] = mask 573 | return full_mask 574 | 575 | 576 | ############################################################ 577 | # Anchors 578 | ############################################################ 579 | 580 | def generate_anchors(scales, ratios, shape, feature_stride, anchor_stride): 581 | """ 582 | scales: 1D array of anchor sizes in pixels. Example: [32, 64, 128] 583 | ratios: 1D array of anchor ratios of width/height. Example: [0.5, 1, 2] 584 | shape: [height, width] spatial shape of the feature map over which 585 | to generate anchors. 586 | feature_stride: Stride of the feature map relative to the image in pixels. 587 | anchor_stride: Stride of anchors on the feature map. For example, if the 588 | value is 2 then generate anchors for every other feature map pixel. 589 | """ 590 | # Get all combinations of scales and ratios 591 | scales, ratios = np.meshgrid(np.array(scales), np.array(ratios)) 592 | scales = scales.flatten() 593 | ratios = ratios.flatten() 594 | 595 | # Enumerate heights and widths from scales and ratios 596 | heights = scales / np.sqrt(ratios) 597 | widths = scales * np.sqrt(ratios) 598 | 599 | # Enumerate shifts in feature space 600 | shifts_y = np.arange(0, shape[0], anchor_stride) * feature_stride 601 | shifts_x = np.arange(0, shape[1], anchor_stride) * feature_stride 602 | shifts_x, shifts_y = np.meshgrid(shifts_x, shifts_y) 603 | 604 | # Enumerate combinations of shifts, widths, and heights 605 | box_widths, box_centers_x = np.meshgrid(widths, shifts_x) 606 | box_heights, box_centers_y = np.meshgrid(heights, shifts_y) 607 | 608 | # Reshape to get a list of (y, x) and a list of (h, w) 609 | box_centers = np.stack( 610 | [box_centers_y, box_centers_x], axis=2).reshape([-1, 2]) 611 | box_sizes = np.stack([box_heights, box_widths], axis=2).reshape([-1, 2]) 612 | 613 | # Convert to corner coordinates (y1, x1, y2, x2) 614 | boxes = np.concatenate([box_centers - 0.5 * box_sizes, 615 | box_centers + 0.5 * box_sizes], axis=1) 616 | return boxes 617 | 618 | 619 | def generate_pyramid_anchors(scales, ratios, feature_shapes, feature_strides, 620 | anchor_stride): 621 | """Generate anchors at different levels of a feature pyramid. Each scale 622 | is associated with a level of the pyramid, but each ratio is used in 623 | all levels of the pyramid. 624 | 625 | Returns: 626 | anchors: [N, (y1, x1, y2, x2)]. 
All generated anchors in one array. Sorted 627 | with the same order of the given scales. So, anchors of scale[0] come 628 | first, then anchors of scale[1], and so on. 629 | """ 630 | # Anchors 631 | # [anchor_count, (y1, x1, y2, x2)] 632 | anchors = [] 633 | for i in range(len(scales)): 634 | anchors.append(generate_anchors(scales[i], ratios, feature_shapes[i], 635 | feature_strides[i], anchor_stride)) 636 | return np.concatenate(anchors, axis=0) 637 | 638 | 639 | ############################################################ 640 | # Miscellaneous 641 | ############################################################ 642 | 643 | def trim_zeros(x): 644 | """It's common to have tensors larger than the available data and 645 | pad with zeros. This function removes rows that are all zeros. 646 | 647 | x: [rows, columns]. 648 | """ 649 | assert len(x.shape) == 2 650 | return x[~np.all(x == 0, axis=1)] 651 | 652 | 653 | def compute_matches(gt_boxes, gt_class_ids, gt_masks, 654 | pred_boxes, pred_class_ids, pred_scores, pred_masks, 655 | iou_threshold=0.5, score_threshold=0.0): 656 | """Finds matches between prediction and ground truth instances. 657 | 658 | Returns: 659 | gt_match: 1-D array. For each GT box it has the index of the matched 660 | predicted box. 661 | pred_match: 1-D array. For each predicted box, it has the index of 662 | the matched ground truth box. 663 | overlaps: [pred_boxes, gt_boxes] IoU overlaps. 664 | """ 665 | # Trim zero padding 666 | # TODO: cleaner to do zero unpadding upstream 667 | gt_boxes = trim_zeros(gt_boxes) 668 | gt_masks = gt_masks[..., :gt_boxes.shape[0]] 669 | pred_boxes = trim_zeros(pred_boxes) 670 | pred_scores = pred_scores[:pred_boxes.shape[0]] 671 | # Sort predictions by score from high to low 672 | indices = np.argsort(pred_scores)[::-1] 673 | pred_boxes = pred_boxes[indices] 674 | pred_class_ids = pred_class_ids[indices] 675 | pred_scores = pred_scores[indices] 676 | pred_masks = pred_masks[..., indices] 677 | 678 | # Compute IoU overlaps [pred_masks, gt_masks] 679 | overlaps = compute_overlaps_masks(pred_masks, gt_masks) 680 | 681 | # Loop through predictions and find matching ground truth boxes 682 | match_count = 0 683 | pred_match = -1 * np.ones([pred_boxes.shape[0]]) 684 | gt_match = -1 * np.ones([gt_boxes.shape[0]]) 685 | for i in range(len(pred_boxes)): 686 | # Find best matching ground truth box 687 | # 1. Sort matches by score 688 | sorted_ixs = np.argsort(overlaps[i])[::-1] 689 | # 2. Remove low scores 690 | low_score_idx = np.where(overlaps[i, sorted_ixs] < score_threshold)[0] 691 | if low_score_idx.size > 0: 692 | sorted_ixs = sorted_ixs[:low_score_idx[0]] 693 | # 3. Find the match 694 | for j in sorted_ixs: 695 | # If ground truth box is already matched, go to next one 696 | if gt_match[j] > -1: 697 | continue 698 | # If we reach IoU smaller than the threshold, end the loop 699 | iou = overlaps[i, j] 700 | if iou < iou_threshold: 701 | break 702 | # Do we have a match? 703 | if pred_class_ids[i] == gt_class_ids[j]: 704 | match_count += 1 705 | gt_match[j] = i 706 | pred_match[i] = j 707 | break 708 | 709 | return gt_match, pred_match, overlaps 710 | 711 | 712 | def compute_ap(gt_boxes, gt_class_ids, gt_masks, 713 | pred_boxes, pred_class_ids, pred_scores, pred_masks, 714 | iou_threshold=0.5): 715 | """Compute Average Precision at a set IoU threshold (default 0.5). 716 | 717 | Returns: 718 | mAP: Mean Average Precision 719 | precisions: List of precisions at different class score thresholds. 
720 | recalls: List of recall values at different class score thresholds. 721 | overlaps: [pred_boxes, gt_boxes] IoU overlaps. 722 | """ 723 | # Get matches and overlaps 724 | gt_match, pred_match, overlaps = compute_matches( 725 | gt_boxes, gt_class_ids, gt_masks, 726 | pred_boxes, pred_class_ids, pred_scores, pred_masks, 727 | iou_threshold) 728 | 729 | # Compute precision and recall at each prediction box step 730 | precisions = np.cumsum(pred_match > -1) / (np.arange(len(pred_match)) + 1) 731 | recalls = np.cumsum(pred_match > -1).astype(np.float32) / len(gt_match) 732 | 733 | # Pad with start and end values to simplify the math 734 | precisions = np.concatenate([[0], precisions, [0]]) 735 | recalls = np.concatenate([[0], recalls, [1]]) 736 | 737 | # Ensure precision values decrease but don't increase. This way, the 738 | # precision value at each recall threshold is the maximum it can be 739 | # for all following recall thresholds, as specified by the VOC paper. 740 | for i in range(len(precisions) - 2, -1, -1): 741 | precisions[i] = np.maximum(precisions[i], precisions[i + 1]) 742 | 743 | # Compute mean AP over recall range 744 | indices = np.where(recalls[:-1] != recalls[1:])[0] + 1 745 | mAP = np.sum((recalls[indices] - recalls[indices - 1]) * 746 | precisions[indices]) 747 | 748 | return mAP, precisions, recalls, overlaps 749 | 750 | 751 | def compute_ap_range(gt_box, gt_class_id, gt_mask, 752 | pred_box, pred_class_id, pred_score, pred_mask, 753 | iou_thresholds=None, verbose=1): 754 | """Compute AP over a range or IoU thresholds. Default range is 0.5-0.95.""" 755 | # Default is 0.5 to 0.95 with increments of 0.05 756 | iou_thresholds = iou_thresholds or np.arange(0.5, 1.0, 0.05) 757 | 758 | # Compute AP over range of IoU thresholds 759 | AP = [] 760 | for iou_threshold in iou_thresholds: 761 | ap, precisions, recalls, overlaps =\ 762 | compute_ap(gt_box, gt_class_id, gt_mask, 763 | pred_box, pred_class_id, pred_score, pred_mask, 764 | iou_threshold=iou_threshold) 765 | if verbose: 766 | print("AP @{:.2f}:\t {:.3f}".format(iou_threshold, ap)) 767 | AP.append(ap) 768 | AP = np.array(AP).mean() 769 | if verbose: 770 | print("AP @{:.2f}-{:.2f}:\t {:.3f}".format( 771 | iou_thresholds[0], iou_thresholds[-1], AP)) 772 | return AP 773 | 774 | 775 | def compute_recall(pred_boxes, gt_boxes, iou): 776 | """Compute the recall at the given IoU threshold. It's an indication 777 | of how many GT boxes were found by the given prediction boxes. 778 | 779 | pred_boxes: [N, (y1, x1, y2, x2)] in image coordinates 780 | gt_boxes: [N, (y1, x1, y2, x2)] in image coordinates 781 | """ 782 | # Measure overlaps 783 | overlaps = compute_overlaps(pred_boxes, gt_boxes) 784 | iou_max = np.max(overlaps, axis=1) 785 | iou_argmax = np.argmax(overlaps, axis=1) 786 | positive_ids = np.where(iou_max >= iou)[0] 787 | matched_gt_boxes = iou_argmax[positive_ids] 788 | 789 | recall = len(set(matched_gt_boxes)) / gt_boxes.shape[0] 790 | return recall, positive_ids 791 | 792 | 793 | # ## Batch Slicing 794 | # Some custom layers support a batch size of 1 only, and require a lot of work 795 | # to support batches greater than 1. This function slices an input tensor 796 | # across the batch dimension and feeds batches of size 1. Effectively, 797 | # an easy way to support batches > 1 quickly with little code modification. 798 | # In the long run, it's more efficient to modify the code to support large 799 | # batches and getting rid of this function. 
Consider this a temporary solution 800 | def batch_slice(inputs, graph_fn, batch_size, names=None): 801 | """Splits inputs into slices and feeds each slice to a copy of the given 802 | computation graph and then combines the results. It allows you to run a 803 | graph on a batch of inputs even if the graph is written to support one 804 | instance only. 805 | 806 | inputs: list of tensors. All must have the same first dimension length 807 | graph_fn: A function that returns a TF tensor that's part of a graph. 808 | batch_size: number of slices to divide the data into. 809 | names: If provided, assigns names to the resulting tensors. 810 | """ 811 | if not isinstance(inputs, list): 812 | inputs = [inputs] 813 | 814 | outputs = [] 815 | for i in range(batch_size): 816 | inputs_slice = [x[i] for x in inputs] 817 | output_slice = graph_fn(*inputs_slice) 818 | if not isinstance(output_slice, (tuple, list)): 819 | output_slice = [output_slice] 820 | outputs.append(output_slice) 821 | # Change outputs from a list of slices where each is 822 | # a list of outputs to a list of outputs and each has 823 | # a list of slices 824 | outputs = list(zip(*outputs)) 825 | 826 | if names is None: 827 | names = [None] * len(outputs) 828 | 829 | result = [tf.stack(o, axis=0, name=n) 830 | for o, n in zip(outputs, names)] 831 | if len(result) == 1: 832 | result = result[0] 833 | 834 | return result 835 | 836 | 837 | def download_trained_weights(coco_model_path, verbose=1): 838 | """Download COCO trained weights from Releases. 839 | 840 | coco_model_path: local path of COCO trained weights 841 | """ 842 | if verbose > 0: 843 | print("Downloading pretrained model to " + coco_model_path + " ...") 844 | with urllib.request.urlopen(COCO_MODEL_URL) as resp, open(coco_model_path, 'wb') as out: 845 | shutil.copyfileobj(resp, out) 846 | if verbose > 0: 847 | print("... done downloading pretrained model!") 848 | 849 | 850 | def norm_boxes(boxes, shape): 851 | """Converts boxes from pixel coordinates to normalized coordinates. 852 | boxes: [N, (y1, x1, y2, x2)] in pixel coordinates 853 | shape: [..., (height, width)] in pixels 854 | 855 | Note: In pixel coordinates (y2, x2) is outside the box. But in normalized 856 | coordinates it's inside the box. 857 | 858 | Returns: 859 | [N, (y1, x1, y2, x2)] in normalized coordinates 860 | """ 861 | h, w = shape 862 | scale = np.array([h - 1, w - 1, h - 1, w - 1]) 863 | shift = np.array([0, 0, 1, 1]) 864 | return np.divide((boxes - shift), scale).astype(np.float32) 865 | 866 | 867 | def denorm_boxes(boxes, shape): 868 | """Converts boxes from normalized coordinates to pixel coordinates. 869 | boxes: [N, (y1, x1, y2, x2)] in normalized coordinates 870 | shape: [..., (height, width)] in pixels 871 | 872 | Note: In pixel coordinates (y2, x2) is outside the box. But in normalized 873 | coordinates it's inside the box. 874 | 875 | Returns: 876 | [N, (y1, x1, y2, x2)] in pixel coordinates 877 | """ 878 | h, w = shape 879 | scale = np.array([h - 1, w - 1, h - 1, w - 1]) 880 | shift = np.array([0, 0, 1, 1]) 881 | return np.around(np.multiply(boxes, scale) + shift).astype(np.int32) 882 | 883 | 884 | def resize(image, output_shape, order=1, mode='constant', cval=0, clip=True, 885 | preserve_range=False, anti_aliasing=False, anti_aliasing_sigma=None): 886 | """A wrapper for Scikit-Image resize(). 887 | 888 | Scikit-Image generates warnings on every call to resize() if it doesn't 889 | receive the right parameters. The right parameters depend on the version 890 | of skimage. 
This solves the problem by using different parameters per 891 | version. And it provides a central place to control resizing defaults. 892 | """ 893 | if LooseVersion(skimage.__version__) >= LooseVersion("0.14"): 894 | # New in 0.14: anti_aliasing. Default it to False for backward 895 | # compatibility with skimage 0.13. 896 | return skimage.transform.resize( 897 | image, output_shape, 898 | order=order, mode=mode, cval=cval, clip=clip, 899 | preserve_range=preserve_range, anti_aliasing=anti_aliasing, 900 | anti_aliasing_sigma=anti_aliasing_sigma) 901 | else: 902 | return skimage.transform.resize( 903 | image, output_shape, 904 | order=order, mode=mode, cval=cval, clip=clip, 905 | preserve_range=preserve_range) 906 | -------------------------------------------------------------------------------- /mrcnn/visualize.py: -------------------------------------------------------------------------------- 1 | """ 2 | Mask R-CNN 3 | Display and Visualization Functions. 4 | 5 | Copyright (c) 2017 Matterport, Inc. 6 | Licensed under the MIT License (see LICENSE for details) 7 | Written by Waleed Abdulla 8 | """ 9 | 10 | import os 11 | import sys 12 | import random 13 | import itertools 14 | import colorsys 15 | 16 | import numpy as np 17 | from skimage.measure import find_contours 18 | import matplotlib.pyplot as plt 19 | from matplotlib import patches, lines 20 | from matplotlib.patches import Polygon 21 | import IPython.display 22 | 23 | # Root directory of the project 24 | ROOT_DIR = os.path.abspath("../") 25 | 26 | # Import Mask RCNN 27 | sys.path.append(ROOT_DIR) # To find local version of the library 28 | from mrcnn import utils 29 | 30 | 31 | ############################################################ 32 | # Visualization 33 | ############################################################ 34 | 35 | def display_images(images, titles=None, cols=4, cmap=None, norm=None, 36 | interpolation=None): 37 | """Display the given set of images, optionally with titles. 38 | images: list or array of image tensors in HWC format. 39 | titles: optional. A list of titles to display with each image. 40 | cols: number of images per row 41 | cmap: Optional. Color map to use. For example, "Blues". 42 | norm: Optional. A Normalize instance to map values to colors. 43 | interpolation: Optional. Image interpolation to use for display. 44 | """ 45 | titles = titles if titles is not None else [""] * len(images) 46 | rows = len(images) // cols + 1 47 | plt.figure(figsize=(14, 14 * rows // cols)) 48 | i = 1 49 | for image, title in zip(images, titles): 50 | plt.subplot(rows, cols, i) 51 | plt.title(title, fontsize=9) 52 | plt.axis('off') 53 | plt.imshow(image.astype(np.uint8), cmap=cmap, 54 | norm=norm, interpolation=interpolation) 55 | i += 1 56 | plt.show() 57 | 58 | 59 | def random_colors(N, bright=True): 60 | """ 61 | Generate random colors. 62 | To get visually distinct colors, generate them in HSV space then 63 | convert to RGB. 64 | """ 65 | brightness = 1.0 if bright else 0.7 66 | hsv = [(i / N, 1, brightness) for i in range(N)] 67 | colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv)) 68 | random.shuffle(colors) 69 | return colors 70 | 71 | 72 | def apply_mask(image, mask, color, alpha=0.5): 73 | """Apply the given mask to the image. 
74 | """ 75 | for c in range(3): 76 | image[:, :, c] = np.where(mask == 1, 77 | image[:, :, c] * 78 | (1 - alpha) + alpha * color[c] * 255, 79 | image[:, :, c]) 80 | return image 81 | 82 | 83 | def display_instances(image, boxes, masks, class_ids, class_names, 84 | scores=None, title="", 85 | figsize=(16, 16), ax=None, 86 | show_mask=True, show_bbox=True, 87 | colors=None, captions=None): 88 | """ 89 | boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates. 90 | masks: [height, width, num_instances] 91 | class_ids: [num_instances] 92 | class_names: list of class names of the dataset 93 | scores: (optional) confidence scores for each box 94 | title: (optional) Figure title 95 | show_mask, show_bbox: To show masks and bounding boxes or not 96 | figsize: (optional) the size of the image 97 | colors: (optional) An array or colors to use with each object 98 | captions: (optional) A list of strings to use as captions for each object 99 | """ 100 | # Number of instances 101 | N = boxes.shape[0] 102 | if not N: 103 | print("\n*** No instances to display *** \n") 104 | else: 105 | assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0] 106 | 107 | # If no axis is passed, create one and automatically call show() 108 | auto_show = False 109 | if not ax: 110 | _, ax = plt.subplots(1, figsize=figsize) 111 | auto_show = True 112 | 113 | # Generate random colors 114 | colors = colors or random_colors(N) 115 | 116 | # Show area outside image boundaries. 117 | height, width = image.shape[:2] 118 | ax.set_ylim(height + 10, -10) 119 | ax.set_xlim(-10, width + 10) 120 | ax.axis('off') 121 | ax.set_title(title) 122 | 123 | masked_image = image.astype(np.uint32).copy() 124 | for i in range(N): 125 | color = colors[i] 126 | 127 | # Bounding box 128 | if not np.any(boxes[i]): 129 | # Skip this instance. Has no bbox. Likely lost in image cropping. 130 | continue 131 | y1, x1, y2, x2 = boxes[i] 132 | if show_bbox: 133 | p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2, 134 | alpha=0.7, linestyle="dashed", 135 | edgecolor=color, facecolor='none') 136 | ax.add_patch(p) 137 | 138 | # Label 139 | if not captions: 140 | class_id = class_ids[i] 141 | score = scores[i] if scores is not None else None 142 | label = class_names[class_id] 143 | caption = "{} {:.3f}".format(label, score) if score else label 144 | else: 145 | caption = captions[i] 146 | ax.text(x1, y1 + 8, caption, 147 | color='w', size=11, backgroundcolor="none") 148 | 149 | # Mask 150 | mask = masks[:, :, i] 151 | if show_mask: 152 | masked_image = apply_mask(masked_image, mask, color) 153 | 154 | # Mask Polygon 155 | # Pad to ensure proper polygons for masks that touch image edges. 
156 | padded_mask = np.zeros( 157 | (mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8) 158 | padded_mask[1:-1, 1:-1] = mask 159 | contours = find_contours(padded_mask, 0.5) 160 | for verts in contours: 161 | # Subtract the padding and flip (y, x) to (x, y) 162 | verts = np.fliplr(verts) - 1 163 | p = Polygon(verts, facecolor="none", edgecolor=color) 164 | ax.add_patch(p) 165 | ax.imshow(masked_image.astype(np.uint8)) 166 | if auto_show: 167 | plt.show() 168 | 169 | 170 | def display_differences(image, 171 | gt_box, gt_class_id, gt_mask, 172 | pred_box, pred_class_id, pred_score, pred_mask, 173 | class_names, title="", ax=None, 174 | show_mask=True, show_box=True, 175 | iou_threshold=0.5, score_threshold=0.5): 176 | """Display ground truth and prediction instances on the same image.""" 177 | # Match predictions to ground truth 178 | gt_match, pred_match, overlaps = utils.compute_matches( 179 | gt_box, gt_class_id, gt_mask, 180 | pred_box, pred_class_id, pred_score, pred_mask, 181 | iou_threshold=iou_threshold, score_threshold=score_threshold) 182 | # Ground truth = green. Predictions = red 183 | colors = [(0, 1, 0, .8)] * len(gt_match)\ 184 | + [(1, 0, 0, 1)] * len(pred_match) 185 | # Concatenate GT and predictions 186 | class_ids = np.concatenate([gt_class_id, pred_class_id]) 187 | scores = np.concatenate([np.zeros([len(gt_match)]), pred_score]) 188 | boxes = np.concatenate([gt_box, pred_box]) 189 | masks = np.concatenate([gt_mask, pred_mask], axis=-1) 190 | # Captions per instance show score/IoU 191 | captions = ["" for m in gt_match] + ["{:.2f} / {:.2f}".format( 192 | pred_score[i], 193 | (overlaps[i, int(pred_match[i])] 194 | if pred_match[i] > -1 else overlaps[i].max())) 195 | for i in range(len(pred_match))] 196 | # Set title if not provided 197 | title = title or "Ground Truth and Detections\n GT=green, pred=red, captions: score/IoU" 198 | # Display 199 | display_instances( 200 | image, 201 | boxes, masks, class_ids, 202 | class_names, scores, ax=ax, 203 | show_bbox=show_box, show_mask=show_mask, 204 | colors=colors, captions=captions, 205 | title=title) 206 | 207 | 208 | def draw_rois(image, rois, refined_rois, mask, class_ids, class_names, limit=10): 209 | """ 210 | anchors: [n, (y1, x1, y2, x2)] list of anchors in image coordinates. 211 | proposals: [n, 4] the same anchors but refined to fit objects better. 212 | """ 213 | masked_image = image.copy() 214 | 215 | # Pick random anchors in case there are too many. 216 | ids = np.arange(rois.shape[0], dtype=np.int32) 217 | ids = np.random.choice( 218 | ids, limit, replace=False) if ids.shape[0] > limit else ids 219 | 220 | fig, ax = plt.subplots(1, figsize=(12, 12)) 221 | if rois.shape[0] > limit: 222 | plt.title("Showing {} random ROIs out of {}".format( 223 | len(ids), rois.shape[0])) 224 | else: 225 | plt.title("{} ROIs".format(len(ids))) 226 | 227 | # Show area outside image boundaries. 
228 | ax.set_ylim(image.shape[0] + 20, -20) 229 | ax.set_xlim(-50, image.shape[1] + 20) 230 | ax.axis('off') 231 | 232 | for i, id in enumerate(ids): 233 | color = np.random.rand(3) 234 | class_id = class_ids[id] 235 | # ROI 236 | y1, x1, y2, x2 = rois[id] 237 | p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2, 238 | edgecolor=color if class_id else "gray", 239 | facecolor='none', linestyle="dashed") 240 | ax.add_patch(p) 241 | # Refined ROI 242 | if class_id: 243 | ry1, rx1, ry2, rx2 = refined_rois[id] 244 | p = patches.Rectangle((rx1, ry1), rx2 - rx1, ry2 - ry1, linewidth=2, 245 | edgecolor=color, facecolor='none') 246 | ax.add_patch(p) 247 | # Connect the top-left corners of the anchor and proposal for easy visualization 248 | ax.add_line(lines.Line2D([x1, rx1], [y1, ry1], color=color)) 249 | 250 | # Label 251 | label = class_names[class_id] 252 | ax.text(rx1, ry1 + 8, "{}".format(label), 253 | color='w', size=11, backgroundcolor="none") 254 | 255 | # Mask 256 | m = utils.unmold_mask(mask[id], rois[id] 257 | [:4].astype(np.int32), image.shape) 258 | masked_image = apply_mask(masked_image, m, color) 259 | 260 | ax.imshow(masked_image) 261 | 262 | # Print stats 263 | print("Positive ROIs: ", class_ids[class_ids > 0].shape[0]) 264 | print("Negative ROIs: ", class_ids[class_ids == 0].shape[0]) 265 | print("Positive Ratio: {:.2f}".format( 266 | class_ids[class_ids > 0].shape[0] / class_ids.shape[0])) 267 | 268 | 269 | # TODO: Replace with matplotlib equivalent? 270 | def draw_box(image, box, color): 271 | """Draw 3-pixel width bounding boxes on the given image array. 272 | color: list of 3 int values for RGB. 273 | """ 274 | y1, x1, y2, x2 = box 275 | image[y1:y1 + 2, x1:x2] = color 276 | image[y2:y2 + 2, x1:x2] = color 277 | image[y1:y2, x1:x1 + 2] = color 278 | image[y1:y2, x2:x2 + 2] = color 279 | return image 280 | 281 | 282 | def display_top_masks(image, mask, class_ids, class_names, limit=4): 283 | """Display the given image and the top few class masks.""" 284 | to_display = [] 285 | titles = [] 286 | to_display.append(image) 287 | titles.append("H x W={}x{}".format(image.shape[0], image.shape[1])) 288 | # Pick top prominent classes in this image 289 | unique_class_ids = np.unique(class_ids) 290 | mask_area = [np.sum(mask[:, :, np.where(class_ids == i)[0]]) 291 | for i in unique_class_ids] 292 | top_ids = [v[0] for v in sorted(zip(unique_class_ids, mask_area), 293 | key=lambda r: r[1], reverse=True) if v[1] > 0] 294 | # Generate images and titles 295 | for i in range(limit): 296 | class_id = top_ids[i] if i < len(top_ids) else -1 297 | # Pull masks of instances belonging to the same class. 298 | m = mask[:, :, np.where(class_ids == class_id)[0]] 299 | m = np.sum(m * np.arange(1, m.shape[-1] + 1), -1) 300 | to_display.append(m) 301 | titles.append(class_names[class_id] if class_id != -1 else "-") 302 | display_images(to_display, titles=titles, cols=limit + 1, cmap="Blues_r") 303 | 304 | 305 | def plot_precision_recall(AP, precisions, recalls): 306 | """Draw the precision-recall curve. 307 | 308 | AP: Average precision at IoU >= 0.5 309 | precisions: list of precision values 310 | recalls: list of recall values 311 | """ 312 | # Plot the Precision-Recall curve 313 | _, ax = plt.subplots(1) 314 | ax.set_title("Precision-Recall Curve. 
AP@50 = {:.3f}".format(AP)) 315 | ax.set_ylim(0, 1.1) 316 | ax.set_xlim(0, 1.1) 317 | _ = ax.plot(recalls, precisions) 318 | 319 | 320 | def plot_overlaps(gt_class_ids, pred_class_ids, pred_scores, 321 | overlaps, class_names, threshold=0.5): 322 | """Draw a grid showing how ground truth objects are classified. 323 | gt_class_ids: [N] int. Ground truth class IDs 324 | pred_class_id: [N] int. Predicted class IDs 325 | pred_scores: [N] float. The probability scores of predicted classes 326 | overlaps: [pred_boxes, gt_boxes] IoU overlaps of predictions and GT boxes. 327 | class_names: list of all class names in the dataset 328 | threshold: Float. The prediction probability required to predict a class 329 | """ 330 | gt_class_ids = gt_class_ids[gt_class_ids != 0] 331 | pred_class_ids = pred_class_ids[pred_class_ids != 0] 332 | 333 | plt.figure(figsize=(12, 10)) 334 | plt.imshow(overlaps, interpolation='nearest', cmap=plt.cm.Blues) 335 | plt.yticks(np.arange(len(pred_class_ids)), 336 | ["{} ({:.2f})".format(class_names[int(id)], pred_scores[i]) 337 | for i, id in enumerate(pred_class_ids)]) 338 | plt.xticks(np.arange(len(gt_class_ids)), 339 | [class_names[int(id)] for id in gt_class_ids], rotation=90) 340 | 341 | thresh = overlaps.max() / 2. 342 | for i, j in itertools.product(range(overlaps.shape[0]), 343 | range(overlaps.shape[1])): 344 | text = "" 345 | if overlaps[i, j] > threshold: 346 | text = "match" if gt_class_ids[j] == pred_class_ids[i] else "wrong" 347 | color = ("white" if overlaps[i, j] > thresh 348 | else "black" if overlaps[i, j] > 0 349 | else "grey") 350 | plt.text(j, i, "{:.3f}\n{}".format(overlaps[i, j], text), 351 | horizontalalignment="center", verticalalignment="center", 352 | fontsize=9, color=color) 353 | 354 | plt.tight_layout() 355 | plt.xlabel("Ground Truth") 356 | plt.ylabel("Predictions") 357 | 358 | 359 | def draw_boxes(image, boxes=None, refined_boxes=None, 360 | masks=None, captions=None, visibilities=None, 361 | title="", ax=None): 362 | """Draw bounding boxes and segmentation masks with different 363 | customizations. 364 | 365 | boxes: [N, (y1, x1, y2, x2, class_id)] in image coordinates. 366 | refined_boxes: Like boxes, but draw with solid lines to show 367 | that they're the result of refining 'boxes'. 368 | masks: [N, height, width] 369 | captions: List of N titles to display on each box 370 | visibilities: (optional) List of values of 0, 1, or 2. Determine how 371 | prominent each bounding box should be. 372 | title: An optional title to show over the image 373 | ax: (optional) Matplotlib axis to draw on. 374 | """ 375 | # Number of boxes 376 | assert boxes is not None or refined_boxes is not None 377 | N = boxes.shape[0] if boxes is not None else refined_boxes.shape[0] 378 | 379 | # Matplotlib Axis 380 | if not ax: 381 | _, ax = plt.subplots(1, figsize=(12, 12)) 382 | 383 | # Generate random colors 384 | colors = random_colors(N) 385 | 386 | # Show area outside image boundaries. 
387 | margin = image.shape[0] // 10 388 | ax.set_ylim(image.shape[0] + margin, -margin) 389 | ax.set_xlim(-margin, image.shape[1] + margin) 390 | ax.axis('off') 391 | 392 | ax.set_title(title) 393 | 394 | masked_image = image.astype(np.uint32).copy() 395 | for i in range(N): 396 | # Box visibility 397 | visibility = visibilities[i] if visibilities is not None else 1 398 | if visibility == 0: 399 | color = "gray" 400 | style = "dotted" 401 | alpha = 0.5 402 | elif visibility == 1: 403 | color = colors[i] 404 | style = "dotted" 405 | alpha = 1 406 | elif visibility == 2: 407 | color = colors[i] 408 | style = "solid" 409 | alpha = 1 410 | 411 | # Boxes 412 | if boxes is not None: 413 | if not np.any(boxes[i]): 414 | # Skip this instance. Has no bbox. Likely lost in cropping. 415 | continue 416 | y1, x1, y2, x2 = boxes[i] 417 | p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2, 418 | alpha=alpha, linestyle=style, 419 | edgecolor=color, facecolor='none') 420 | ax.add_patch(p) 421 | 422 | # Refined boxes 423 | if refined_boxes is not None and visibility > 0: 424 | ry1, rx1, ry2, rx2 = refined_boxes[i].astype(np.int32) 425 | p = patches.Rectangle((rx1, ry1), rx2 - rx1, ry2 - ry1, linewidth=2, 426 | edgecolor=color, facecolor='none') 427 | ax.add_patch(p) 428 | # Connect the top-left corners of the anchor and proposal 429 | if boxes is not None: 430 | ax.add_line(lines.Line2D([x1, rx1], [y1, ry1], color=color)) 431 | 432 | # Captions 433 | if captions is not None: 434 | caption = captions[i] 435 | # If there are refined boxes, display captions on them 436 | if refined_boxes is not None: 437 | y1, x1, y2, x2 = ry1, rx1, ry2, rx2 438 | ax.text(x1, y1, caption, size=11, verticalalignment='top', 439 | color='w', backgroundcolor="none", 440 | bbox={'facecolor': color, 'alpha': 0.5, 441 | 'pad': 2, 'edgecolor': 'none'}) 442 | 443 | # Masks 444 | if masks is not None: 445 | mask = masks[:, :, i] 446 | masked_image = apply_mask(masked_image, mask, color) 447 | # Mask Polygon 448 | # Pad to ensure proper polygons for masks that touch image edges. 449 | padded_mask = np.zeros( 450 | (mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8) 451 | padded_mask[1:-1, 1:-1] = mask 452 | contours = find_contours(padded_mask, 0.5) 453 | for verts in contours: 454 | # Subtract the padding and flip (y, x) to (x, y) 455 | verts = np.fliplr(verts) - 1 456 | p = Polygon(verts, facecolor="none", edgecolor=color) 457 | ax.add_patch(p) 458 | ax.imshow(masked_image.astype(np.uint8)) 459 | 460 | 461 | def display_table(table): 462 | """Display values in a table format. 463 | table: an iterable of rows, and each row is an iterable of values. 464 | """ 465 | html = "" 466 | for row in table: 467 | row_html = "" 468 | for col in row: 469 | row_html += "{:40}".format(str(col)) 470 | html += "" + row_html + "" 471 | html = "" + html + "
" 472 | IPython.display.display(IPython.display.HTML(html)) 473 | 474 | 475 | def display_weight_stats(model): 476 | """Scans all the weights in the model and returns a list of tuples 477 | that contain stats about each weight. 478 | """ 479 | layers = model.get_trainable_layers() 480 | table = [["WEIGHT NAME", "SHAPE", "MIN", "MAX", "STD"]] 481 | for l in layers: 482 | weight_values = l.get_weights() # list of Numpy arrays 483 | weight_tensors = l.weights # list of TF tensors 484 | for i, w in enumerate(weight_values): 485 | weight_name = weight_tensors[i].name 486 | # Detect problematic layers. Exclude biases of conv layers. 487 | alert = "" 488 | if w.min() == w.max() and not (l.__class__.__name__ == "Conv2D" and i == 1): 489 | alert += "*** dead?" 490 | if np.abs(w.min()) > 1000 or np.abs(w.max()) > 1000: 491 | alert += "*** Overflow?" 492 | # Add row 493 | table.append([ 494 | weight_name + alert, 495 | str(w.shape), 496 | "{:+9.4f}".format(w.min()), 497 | "{:+10.4f}".format(w.max()), 498 | "{:+9.4f}".format(w.std()), 499 | ]) 500 | display_table(table) 501 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy>=1.14.6 2 | IPython[all]>=7.0.1 3 | pandas>=0.23.4 4 | keras>=2.2.4 5 | matplotlib>=3.0.0 6 | scikit-learn>=0.20.0 7 | h5py>=2.8.0 8 | pydot>=1.2.4 9 | tqdm>=4.31.1 10 | scipy>=1.1.0 11 | livelossplot>=0.2.0 12 | scikit-image>=0.14.1 13 | tensorboard>=1.12.0 14 | tensorflow>=1.3.0 15 | opencv-python>=4.0.0.21 16 | imgaug>=0.2.8 -------------------------------------------------------------------------------- /utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cenkcorapci/fashion-parser/c42b3a11eb90118e02e5a3f247264c9b6e4b4f77/utils/__init__.py -------------------------------------------------------------------------------- /utils/image_utils.py: -------------------------------------------------------------------------------- 1 | import itertools 2 | 3 | import cv2 4 | import numpy as np 5 | 6 | from commons.config import IMAGE_SIZE 7 | 8 | 9 | def resize_image(image_path): 10 | img = cv2.imread(image_path) 11 | img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) 12 | img = cv2.resize(img, (IMAGE_SIZE, IMAGE_SIZE), interpolation=cv2.INTER_AREA) 13 | return img 14 | 15 | 16 | def to_rle(bits): 17 | ''' 18 | Convert data to run-length encoding 19 | :param bits: 20 | :return: 21 | ''' 22 | rle = [] 23 | pos = 0 24 | for bit, group in itertools.groupby(bits): 25 | group_list = list(group) 26 | if bit: 27 | rle.extend([pos, sum(group_list)]) 28 | pos += len(group_list) 29 | return rle 30 | 31 | 32 | def refine_masks(masks, rois): 33 | ''' 34 | Since the submission system does not permit overlapped masks, we have to fix them 35 | :param masks: 36 | :param rois: 37 | :return: 38 | ''' 39 | areas = np.sum(masks.reshape(-1, masks.shape[-1]), axis=0) 40 | mask_index = np.argsort(areas) 41 | union_mask = np.zeros(masks.shape[:-1], dtype=bool) 42 | for m in mask_index: 43 | masks[:, :, m] = np.logical_and(masks[:, :, m], np.logical_not(union_mask)) 44 | union_mask = np.logical_or(masks[:, :, m], union_mask) 45 | for m in range(masks.shape[-1]): 46 | mask_pos = np.where(masks[:, :, m] == True) 47 | if np.any(mask_pos): 48 | y1, x1 = np.min(mask_pos, axis=1) 49 | y2, x2 = np.max(mask_pos, axis=1) 50 | rois[m, :] = [y1, x1, y2, x2] 51 | return masks, rois 52 | 
--------------------------------------------------------------------------------
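
For reference, a minimal usage sketch (not part of the repository) showing how the submission helpers in *utils/image_utils.py* could be chained after inference: `refine_masks` removes overlaps between predicted instance masks and tightens their boxes, and `to_rle` turns each flattened mask into `[start, run-length, ...]` pairs. The dummy masks below and the column-major flattening are assumptions for illustration only; in the real pipeline the masks and ROIs would presumably come from the Mask R-CNN detection results.

```python
import numpy as np

from utils.image_utils import refine_masks, to_rle

# Two overlapping dummy instance masks, [H, W, N] boolean, plus their boxes.
masks = np.zeros((8, 8, 2), dtype=bool)
masks[1:5, 1:5, 0] = True
masks[3:7, 3:7, 1] = True
rois = np.array([[1, 1, 5, 5], [3, 3, 7, 7]])

# Resolve overlaps (smaller masks keep their pixels) and refit the boxes.
masks, rois = refine_masks(masks, rois)

# Encode each mask as [start, length, start, length, ...] for a submission row.
for m in range(masks.shape[-1]):
    rle = to_rle(masks[:, :, m].T.flatten())  # column-major order is an assumption
    print(rle)
```

Because `refine_masks` processes instances from smallest to largest area, a smaller garment overlapped by a larger one keeps its pixels, which satisfies the constraint noted in its docstring that submitted masks may not overlap.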