├── Helperfiles ├── inferFolder.ipynb ├── readme └── trainDataset.ipynb ├── Model ├── __pycache__ │ ├── config.cpython-35.pyc │ ├── model.cpython-35.pyc │ ├── utils.cpython-35.pyc │ └── visualize.cpython-35.pyc ├── config.py ├── model.py ├── readme ├── setup.py ├── utils.py └── visualize.py ├── README.md ├── Weights └── Readme ├── inferFolder.ipynb ├── segmented ├── 04214d167l.png ├── 04214d168.png └── 04225d553.png └── trainDataset.ipynb /Helperfiles/inferFolder.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "import os\n", 10 | "import sys\n", 11 | "import random\n", 12 | "import math\n", 13 | "import re\n", 14 | "import time\n", 15 | "import numpy as np\n", 16 | "import cv2\n", 17 | "import matplotlib\n", 18 | "import matplotlib.pyplot as plt\n", 19 | "import glob\n", 20 | "from sklearn import model_selection\n", 21 | "from config import Config\n", 22 | "import utils\n", 23 | "import model as modellib\n", 24 | "import visualize\n", 25 | "from model import log\n", 26 | "\n", 27 | "%matplotlib inline \n", 28 | "\n", 29 | "# Root directory of the project\n", 30 | "ROOT_DIR = os.getcwd()\n", 31 | "os.chdir(ROOT_DIR)\n", 32 | "\n", 33 | "# Directory to save logs and trained model\n", 34 | "MODEL_DIR = os.path.join(ROOT_DIR, \"logs\")\n", 35 | "\n", 36 | "# Path to COCO trained weights\n", 37 | "COCO_MODEL_PATH = os.path.join(ROOT_DIR, \"mask_rcnn_coco.h5\")\n", 38 | "\n", 39 | "MODEL_PATH = \"\"" 40 | ] 41 | }, 42 | { 43 | "cell_type": "code", 44 | "execution_count": null, 45 | "metadata": { 46 | "scrolled": true 47 | }, 48 | "outputs": [], 49 | "source": [ 50 | "class irisConfig(Config):\n", 51 | " \n", 52 | " \n", 53 | " # Give the configuration a recognizable name\n", 54 | " NAME = \"irises\"\n", 55 | "\n", 56 | " GPU_COUNT = 1\n", 57 | " IMAGES_PER_GPU = 2\n", 58 | "\n", 59 | " NUM_CLASSES = 1 + 1 # background + 3 shapes\n", 60 | "\n", 61 | " IMAGE_MIN_DIM = 768\n", 62 | " IMAGE_MAX_DIM = 1024\n", 63 | "\n", 64 | " # Use a small epoch since the data is simple\n", 65 | " STEPS_PER_EPOCH = 1000\n", 66 | "\n", 67 | " VALIDATION_STEPS = 50\n", 68 | " \n", 69 | "\n", 70 | "\n", 71 | "class InferenceConfig(irisConfig):\n", 72 | " GPU_COUNT = 1\n", 73 | " IMAGES_PER_GPU = 1\n", 74 | "\n", 75 | "inference_config = InferenceConfig()\n", 76 | "inference_config.display()\n", 77 | "model = modellib.MaskRCNN(mode=\"inference\", \n", 78 | " config=inference_config,\n", 79 | " model_dir=MODEL_DIR)\n", 80 | "\n", 81 | "#Loads weights from a static model file\n", 82 | "model.load_weights(MODEL_PATH, by_name=True)" 83 | ] 84 | }, 85 | { 86 | "cell_type": "code", 87 | "execution_count": null, 88 | "metadata": {}, 89 | "outputs": [], 90 | "source": [] 91 | }, 92 | { 93 | "cell_type": "code", 94 | "execution_count": null, 95 | "metadata": { 96 | "scrolled": true 97 | }, 98 | "outputs": [], 99 | "source": [ 100 | "import matplotlib.pyplot as plt\n", 101 | "import os\n", 102 | "import glob\n", 103 | "import cv2\n", 104 | "from sklearn import model_selection\n", 105 | "import re\n", 106 | "from scipy import ndimage\n", 107 | "import skimage.io\n", 108 | "from PIL import Image\n", 109 | "import numpy as np\n", 110 | "from skimage.io import imread\n", 111 | "from skimage.color import gray2rgb\n", 112 | "import time\n", 113 | "from visualize import display_images\n", 114 | "from visualize import display_weight_stats\n", 115 | "\n", 116 | 
"class_names = ['BG','iris']\n", 117 | "\n", 118 | "# Root directory of the project\n", 119 | "\n", 120 | "# Load images from folder\n", 121 | "\n", 122 | "numbers = re.compile(r'(\\d+)')\n", 123 | "def numericalSort(value):\n", 124 | " parts = numbers.split(value)\n", 125 | " parts[1::2] = map(int, parts[1::2])\n", 126 | " return parts\n", 127 | "\n", 128 | "def get_ax(rows=1, cols=1, size=8):\n", 129 | " \"\"\"Return a Matplotlib Axes array to be used in\n", 130 | " all visualizations in the notebook. Provide a\n", 131 | " central point to control graph sizes.\n", 132 | " \n", 133 | " Change the default size attribute to control the size\n", 134 | " of rendered images\n", 135 | " \"\"\"\n", 136 | " _, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))\n", 137 | " return ax\n", 138 | "\n", 139 | "#Inference one at a time, change to batch (Memory per image calculate)\n", 140 | "def runonFolder(inputFolder,saveFolder):\n", 141 | " os.chdir(folderPath)\n", 142 | " imList = sorted(glob.glob(\"*\"),key=numericalSort)\n", 143 | " print ( len(imList))\n", 144 | " avgTime = 0.0\n", 145 | " \n", 146 | " newpath = saveFolder\n", 147 | " if not os.path.exists(newpath):\n", 148 | " os.makedirs(newpath)\n", 149 | " os.chdir(newpath)\n", 150 | " start = time.time()\n", 151 | "\n", 152 | " print (\"Saving in \" + os.getcwd())\n", 153 | " for i in range(len(imList)):\n", 154 | " im = imread(folderPath + \"/\" + imList[i])\n", 155 | " image = skimage.color.gray2rgb(im)\n", 156 | " im, window, scale, padding, crop = utils.resize_image(\n", 157 | " image,\n", 158 | " min_dim=inference_config.IMAGE_MIN_DIM,\n", 159 | " min_scale=inference_config.IMAGE_MIN_SCALE,\n", 160 | " max_dim=inference_config.IMAGE_MAX_DIM,\n", 161 | " mode=\"pad64\")\n", 162 | "\n", 163 | " results = model.detect([im], verbose=0)\n", 164 | " r = results[0]\n", 165 | " mask = r['masks'][:, :, 0]\n", 166 | " end = time.time()\n", 167 | " avgTime += (end-start)\n", 168 | "\n", 169 | " print (end-start)\n", 170 | "\n", 171 | " saveName = imList[i]\n", 172 | "\n", 173 | " saveName = re.sub('[.].*','.png',saveName)\n", 174 | " visualize.display_instances(im, r['rois'], r['masks'], r['class_ids'],class_names,show_bbox = True, ax=get_ax())\n", 175 | " matplotlib.image.imsave(saveName, mask) \n", 176 | "\n", 177 | " print (avgTime)\n", 178 | "\n", 179 | "#Folder containing iris images\n", 180 | "folderPath = ROOT_DIR+\"/ub\"\n", 181 | "\n", 182 | "#Segmented masks stored in /segmented\n", 183 | "runonFolder(folderPath,ROOT_DIR+\"/segmented\")\n", 184 | "\n" 185 | ] 186 | }, 187 | { 188 | "cell_type": "code", 189 | "execution_count": null, 190 | "metadata": {}, 191 | "outputs": [], 192 | "source": [] 193 | }, 194 | { 195 | "cell_type": "code", 196 | "execution_count": null, 197 | "metadata": {}, 198 | "outputs": [], 199 | "source": [] 200 | }, 201 | { 202 | "cell_type": "code", 203 | "execution_count": null, 204 | "metadata": {}, 205 | "outputs": [], 206 | "source": [] 207 | } 208 | ], 209 | "metadata": { 210 | "kernelspec": { 211 | "display_name": "Python 3", 212 | "language": "python", 213 | "name": "python3" 214 | }, 215 | "language_info": { 216 | "codemirror_mode": { 217 | "name": "ipython", 218 | "version": 3 219 | }, 220 | "file_extension": ".py", 221 | "mimetype": "text/x-python", 222 | "name": "python", 223 | "nbconvert_exporter": "python", 224 | "pygments_lexer": "ipython3", 225 | "version": "3.5.2" 226 | } 227 | }, 228 | "nbformat": 4, 229 | "nbformat_minor": 2 230 | } 231 | 
-------------------------------------------------------------------------------- /Helperfiles/readme: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /Helperfiles/trainDataset.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Mask R-CNN\n", 8 | "Implementation by Matterport\n", 9 | "\n" 10 | ] 11 | }, 12 | { 13 | "cell_type": "code", 14 | "execution_count": null, 15 | "metadata": {}, 16 | "outputs": [], 17 | "source": [ 18 | "import os\n", 19 | "import sys\n", 20 | "import random\n", 21 | "import math\n", 22 | "import re\n", 23 | "import time\n", 24 | "import numpy as np\n", 25 | "import cv2\n", 26 | "import matplotlib\n", 27 | "import matplotlib.pyplot as plt\n", 28 | "import glob\n", 29 | "from sklearn import model_selection\n", 30 | "from config import Config\n", 31 | "import utils\n", 32 | "import model as modellib\n", 33 | "import visualize\n", 34 | "from model import log\n", 35 | "HEIGHT = 280\n", 36 | "WIDTH = 320\n", 37 | "\n", 38 | "%matplotlib inline \n", 39 | "\n", 40 | "# Root directory of the project\n", 41 | "ROOT_DIR = os.getcwd()\n", 42 | "\n", 43 | "# Directory to save logs and trained model\n", 44 | "MODEL_DIR = os.path.join(ROOT_DIR, \"logs\")\n", 45 | "\n", 46 | "# Path to COCO trained weights\n", 47 | "COCO_MODEL_PATH = os.path.join(ROOT_DIR, \"mask_rcnn_coco.h5\")\n", 48 | "\n", 49 | "#Path to dataset\n", 50 | "DATASET_PATH = \"/dividedDataset\"" 51 | ] 52 | }, 53 | { 54 | "cell_type": "markdown", 55 | "metadata": {}, 56 | "source": [ 57 | "## Configurations" 58 | ] 59 | }, 60 | { 61 | "cell_type": "code", 62 | "execution_count": null, 63 | "metadata": {}, 64 | "outputs": [], 65 | "source": [ 66 | "class irisConfig(Config):\n", 67 | " \n", 68 | " \n", 69 | " # Give the configuration a recognizable name\n", 70 | " NAME = \"irises\"\n", 71 | "\n", 72 | " # Train on 1 GPU and 8 images per GPU. We can put multiple images on each\n", 73 | " # GPU because the images are small. Batch size is 8 (GPUs * images/GPU).\n", 74 | " GPU_COUNT = 1\n", 75 | " IMAGES_PER_GPU = 2\n", 76 | " #MINI_MASK_SHAPE = (84, 84) \n", 77 | " # Number of classes (including background)\n", 78 | " NUM_CLASSES = 1 + 1 # background + 3 shapes\n", 79 | " RPN_ANCHOR_STRIDE = 2\n", 80 | " # Use small images for faster training. Set the limits of the small side\n", 81 | " # the large side, and that determines the image shape.\n", 82 | " IMAGE_MIN_DIM = 240\n", 83 | " IMAGE_MAX_DIM = 640\n", 84 | " RPN_ANCHOR_SCALES = (32, 64, 128, 256, 512)\n", 85 | " #Ubiris only\n", 86 | " MEAN_PIXEL = np.array([ 127.8, 90.1, 76.3])\n", 87 | " #IMAGE_SHAPE = [320,256,3]\n", 88 | " # Use smaller anchors because our image and objects are small\n", 89 | " #RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128) # anchor side in pixels\n", 90 | " #MINI_MASK_SHAPE = [40,40]\n", 91 | " # Reduce training ROIs per image because the images are small and have\n", 92 | " # few objects. 
Aim to allow ROI sampling to pick 33% positive ROIs.\n", 93 | " #TRAIN_ROIS_PER_IMAGE = 48\n", 94 | " \n", 95 | " # Use a small epoch since the data is simple\n", 96 | " STEPS_PER_EPOCH = 500\n", 97 | "\n", 98 | " # use small validation steps since the epoch is small\n", 99 | " VALIDATION_STEPS = 20 \n", 100 | " #Iris shape, Casia only for now\n", 101 | "config = irisConfig()\n", 102 | "config.display()\n", 103 | "\n" 104 | ] 105 | }, 106 | { 107 | "cell_type": "markdown", 108 | "metadata": {}, 109 | "source": [ 110 | "## Notebook Preferences" 111 | ] 112 | }, 113 | { 114 | "cell_type": "code", 115 | "execution_count": null, 116 | "metadata": {}, 117 | "outputs": [], 118 | "source": [ 119 | "def get_ax(rows=1, cols=1, size=8):\n", 120 | " \"\"\"Return a Matplotlib Axes array to be used in\n", 121 | " all visualizations in the notebook. Provide a\n", 122 | " central point to control graph sizes.\n", 123 | " \n", 124 | " Change the default size attribute to control the size\n", 125 | " of rendered images\n", 126 | " \"\"\"\n", 127 | " _, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))\n", 128 | " return ax" 129 | ] 130 | }, 131 | { 132 | "cell_type": "markdown", 133 | "metadata": {}, 134 | "source": [ 135 | "## Dataset\n", 136 | "\n", 137 | "Create a synthetic dataset\n", 138 | "\n", 139 | "## Must divide iris dataset statically\n", 140 | "* load_image()\n", 141 | "* load_mask()\n", 142 | "* image_reference()" 143 | ] 144 | }, 145 | { 146 | "cell_type": "code", 147 | "execution_count": null, 148 | "metadata": { 149 | "scrolled": true 150 | }, 151 | "outputs": [], 152 | "source": [ 153 | "import re\n", 154 | "import skimage\n", 155 | "from skimage import io\n", 156 | "from sklearn import model_selection\n", 157 | "from skimage import util\n", 158 | "from numpy import newaxis\n", 159 | "import cv2\n", 160 | "\n", 161 | "# Loading the dataset, static folder for now\n", 162 | "datasetLoc = ROOT_DIR + DATASET_PATH\n", 163 | "\n", 164 | "numbers = re.compile(r'(\\d+)')\n", 165 | "def numericalSort(value):\n", 166 | " parts = numbers.split(value)\n", 167 | " parts[1::2] = map(int, parts[1::2])\n", 168 | " return parts\n", 169 | " \n", 170 | " \n", 171 | "class irisDataset(utils.Dataset):\n", 172 | " dType = \"\"\n", 173 | " y_valid = []\n", 174 | " y_train = []\n", 175 | " y_test = []\n", 176 | " def getmaskLoc(self,y_trai,y_vali):\n", 177 | " self.Y_training = y_trai\n", 178 | " self.Y_valid = y_vali\n", 179 | " print (str(len(dataset_train.Y_training)))\n", 180 | "\n", 181 | "\n", 182 | " def loadIris(self,dataset):\n", 183 | " print (self.dType)\n", 184 | " os.chdir(ROOT_DIR)\n", 185 | " cwd = os.getcwd()\n", 186 | " print (cwd)\n", 187 | " self.add_class(\"iris\",1,\"iris\")\n", 188 | " if(dataset == \"train\"):\n", 189 | " trainIris = datasetLoc + \"train/iris\"\n", 190 | " os.chdir(trainIris)\n", 191 | " X_train = sorted(glob.glob(\"*\"),key=numericalSort) \n", 192 | "\n", 193 | " trainMask = datasetLoc +\"train/mask\"\n", 194 | " os.chdir(trainMask)\n", 195 | " self.y_train = sorted(glob.glob(\"*\"),key=numericalSort) \n", 196 | " \n", 197 | " \n", 198 | " for i in range((len(X_train))-1):\n", 199 | " self.add_image(\"iris\",image_id = i,path = datasetLoc + \"train/iris/\" + X_train[i],width = WIDTH,height = HEIGHT)\n", 200 | " if(dataset == \"valid\"):\n", 201 | "\n", 202 | " validIris = datasetLoc + \"valid/iris\"\n", 203 | " os.chdir(validIris)\n", 204 | " X_valid = sorted(glob.glob(\"*\"),key=numericalSort) \n", 205 | "\n", 206 | " validMask = datasetLoc + 
\"valid/mask\"\n", 207 | " os.chdir(validMask)\n", 208 | " self.y_valid = sorted(glob.glob(\"*\"),key=numericalSort) \n", 209 | " \n", 210 | " for i in range((len(X_valid))-1):\n", 211 | " self.add_image(\"iris\",image_id = i,path = datasetLoc + \"valid/iris/\" + X_valid[i],width = WIDTH,height = HEIGHT)\n", 212 | "\n", 213 | " if(dataset == \"test\"):\n", 214 | " testIris = datasetLoc + \"test/iris\"\n", 215 | " os.chdir(testIris)\n", 216 | " X_test = sorted(glob.glob(\"*\"),key=numericalSort) \n", 217 | "\n", 218 | " testMask = datasetLoc +\"test/mask\"\n", 219 | " os.chdir(testMask)\n", 220 | " self.y_test = sorted(glob.glob(\"*\"),key=numericalSort) \n", 221 | " \n", 222 | " for i in range((len(X_test))-1):\n", 223 | " self.add_image(\"iris\",image_id = i,path = datasetLoc + \"test/iris/\" + X_test[i],width = WIDTH,height = HEIGHT)\n", 224 | "\n", 225 | " \n", 226 | " def load_mask(self,image_id):\n", 227 | " \n", 228 | " cwd = os.getcwd()\n", 229 | " image = np.ones(shape=(HEIGHT,WIDTH))\n", 230 | " \n", 231 | " if(self.dType == \"train\"):\n", 232 | " #print (len(self.y_train))\n", 233 | "\n", 234 | " image = skimage.io.imread(datasetLoc +\"train/mask/\" +self.y_train[image_id],as_grey=True)\n", 235 | " image = skimage.color.rgb2gray(image)\n", 236 | " if(self.dType == \"test\"):\n", 237 | " #print (len(self.y_test))\n", 238 | "\n", 239 | " image = skimage.io.imread(datasetLoc +\"test/mask/\" +self.y_test[image_id],as_grey=True)\n", 240 | " image = skimage.color.rgb2gray(image)\n", 241 | "\n", 242 | " if(self.dType == \"valid\"):\n", 243 | " #print (len(self.y_valid))\n", 244 | "\n", 245 | " image = skimage.io.imread(datasetLoc +\"valid/mask/\" +self.y_valid[image_id],as_grey=True)\n", 246 | " image = skimage.color.rgb2gray(image)\n", 247 | "\n", 248 | " class_ids = np.array([1])\n", 249 | " #New axis due to 1+ number of mask classes in actual implementation\n", 250 | " mask = image[:,:,newaxis]\n", 251 | "\n", 252 | " for x in range (mask.shape[0]):\n", 253 | " for y in range (mask.shape[1]):\n", 254 | " if mask[x][y] == 1:\n", 255 | " mask[x][y][0] = 1\n", 256 | " return mask,class_ids\n", 257 | " \n", 258 | "os.chdir(ROOT_DIR)\n", 259 | "\n", 260 | "# Training dataset\n", 261 | "dataset_train = irisDataset(\"train\")\n", 262 | "dataset_train.dType = \"train\"\n", 263 | "dataset_train.loadIris(\"train\")\n", 264 | "dataset_train.prepare()\n", 265 | "\n", 266 | "# Validation dataset\n", 267 | "dataset_val = irisDataset(\"valid\")\n", 268 | "dataset_val.dType = \"valid\"\n", 269 | "dataset_val.loadIris(\"valid\")\n", 270 | "dataset_val.prepare()\n", 271 | "\n", 272 | "# Test Dataset\n", 273 | "dataset_test = irisDataset(\"test\")\n", 274 | "dataset_test.dType = \"test\"\n", 275 | "dataset_test.loadIris(\"test\")\n", 276 | "dataset_test.prepare()\n" 277 | ] 278 | }, 279 | { 280 | "cell_type": "code", 281 | "execution_count": null, 282 | "metadata": {}, 283 | "outputs": [], 284 | "source": [] 285 | }, 286 | { 287 | "cell_type": "code", 288 | "execution_count": null, 289 | "metadata": { 290 | "scrolled": true 291 | }, 292 | "outputs": [], 293 | "source": [ 294 | "os.chdir(ROOT_DIR + \"/logs\")\n", 295 | "# Displays random samples from each partition of the dataset\n", 296 | "image_ids = np.random.choice(dataset_train.image_ids, 2)\n", 297 | "for image_id in image_ids:\n", 298 | " image = dataset_train.load_image(image_id)\n", 299 | " print (image.shape)\n", 300 | " print (image_id)\n", 301 | " mask, class_ids = dataset_train.load_mask(image_id)\n", 302 | " print (mask.shape) \n", 303 | 
" visualize.display_top_masks(image, mask, class_ids, dataset_train.class_names)\n", 304 | " \n", 305 | "image_ids = np.random.choice(dataset_val.image_ids, 2)\n", 306 | "for image_id in image_ids:\n", 307 | " image = dataset_val.load_image(image_id)\n", 308 | " print (image_id)\n", 309 | " mask, class_ids = dataset_val.load_mask(image_id)\n", 310 | " print (mask.shape) \n", 311 | " visualize.display_top_masks(image, mask, class_ids, dataset_val.class_names)\n", 312 | " \n", 313 | "image_ids = np.random.choice(dataset_test.image_ids, 2)\n", 314 | "for image_id in image_ids:\n", 315 | " image = dataset_test.load_image(image_id)\n", 316 | " print (image_id)\n", 317 | " mask, class_ids = dataset_test.load_mask(image_id)\n", 318 | " print (mask.shape) \n", 319 | " visualize.display_top_masks(image, mask, class_ids, dataset_test.class_names)" 320 | ] 321 | }, 322 | { 323 | "cell_type": "markdown", 324 | "metadata": {}, 325 | "source": [ 326 | "## Model" 327 | ] 328 | }, 329 | { 330 | "cell_type": "markdown", 331 | "metadata": {}, 332 | "source": [ 333 | "#### Create model in training mode\n", 334 | "model = modellib.MaskRCNN(mode=\"training\", config=config,\n", 335 | " model_dir=MODEL_DIR)" 336 | ] 337 | }, 338 | { 339 | "cell_type": "code", 340 | "execution_count": null, 341 | "metadata": {}, 342 | "outputs": [], 343 | "source": [ 344 | "model = modellib.MaskRCNN(mode=\"training\", config=config,\n", 345 | " model_dir=MODEL_DIR)\n", 346 | "\n", 347 | "init_with = \"coco\" # imagenet, coco, or last\n", 348 | "\n", 349 | "if init_with == \"imagenet\":\n", 350 | " model.load_weights(model.get_imagenet_weights(), by_name=True)\n", 351 | "elif init_with == \"coco\":\n", 352 | " model.load_weights(COCO_MODEL_PATH, by_name=True,\n", 353 | " exclude=[\"mrcnn_class_logits\", \"mrcnn_bbox_fc\", \n", 354 | " \"mrcnn_bbox\", \"mrcnn_mask\"])\n", 355 | " print (COCO_MODEL_PATH)\n", 356 | "elif init_with == \"last\":\n", 357 | " model.load_weights(model.find_last()[1], by_name=True)\n", 358 | " print (model.find_last()[1])\n" 359 | ] 360 | }, 361 | { 362 | "cell_type": "markdown", 363 | "metadata": {}, 364 | "source": [ 365 | "## Training\n", 366 | "\n", 367 | "Train in two stages:\n", 368 | "1. Training only the head layers\n", 369 | "\n", 370 | "2. 
Fine tuning left over layers" 371 | ] 372 | }, 373 | { 374 | "cell_type": "code", 375 | "execution_count": null, 376 | "metadata": { 377 | "scrolled": true 378 | }, 379 | "outputs": [], 380 | "source": [ 381 | "import imgaug\n", 382 | "from imgaug import augmenters as iaa\n", 383 | "\n", 384 | " \n", 385 | "\n", 386 | "augmentation = imgaug.augmenters.Fliplr(0.5)\n", 387 | "\n", 388 | "model.train(dataset_train, dataset_val, \n", 389 | " learning_rate=config.LEARNING_RATE, \n", 390 | " epochs=8, \n", 391 | " layers='all',\n", 392 | " augmentation = augmentation\n", 393 | " )" 394 | ] 395 | }, 396 | { 397 | "cell_type": "code", 398 | "execution_count": null, 399 | "metadata": { 400 | "scrolled": true 401 | }, 402 | "outputs": [], 403 | "source": [ 404 | "augmentation = imgaug.augmenters.Fliplr(0.5)\n", 405 | "\n", 406 | "model.train(dataset_train, dataset_val, \n", 407 | " learning_rate=config.LEARNING_RATE, \n", 408 | " epochs=16, \n", 409 | " layers='5+',\n", 410 | " augmentation = augmentation\n", 411 | " )" 412 | ] 413 | }, 414 | { 415 | "cell_type": "code", 416 | "execution_count": null, 417 | "metadata": { 418 | "scrolled": true 419 | }, 420 | "outputs": [], 421 | "source": [ 422 | "augmentation = imgaug.augmenters.Fliplr(0.5)\n", 423 | "\n", 424 | "model.train(dataset_train, dataset_val, \n", 425 | " learning_rate=config.LEARNING_RATE, \n", 426 | " epochs=24, \n", 427 | " layers='all',\n", 428 | " augmentation = augmentation\n", 429 | " )" 430 | ] 431 | }, 432 | { 433 | "cell_type": "code", 434 | "execution_count": null, 435 | "metadata": { 436 | "scrolled": true 437 | }, 438 | "outputs": [], 439 | "source": [] 440 | }, 441 | { 442 | "cell_type": "code", 443 | "execution_count": null, 444 | "metadata": {}, 445 | "outputs": [], 446 | "source": [] 447 | }, 448 | { 449 | "cell_type": "code", 450 | "execution_count": null, 451 | "metadata": {}, 452 | "outputs": [], 453 | "source": [] 454 | } 455 | ], 456 | "metadata": { 457 | "kernelspec": { 458 | "display_name": "Python 3", 459 | "language": "python", 460 | "name": "python3" 461 | }, 462 | "language_info": { 463 | "codemirror_mode": { 464 | "name": "ipython", 465 | "version": 3 466 | }, 467 | "file_extension": ".py", 468 | "mimetype": "text/x-python", 469 | "name": "python", 470 | "nbconvert_exporter": "python", 471 | "pygments_lexer": "ipython3", 472 | "version": "3.5.2" 473 | } 474 | }, 475 | "nbformat": 4, 476 | "nbformat_minor": 2 477 | } 478 | -------------------------------------------------------------------------------- /Model/__pycache__/config.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohaib50k/Unconstrained-iris-segmentation-using-Mask-R-CNN/b435cea5b956472aebfc0aaf1d468f837e0a09f3/Model/__pycache__/config.cpython-35.pyc -------------------------------------------------------------------------------- /Model/__pycache__/model.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohaib50k/Unconstrained-iris-segmentation-using-Mask-R-CNN/b435cea5b956472aebfc0aaf1d468f837e0a09f3/Model/__pycache__/model.cpython-35.pyc -------------------------------------------------------------------------------- /Model/__pycache__/utils.cpython-35.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/sohaib50k/Unconstrained-iris-segmentation-using-Mask-R-CNN/b435cea5b956472aebfc0aaf1d468f837e0a09f3/Model/__pycache__/utils.cpython-35.pyc -------------------------------------------------------------------------------- /Model/__pycache__/visualize.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohaib50k/Unconstrained-iris-segmentation-using-Mask-R-CNN/b435cea5b956472aebfc0aaf1d468f837e0a09f3/Model/__pycache__/visualize.cpython-35.pyc -------------------------------------------------------------------------------- /Model/config.py: -------------------------------------------------------------------------------- 1 | """ 2 | Mask R-CNN 3 | Base Configurations class. 4 | 5 | Copyright (c) 2017 Matterport, Inc. 6 | Licensed under the MIT License (see LICENSE for details) 7 | Written by Waleed Abdulla 8 | """ 9 | 10 | import math 11 | import numpy as np 12 | 13 | 14 | # Base Configuration Class 15 | # Don't use this class directly. Instead, sub-class it and override 16 | # the configurations you need to change. 17 | 18 | class Config(object): 19 | """Base configuration class. For custom configurations, create a 20 | sub-class that inherits from this one and override properties 21 | that need to be changed. 22 | """ 23 | # Name the configurations. For example, 'COCO', 'Experiment 3', ...etc. 24 | # Useful if your code needs to do things differently depending on which 25 | # experiment is running. 26 | NAME = None # Override in sub-classes 27 | 28 | # NUMBER OF GPUs to use. For CPU training, use 1 29 | GPU_COUNT = 1 30 | 31 | # Number of images to train with on each GPU. A 12GB GPU can typically 32 | # handle 2 images of 1024x1024px. 33 | # Adjust based on your GPU memory and image sizes. Use the highest 34 | # number that your GPU can handle for best performance. 35 | IMAGES_PER_GPU = 2 36 | 37 | # Number of training steps per epoch 38 | # This doesn't need to match the size of the training set. Tensorboard 39 | # updates are saved at the end of each epoch, so setting this to a 40 | # smaller number means getting more frequent TensorBoard updates. 41 | # Validation stats are also calculated at each epoch end and they 42 | # might take a while, so don't set this too small to avoid spending 43 | # a lot of time on validation stats. 44 | STEPS_PER_EPOCH = 1000 45 | 46 | # Number of validation steps to run at the end of every training epoch. 47 | # A bigger number improves accuracy of validation stats, but slows 48 | # down the training. 49 | VALIDATION_STEPS = 50 50 | 51 | # Backbone network architecture 52 | # Supported values are: resnet50, resnet101 53 | BACKBONE = "resnet101" 54 | 55 | # The strides of each layer of the FPN Pyramid. These values 56 | # are based on a Resnet101 backbone. 57 | BACKBONE_STRIDES = [4, 8, 16, 32, 64] 58 | 59 | # Number of classification classes (including background) 60 | NUM_CLASSES = 1 # Override in sub-classes 61 | 62 | # Length of square anchor side in pixels 63 | RPN_ANCHOR_SCALES = (32, 64, 128, 256, 512) 64 | 65 | # Ratios of anchors at each cell (width/height) 66 | # A value of 1 represents a square anchor, and 0.5 is a wide anchor 67 | RPN_ANCHOR_RATIOS = [0.5, 1, 2] 68 | 69 | # Anchor stride 70 | # If 1 then anchors are created for each cell in the backbone feature map. 71 | # If 2, then anchors are created for every other cell, and so on. 72 | RPN_ANCHOR_STRIDE = 1 73 | 74 | # Non-max suppression threshold to filter RPN proposals. 
75 | # You can increase this during training to generate more propsals. 76 | RPN_NMS_THRESHOLD = 0.7 77 | 78 | # How many anchors per image to use for RPN training 79 | RPN_TRAIN_ANCHORS_PER_IMAGE = 256 80 | 81 | # ROIs kept after non-maximum supression (training and inference) 82 | POST_NMS_ROIS_TRAINING = 2000 83 | POST_NMS_ROIS_INFERENCE = 1000 84 | 85 | # If enabled, resizes instance masks to a smaller size to reduce 86 | # memory load. Recommended when using high-resolution images. 87 | USE_MINI_MASK = True 88 | MINI_MASK_SHAPE = (56, 56) # (height, width) of the mini-mask 89 | 90 | # Input image resizing 91 | # Generally, use the "square" resizing mode for training and inferencing 92 | # and it should work well in most cases. In this mode, images are scaled 93 | # up such that the small side is = IMAGE_MIN_DIM, but ensuring that the 94 | # scaling doesn't make the long side > IMAGE_MAX_DIM. Then the image is 95 | # padded with zeros to make it a square so multiple images can be put 96 | # in one batch. 97 | # Available resizing modes: 98 | # none: No resizing or padding. Return the image unchanged. 99 | # square: Resize and pad with zeros to get a square image 100 | # of size [max_dim, max_dim]. 101 | # pad64: Pads width and height with zeros to make them multiples of 64. 102 | # If IMAGE_MIN_DIM or IMAGE_MIN_SCALE are not None, then it scales 103 | # up before padding. IMAGE_MAX_DIM is ignored in this mode. 104 | # The multiple of 64 is needed to ensure smooth scaling of feature 105 | # maps up and down the 6 levels of the FPN pyramid (2**6=64). 106 | # crop: Picks random crops from the image. First, scales the image based 107 | # on IMAGE_MIN_DIM and IMAGE_MIN_SCALE, then picks a random crop of 108 | # size IMAGE_MIN_DIM x IMAGE_MIN_DIM. Can be used in training only. 109 | # IMAGE_MAX_DIM is not used in this mode. 110 | IMAGE_RESIZE_MODE = "square" 111 | IMAGE_MIN_DIM = 800 112 | IMAGE_MAX_DIM = 1024 113 | # Minimum scaling ratio. Checked after MIN_IMAGE_DIM and can force further 114 | # up scaling. For example, if set to 2 then images are scaled up to double 115 | # the width and height, or more, even if MIN_IMAGE_DIM doesn't require it. 116 | # Howver, in 'square' mode, it can be overruled by IMAGE_MAX_DIM. 117 | IMAGE_MIN_SCALE = 0 118 | 119 | # Image mean (RGB) 120 | MEAN_PIXEL = np.array([123.7, 116.8, 103.9]) 121 | 122 | # Number of ROIs per image to feed to classifier/mask heads 123 | # The Mask RCNN paper uses 512 but often the RPN doesn't generate 124 | # enough positive proposals to fill this and keep a positive:negative 125 | # ratio of 1:3. You can increase the number of proposals by adjusting 126 | # the RPN NMS threshold. 127 | TRAIN_ROIS_PER_IMAGE = 200 128 | 129 | # Percent of positive ROIs used to train classifier/mask heads 130 | ROI_POSITIVE_RATIO = 0.33 131 | 132 | # Pooled ROIs 133 | POOL_SIZE = 7 134 | MASK_POOL_SIZE = 14 135 | 136 | # Shape of output mask 137 | # To change this you also need to change the neural network mask branch 138 | MASK_SHAPE = [28, 28] 139 | 140 | # Maximum number of ground truth instances to use in one image 141 | MAX_GT_INSTANCES = 100 142 | 143 | # Bounding box refinement standard deviation for RPN and final detections. 
144 | RPN_BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2]) 145 | BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2]) 146 | 147 | # Max number of final detections 148 | DETECTION_MAX_INSTANCES = 100 149 | 150 | # Minimum probability value to accept a detected instance 151 | # ROIs below this threshold are skipped 152 | DETECTION_MIN_CONFIDENCE = 0.7 153 | 154 | # Non-maximum suppression threshold for detection 155 | DETECTION_NMS_THRESHOLD = 0.3 156 | 157 | # Learning rate and momentum 158 | # The Mask RCNN paper uses lr=0.02, but on TensorFlow it causes 159 | # weights to explode. Likely due to differences in optimzer 160 | # implementation. 161 | LEARNING_RATE = 0.001 162 | LEARNING_MOMENTUM = 0.9 163 | 164 | # Weight decay regularization 165 | WEIGHT_DECAY = 0.0001 166 | 167 | # Use RPN ROIs or externally generated ROIs for training 168 | # Keep this True for most situations. Set to False if you want to train 169 | # the head branches on ROI generated by code rather than the ROIs from 170 | # the RPN. For example, to debug the classifier head without having to 171 | # train the RPN. 172 | USE_RPN_ROIS = True 173 | 174 | # Train or freeze batch normalization layers 175 | # None: Train BN layers. This is the normal mode 176 | # False: Freeze BN layers. Good when using a small batch size 177 | # True: (don't use). Set layer in training mode even when inferencing 178 | TRAIN_BN = False # Defaulting to False since batch size is often small 179 | 180 | # Gradient norm clipping 181 | GRADIENT_CLIP_NORM = 5.0 182 | 183 | def __init__(self): 184 | """Set values of computed attributes.""" 185 | # Effective batch size 186 | self.BATCH_SIZE = self.IMAGES_PER_GPU * self.GPU_COUNT 187 | 188 | # Input image size 189 | if self.IMAGE_RESIZE_MODE == "crop": 190 | self.IMAGE_SHAPE = np.array([self.IMAGE_MIN_DIM, self.IMAGE_MIN_DIM, 3]) 191 | else: 192 | self.IMAGE_SHAPE = np.array([self.IMAGE_MAX_DIM, self.IMAGE_MAX_DIM, 3]) 193 | 194 | # Image meta data length 195 | # See compose_image_meta() for details 196 | self.IMAGE_META_SIZE = 1 + 3 + 3 + 4 + 1 + self.NUM_CLASSES 197 | 198 | def display(self): 199 | """Display Configuration values.""" 200 | print("\nConfigurations:") 201 | for a in dir(self): 202 | if not a.startswith("__") and not callable(getattr(self, a)): 203 | print("{:30} {}".format(a, getattr(self, a))) 204 | print("\n") 205 | -------------------------------------------------------------------------------- /Model/readme: -------------------------------------------------------------------------------- 1 | Implementation files from the Matterport implementation 2 | -------------------------------------------------------------------------------- /Model/setup.py: -------------------------------------------------------------------------------- 1 | """ 2 | The build/compilations setup 3 | 4 | >> pip install -r requirements.txt 5 | >> python setup.py install 6 | """ 7 | import pip 8 | import logging 9 | import pkg_resources 10 | try: 11 | from setuptools import setup 12 | except ImportError: 13 | from distutils.core import setup 14 | 15 | 16 | def _parse_requirements(file_path): 17 | pip_ver = pkg_resources.get_distribution('pip').version 18 | pip_version = list(map(int, pip_ver.split('.')[:2])) 19 | if pip_version >= [6, 0]: 20 | raw = pip.req.parse_requirements(file_path, 21 | session=pip.download.PipSession()) 22 | else: 23 | raw = pip.req.parse_requirements(file_path) 24 | return [str(i.req) for i in raw] 25 | 26 | 27 | # parse_requirements() returns generator of pip.req.InstallRequirement objects 28 | 
try: 29 | install_reqs = _parse_requirements("requirements.txt") 30 | except Exception: 31 | logging.warning('Fail load requirements file, so using default ones.') 32 | install_reqs = [] 33 | 34 | setup( 35 | name='mask-rcnn', 36 | version='2.1', 37 | url='https://github.com/matterport/Mask_RCNN', 38 | author='Matterport', 39 | author_email='waleed.abdulla@gmail.com', 40 | license='MIT', 41 | description='Mask R-CNN for object detection and instance segmentation', 42 | packages=["mrcnn"], 43 | install_requires=install_reqs, 44 | include_package_data=True, 45 | python_requires='>=3.4', 46 | long_description="""This is an implementation of Mask R-CNN on Python 3, Keras, and TensorFlow. 47 | The model generates bounding boxes and segmentation masks for each instance of an object in the image. 48 | It's based on Feature Pyramid Network (FPN) and a ResNet101 backbone.""", 49 | classifiers=[ 50 | "Development Status :: 5 - Production/Stable", 51 | "Environment :: Console", 52 | "Intended Audience :: Developers", 53 | "Intended Audience :: Information Technology", 54 | "Intended Audience :: Education", 55 | "Intended Audience :: Science/Research", 56 | "License :: OSI Approved :: MIT License", 57 | "Natural Language :: English", 58 | "Operating System :: OS Independent", 59 | "Topic :: Scientific/Engineering :: Artificial Intelligence", 60 | "Topic :: Scientific/Engineering :: Image Recognition", 61 | "Topic :: Scientific/Engineering :: Visualization", 62 | "Topic :: Scientific/Engineering :: Image Segmentation", 63 | 'Programming Language :: Python :: 3.4', 64 | 'Programming Language :: Python :: 3.5', 65 | 'Programming Language :: Python :: 3.6', 66 | ], 67 | keywords="image instance segmentation object detection mask rcnn r-cnn tensorflow keras", 68 | ) 69 | -------------------------------------------------------------------------------- /Model/utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | Mask R-CNN 3 | Common utility functions and classes. 4 | 5 | Copyright (c) 2017 Matterport, Inc. 6 | Licensed under the MIT License (see LICENSE for details) 7 | Written by Waleed Abdulla 8 | """ 9 | 10 | import sys 11 | import os 12 | import math 13 | import random 14 | import numpy as np 15 | import tensorflow as tf 16 | import scipy 17 | import skimage.color 18 | import skimage.io 19 | import skimage.transform 20 | import urllib.request 21 | import shutil 22 | import warnings 23 | 24 | # URL from which to download the latest COCO trained weights 25 | COCO_MODEL_URL = "https://github.com/matterport/Mask_RCNN/releases/download/v2.0/mask_rcnn_coco.h5" 26 | 27 | 28 | ############################################################ 29 | # Bounding Boxes 30 | ############################################################ 31 | 32 | def extract_bboxes(mask): 33 | """Compute bounding boxes from masks. 34 | mask: [height, width, num_instances]. Mask pixels are either 1 or 0. 35 | 36 | Returns: bbox array [num_instances, (y1, x1, y2, x2)]. 37 | """ 38 | boxes = np.zeros([mask.shape[-1], 4], dtype=np.int32) 39 | for i in range(mask.shape[-1]): 40 | m = mask[:, :, i] 41 | # Bounding box. 42 | horizontal_indicies = np.where(np.any(m, axis=0))[0] 43 | vertical_indicies = np.where(np.any(m, axis=1))[0] 44 | if horizontal_indicies.shape[0]: 45 | x1, x2 = horizontal_indicies[[0, -1]] 46 | y1, y2 = vertical_indicies[[0, -1]] 47 | # x2 and y2 should not be part of the box. Increment by 1. 
48 | x2 += 6 49 | y2 += 6 50 | x1 = x1-6 51 | y1 = y1-6 52 | else: 53 | # No mask for this instance. Might happen due to 54 | # resizing or cropping. Set bbox to zeros 55 | x1, x2, y1, y2 = 0, 0, 0, 0 56 | boxes[i] = np.array([y1, x1, y2, x2]) 57 | return boxes.astype(np.int32) 58 | 59 | 60 | def compute_iou(box, boxes, box_area, boxes_area): 61 | """Calculates IoU of the given box with the array of the given boxes. 62 | box: 1D vector [y1, x1, y2, x2] 63 | boxes: [boxes_count, (y1, x1, y2, x2)] 64 | box_area: float. the area of 'box' 65 | boxes_area: array of length boxes_count. 66 | 67 | Note: the areas are passed in rather than calculated here for 68 | efficency. Calculate once in the caller to avoid duplicate work. 69 | """ 70 | # Calculate intersection areas 71 | y1 = np.maximum(box[0], boxes[:, 0]) 72 | y2 = np.minimum(box[2], boxes[:, 2]) 73 | x1 = np.maximum(box[1], boxes[:, 1]) 74 | x2 = np.minimum(box[3], boxes[:, 3]) 75 | intersection = np.maximum(x2 - x1, 0) * np.maximum(y2 - y1, 0) 76 | union = box_area + boxes_area[:] - intersection[:] 77 | iou = intersection / union 78 | return iou 79 | 80 | 81 | def compute_overlaps(boxes1, boxes2): 82 | """Computes IoU overlaps between two sets of boxes. 83 | boxes1, boxes2: [N, (y1, x1, y2, x2)]. 84 | 85 | For better performance, pass the largest set first and the smaller second. 86 | """ 87 | # Areas of anchors and GT boxes 88 | area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1]) 89 | area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1]) 90 | 91 | # Compute overlaps to generate matrix [boxes1 count, boxes2 count] 92 | # Each cell contains the IoU value. 93 | overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0])) 94 | for i in range(overlaps.shape[1]): 95 | box2 = boxes2[i] 96 | overlaps[:, i] = compute_iou(box2, boxes1, area2[i], area1) 97 | return overlaps 98 | 99 | 100 | def compute_overlaps_masks(masks1, masks2): 101 | '''Computes IoU overlaps between two sets of masks. 102 | masks1, masks2: [Height, Width, instances] 103 | ''' 104 | # flatten masks 105 | masks1 = np.reshape(masks1 > .5, (-1, masks1.shape[-1])).astype(np.float32) 106 | masks2 = np.reshape(masks2 > .5, (-1, masks2.shape[-1])).astype(np.float32) 107 | area1 = np.sum(masks1, axis=0) 108 | area2 = np.sum(masks2, axis=0) 109 | 110 | # intersections and union 111 | intersections = np.dot(masks1.T, masks2) 112 | union = area1[:, None] + area2[None, :] - intersections 113 | overlaps = intersections / union 114 | 115 | return overlaps 116 | 117 | 118 | def non_max_suppression(boxes, scores, threshold): 119 | """Performs non-maximum supression and returns indicies of kept boxes. 120 | boxes: [N, (y1, x1, y2, x2)]. Notice that (y2, x2) lays outside the box. 121 | scores: 1-D array of box scores. 122 | threshold: Float. IoU threshold to use for filtering. 123 | """ 124 | assert boxes.shape[0] > 0 125 | if boxes.dtype.kind != "f": 126 | boxes = boxes.astype(np.float32) 127 | 128 | # Compute box areas 129 | y1 = boxes[:, 0] 130 | x1 = boxes[:, 1] 131 | y2 = boxes[:, 2] 132 | x2 = boxes[:, 3] 133 | area = (y2 - y1) * (x2 - x1) 134 | 135 | # Get indicies of boxes sorted by scores (highest first) 136 | ixs = scores.argsort()[::-1] 137 | 138 | pick = [] 139 | while len(ixs) > 0: 140 | # Pick top box and add its index to the list 141 | i = ixs[0] 142 | pick.append(i) 143 | # Compute IoU of the picked box with the rest 144 | iou = compute_iou(boxes[i], boxes[ixs[1:]], area[i], area[ixs[1:]]) 145 | # Identify boxes with IoU over the threshold. 
This 146 | # returns indicies into ixs[1:], so add 1 to get 147 | # indicies into ixs. 148 | remove_ixs = np.where(iou > threshold)[0] + 1 149 | # Remove indicies of the picked and overlapped boxes. 150 | ixs = np.delete(ixs, remove_ixs) 151 | ixs = np.delete(ixs, 0) 152 | return np.array(pick, dtype=np.int32) 153 | 154 | 155 | def apply_box_deltas(boxes, deltas): 156 | """Applies the given deltas to the given boxes. 157 | boxes: [N, (y1, x1, y2, x2)]. Note that (y2, x2) is outside the box. 158 | deltas: [N, (dy, dx, log(dh), log(dw))] 159 | """ 160 | boxes = boxes.astype(np.float32) 161 | # Convert to y, x, h, w 162 | height = boxes[:, 2] - boxes[:, 0] 163 | width = boxes[:, 3] - boxes[:, 1] 164 | center_y = boxes[:, 0] + 0.5 * height 165 | center_x = boxes[:, 1] + 0.5 * width 166 | # Apply deltas 167 | center_y += deltas[:, 0] * height 168 | center_x += deltas[:, 1] * width 169 | height *= np.exp(deltas[:, 2]) 170 | width *= np.exp(deltas[:, 3]) 171 | # Convert back to y1, x1, y2, x2 172 | y1 = center_y - 0.5 * height 173 | x1 = center_x - 0.5 * width 174 | y2 = y1 + height 175 | x2 = x1 + width 176 | return np.stack([y1, x1, y2, x2], axis=1) 177 | 178 | 179 | def box_refinement_graph(box, gt_box): 180 | """Compute refinement needed to transform box to gt_box. 181 | box and gt_box are [N, (y1, x1, y2, x2)] 182 | """ 183 | box = tf.cast(box, tf.float32) 184 | gt_box = tf.cast(gt_box, tf.float32) 185 | 186 | height = box[:, 2] - box[:, 0] 187 | width = box[:, 3] - box[:, 1] 188 | center_y = box[:, 0] + 0.5 * height 189 | center_x = box[:, 1] + 0.5 * width 190 | 191 | gt_height = gt_box[:, 2] - gt_box[:, 0] 192 | gt_width = gt_box[:, 3] - gt_box[:, 1] 193 | gt_center_y = gt_box[:, 0] + 0.5 * gt_height 194 | gt_center_x = gt_box[:, 1] + 0.5 * gt_width 195 | 196 | dy = (gt_center_y - center_y) / height 197 | dx = (gt_center_x - center_x) / width 198 | dh = tf.log(gt_height / height) 199 | dw = tf.log(gt_width / width) 200 | 201 | result = tf.stack([dy, dx, dh, dw], axis=1) 202 | return result 203 | 204 | 205 | def box_refinement(box, gt_box): 206 | """Compute refinement needed to transform box to gt_box. 207 | box and gt_box are [N, (y1, x1, y2, x2)]. (y2, x2) is 208 | assumed to be outside the box. 209 | """ 210 | box = box.astype(np.float32) 211 | gt_box = gt_box.astype(np.float32) 212 | 213 | height = box[:, 2] - box[:, 0] 214 | width = box[:, 3] - box[:, 1] 215 | center_y = box[:, 0] + 0.5 * height 216 | center_x = box[:, 1] + 0.5 * width 217 | 218 | gt_height = gt_box[:, 2] - gt_box[:, 0] 219 | gt_width = gt_box[:, 3] - gt_box[:, 1] 220 | gt_center_y = gt_box[:, 0] + 0.5 * gt_height 221 | gt_center_x = gt_box[:, 1] + 0.5 * gt_width 222 | 223 | dy = (gt_center_y - center_y) / height 224 | dx = (gt_center_x - center_x) / width 225 | dh = np.log(gt_height / height) 226 | dw = np.log(gt_width / width) 227 | 228 | return np.stack([dy, dx, dh, dw], axis=1) 229 | 230 | 231 | ############################################################ 232 | # Dataset 233 | ############################################################ 234 | 235 | class Dataset(object): 236 | """The base class for dataset classes. 237 | To use it, create a new class that adds functions specific to the dataset 238 | you want to use. For example: 239 | 240 | class CatsAndDogsDataset(Dataset): 241 | def load_cats_and_dogs(self): 242 | ... 243 | def load_mask(self, image_id): 244 | ... 245 | def image_reference(self, image_id): 246 | ... 247 | 248 | See COCODataset and ShapesDataset as examples. 
249 | """ 250 | 251 | def __init__(self, class_map=None): 252 | self._image_ids = [] 253 | self.image_info = [] 254 | # Background is always the first class 255 | self.class_info = [{"source": "", "id": 0, "name": "BG"}] 256 | self.source_class_ids = {} 257 | 258 | def add_class(self, source, class_id, class_name): 259 | assert "." not in source, "Source name cannot contain a dot" 260 | # Does the class exist already? 261 | for info in self.class_info: 262 | if info['source'] == source and info["id"] == class_id: 263 | # source.class_id combination already available, skip 264 | return 265 | # Add the class 266 | self.class_info.append({ 267 | "source": source, 268 | "id": class_id, 269 | "name": class_name, 270 | }) 271 | 272 | def add_image(self, source, image_id, path, **kwargs): 273 | image_info = { 274 | "id": image_id, 275 | "source": source, 276 | "path": path, 277 | } 278 | image_info.update(kwargs) 279 | self.image_info.append(image_info) 280 | 281 | def image_reference(self, image_id): 282 | """Return a link to the image in its source Website or details about 283 | the image that help looking it up or debugging it. 284 | 285 | Override for your dataset, but pass to this function 286 | if you encounter images not in your dataset. 287 | """ 288 | return "" 289 | 290 | def prepare(self, class_map=None): 291 | """Prepares the Dataset class for use. 292 | 293 | TODO: class map is not supported yet. When done, it should handle mapping 294 | classes from different datasets to the same class ID. 295 | """ 296 | 297 | def clean_name(name): 298 | """Returns a shorter version of object names for cleaner display.""" 299 | return ",".join(name.split(",")[:1]) 300 | 301 | # Build (or rebuild) everything else from the info dicts. 302 | self.num_classes = len(self.class_info) 303 | self.class_ids = np.arange(self.num_classes) 304 | self.class_names = [clean_name(c["name"]) for c in self.class_info] 305 | self.num_images = len(self.image_info) 306 | self._image_ids = np.arange(self.num_images) 307 | 308 | # Mapping from source class and image IDs to internal IDs 309 | self.class_from_source_map = {"{}.{}".format(info['source'], info['id']): id 310 | for info, id in zip(self.class_info, self.class_ids)} 311 | self.image_from_source_map = {"{}.{}".format(info['source'], info['id']): id 312 | for info, id in zip(self.image_info, self.image_ids)} 313 | 314 | # Map sources to class_ids they support 315 | self.sources = list(set([i['source'] for i in self.class_info])) 316 | self.source_class_ids = {} 317 | # Loop over datasets 318 | for source in self.sources: 319 | self.source_class_ids[source] = [] 320 | # Find classes that belong to this dataset 321 | for i, info in enumerate(self.class_info): 322 | # Include BG class in all datasets 323 | if i == 0 or source == info['source']: 324 | self.source_class_ids[source].append(i) 325 | 326 | def map_source_class_id(self, source_class_id): 327 | """Takes a source class ID and returns the int class ID assigned to it. 
328 | 329 | For example: 330 | dataset.map_source_class_id("coco.12") -> 23 331 | """ 332 | return self.class_from_source_map[source_class_id] 333 | 334 | def get_source_class_id(self, class_id, source): 335 | """Map an internal class ID to the corresponding class ID in the source dataset.""" 336 | info = self.class_info[class_id] 337 | assert info['source'] == source 338 | return info['id'] 339 | 340 | def append_data(self, class_info, image_info): 341 | self.external_to_class_id = {} 342 | for i, c in enumerate(self.class_info): 343 | for ds, id in c["map"]: 344 | self.external_to_class_id[ds + str(id)] = i 345 | 346 | # Map external image IDs to internal ones. 347 | self.external_to_image_id = {} 348 | for i, info in enumerate(self.image_info): 349 | self.external_to_image_id[info["ds"] + str(info["id"])] = i 350 | 351 | @property 352 | def image_ids(self): 353 | return self._image_ids 354 | 355 | def source_image_link(self, image_id): 356 | """Returns the path or URL to the image. 357 | Override this to return a URL to the image if it's availble online for easy 358 | debugging. 359 | """ 360 | return self.image_info[image_id]["path"] 361 | 362 | def load_image(self, image_id): 363 | """Load the specified image and return a [H,W,3] Numpy array. 364 | """ 365 | # Load image 366 | image = skimage.io.imread(self.image_info[image_id]['path']) 367 | # If grayscale. Convert to RGB for consistency. 368 | if image.ndim != 3: 369 | image = skimage.color.gray2rgb(image) 370 | # If has an alpha channel, remove it for consistency 371 | if image.shape[-1] == 4: 372 | image = image[..., :3] 373 | return image 374 | 375 | def load_mask(self, image_id): 376 | """Load instance masks for the given image. 377 | 378 | Different datasets use different ways to store masks. Override this 379 | method to load instance masks and return them in the form of am 380 | array of binary masks of shape [height, width, instances]. 381 | 382 | Returns: 383 | masks: A bool array of shape [height, width, instance count] with 384 | a binary mask per instance. 385 | class_ids: a 1D array of class IDs of the instance masks. 386 | """ 387 | # Override this function to load a mask from your dataset. 388 | # Otherwise, it returns an empty mask. 389 | mask = np.empty([0, 0, 0]) 390 | class_ids = np.empty([0], np.int32) 391 | return mask, class_ids 392 | 393 | 394 | def resize_image(image, min_dim=None, max_dim=None, min_scale=None, mode="square"): 395 | """Resizes an image keeping the aspect ratio unchanged. 396 | 397 | min_dim: if provided, resizes the image such that it's smaller 398 | dimension == min_dim 399 | max_dim: if provided, ensures that the image longest side doesn't 400 | exceed this value. 401 | min_scale: if provided, ensure that the image is scaled up by at least 402 | this percent even if min_dim doesn't require it. 403 | mode: Resizing mode. 404 | none: No resizing. Return the image unchanged. 405 | square: Resize and pad with zeros to get a square image 406 | of size [max_dim, max_dim]. 407 | pad64: Pads width and height with zeros to make them multiples of 64. 408 | If min_dim or min_scale are provided, it scales the image up 409 | before padding. max_dim is ignored in this mode. 410 | The multiple of 64 is needed to ensure smooth scaling of feature 411 | maps up and down the 6 levels of the FPN pyramid (2**6=64). 412 | crop: Picks random crops from the image. First, scales the image based 413 | on min_dim and min_scale, then picks a random crop of 414 | size min_dim x min_dim. Can be used in training only. 
415 | max_dim is not used in this mode. 416 | 417 | Returns: 418 | image: the resized image 419 | window: (y1, x1, y2, x2). If max_dim is provided, padding might 420 | be inserted in the returned image. If so, this window is the 421 | coordinates of the image part of the full image (excluding 422 | the padding). The x2, y2 pixels are not included. 423 | scale: The scale factor used to resize the image 424 | padding: Padding added to the image [(top, bottom), (left, right), (0, 0)] 425 | """ 426 | # Keep track of image dtype and return results in the same dtype 427 | image_dtype = image.dtype 428 | # Default window (y1, x1, y2, x2) and default scale == 1. 429 | h, w = image.shape[:2] 430 | window = (0, 0, h, w) 431 | scale = 1 432 | padding = [(0, 0), (0, 0), (0, 0)] 433 | crop = None 434 | 435 | if mode == "none": 436 | return image, window, scale, padding, crop 437 | 438 | # Scale? 439 | if min_dim: 440 | # Scale up but not down 441 | scale = max(1, min_dim / min(h, w)) 442 | if min_scale and scale < min_scale: 443 | scale = min_scale 444 | 445 | # Does it exceed max dim? 446 | if max_dim and mode == "square": 447 | image_max = max(h, w) 448 | if round(image_max * scale) > max_dim: 449 | scale = max_dim / image_max 450 | 451 | # Resize image using bilinear interpolation 452 | if scale != 1: 453 | image = skimage.transform.resize( 454 | image, (round(h * scale), round(w * scale)), 455 | order=1, mode="constant", preserve_range=True) 456 | 457 | # Need padding or cropping? 458 | if mode == "square": 459 | # Get new height and width 460 | h, w = image.shape[:2] 461 | top_pad = (max_dim - h) // 2 462 | bottom_pad = max_dim - h - top_pad 463 | left_pad = (max_dim - w) // 2 464 | right_pad = max_dim - w - left_pad 465 | padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)] 466 | image = np.pad(image, padding, mode='constant', constant_values=0) 467 | window = (top_pad, left_pad, h + top_pad, w + left_pad) 468 | elif mode == "pad64": 469 | h, w = image.shape[:2] 470 | # Both sides must be divisible by 64 471 | assert min_dim % 64 == 0, "Minimum dimension must be a multiple of 64" 472 | # Height 473 | if h % 64 > 0: 474 | max_h = h - (h % 64) + 64 475 | top_pad = (max_h - h) // 2 476 | bottom_pad = max_h - h - top_pad 477 | else: 478 | top_pad = bottom_pad = 0 479 | # Width 480 | if w % 64 > 0: 481 | max_w = w - (w % 64) + 64 482 | left_pad = (max_w - w) // 2 483 | right_pad = max_w - w - left_pad 484 | else: 485 | left_pad = right_pad = 0 486 | padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)] 487 | image = np.pad(image, padding, mode='constant', constant_values=0) 488 | window = (top_pad, left_pad, h + top_pad, w + left_pad) 489 | elif mode == "crop": 490 | # Pick a random crop 491 | h, w = image.shape[:2] 492 | y = random.randint(0, (h - min_dim)) 493 | x = random.randint(0, (w - min_dim)) 494 | crop = (y, x, min_dim, min_dim) 495 | image = image[y:y + min_dim, x:x + min_dim] 496 | window = (0, 0, min_dim, min_dim) 497 | else: 498 | raise Exception("Mode {} not supported".format(mode)) 499 | return image.astype(image_dtype), window, scale, padding, crop 500 | 501 | 502 | def resize_mask(mask, scale, padding, crop=None): 503 | """Resizes a mask using the given scale and padding. 504 | Typically, you get the scale and padding from resize_image() to 505 | ensure both, the image and the mask, are resized consistently. 
506 | 507 | scale: mask scaling factor 508 | padding: Padding to add to the mask in the form 509 | [(top, bottom), (left, right), (0, 0)] 510 | """ 511 | # Suppress warning from scipy 0.13.0, the output shape of zoom() is 512 | # calculated with round() instead of int() 513 | with warnings.catch_warnings(): 514 | warnings.simplefilter("ignore") 515 | mask = scipy.ndimage.zoom(mask, zoom=[scale, scale, 1], order=0) 516 | if crop is not None: 517 | y, x, h, w = crop 518 | mask = mask[y:y + h, x:x + w] 519 | else: 520 | mask = np.pad(mask, padding, mode='constant', constant_values=0) 521 | return mask 522 | 523 | 524 | def minimize_mask(bbox, mask, mini_shape): 525 | """Resize masks to a smaller version to reduce memory load. 526 | Mini-masks can be resized back to image scale using expand_masks() 527 | 528 | See inspect_data.ipynb notebook for more details. 529 | """ 530 | mini_mask = np.zeros(mini_shape + (mask.shape[-1],), dtype=bool) 531 | for i in range(mask.shape[-1]): 532 | # Pick slice and cast to bool in case load_mask() returned wrong dtype 533 | m = mask[:, :, i].astype(bool) 534 | y1, x1, y2, x2 = bbox[i][:4] 535 | m = m[y1:y2, x1:x2] 536 | if m.size == 0: 537 | raise Exception("Invalid bounding box with area of zero") 538 | # Resize with bilinear interpolation 539 | m = skimage.transform.resize(m, mini_shape, order=1, mode="constant") 540 | mini_mask[:, :, i] = np.around(m).astype(np.bool) 541 | return mini_mask 542 | 543 | 544 | def expand_mask(bbox, mini_mask, image_shape): 545 | """Resizes mini masks back to image size. Reverses the change 546 | of minimize_mask(). 547 | 548 | See inspect_data.ipynb notebook for more details. 549 | """ 550 | mask = np.zeros(image_shape[:2] + (mini_mask.shape[-1],), dtype=bool) 551 | for i in range(mask.shape[-1]): 552 | m = mini_mask[:, :, i] 553 | y1, x1, y2, x2 = bbox[i][:4] 554 | h = y2 - y1 555 | w = x2 - x1 556 | # Resize with bilinear interpolation 557 | m = skimage.transform.resize(m, (h, w), order=1, mode="constant") 558 | mask[y1:y2, x1:x2, i] = np.around(m).astype(np.bool) 559 | return mask 560 | 561 | 562 | # TODO: Build and use this function to reduce code duplication 563 | def mold_mask(mask, config): 564 | pass 565 | 566 | 567 | def unmold_mask(mask, bbox, image_shape): 568 | """Converts a mask generated by the neural network to a format similar 569 | to its original shape. 570 | mask: [height, width] of type float. A small, typically 28x28 mask. 571 | bbox: [y1, x1, y2, x2]. The box to fit the mask in. 572 | 573 | Returns a binary mask with the same size as the original image. 574 | """ 575 | threshold = 0.5 576 | y1, x1, y2, x2 = bbox 577 | mask = skimage.transform.resize(mask, (y2 - y1, x2 - x1), order=1, mode="constant") 578 | mask = np.where(mask >= threshold, 1, 0).astype(np.bool) 579 | 580 | # Put the mask in the right location. 581 | full_mask = np.zeros(image_shape[:2], dtype=np.bool) 582 | full_mask[y1:y2, x1:x2] = mask 583 | return full_mask 584 | 585 | 586 | ############################################################ 587 | # Anchors 588 | ############################################################ 589 | 590 | def generate_anchors(scales, ratios, shape, feature_stride, anchor_stride): 591 | """ 592 | scales: 1D array of anchor sizes in pixels. Example: [32, 64, 128] 593 | ratios: 1D array of anchor ratios of width/height. Example: [0.5, 1, 2] 594 | shape: [height, width] spatial shape of the feature map over which 595 | to generate anchors. 
596 | feature_stride: Stride of the feature map relative to the image in pixels. 597 | anchor_stride: Stride of anchors on the feature map. For example, if the 598 | value is 2 then generate anchors for every other feature map pixel. 599 | """ 600 | # Get all combinations of scales and ratios 601 | scales, ratios = np.meshgrid(np.array(scales), np.array(ratios)) 602 | scales = scales.flatten() 603 | ratios = ratios.flatten() 604 | 605 | # Enumerate heights and widths from scales and ratios 606 | heights = scales / np.sqrt(ratios) 607 | widths = scales * np.sqrt(ratios) 608 | 609 | # Enumerate shifts in feature space 610 | shifts_y = np.arange(0, shape[0], anchor_stride) * feature_stride 611 | shifts_x = np.arange(0, shape[1], anchor_stride) * feature_stride 612 | shifts_x, shifts_y = np.meshgrid(shifts_x, shifts_y) 613 | 614 | # Enumerate combinations of shifts, widths, and heights 615 | box_widths, box_centers_x = np.meshgrid(widths, shifts_x) 616 | box_heights, box_centers_y = np.meshgrid(heights, shifts_y) 617 | 618 | # Reshape to get a list of (y, x) and a list of (h, w) 619 | box_centers = np.stack( 620 | [box_centers_y, box_centers_x], axis=2).reshape([-1, 2]) 621 | box_sizes = np.stack([box_heights, box_widths], axis=2).reshape([-1, 2]) 622 | 623 | # Convert to corner coordinates (y1, x1, y2, x2) 624 | boxes = np.concatenate([box_centers - 0.5 * box_sizes, 625 | box_centers + 0.5 * box_sizes], axis=1) 626 | return boxes 627 | 628 | 629 | def generate_pyramid_anchors(scales, ratios, feature_shapes, feature_strides, 630 | anchor_stride): 631 | """Generate anchors at different levels of a feature pyramid. Each scale 632 | is associated with a level of the pyramid, but each ratio is used in 633 | all levels of the pyramid. 634 | 635 | Returns: 636 | anchors: [N, (y1, x1, y2, x2)]. All generated anchors in one array. Sorted 637 | with the same order of the given scales. So, anchors of scale[0] come 638 | first, then anchors of scale[1], and so on. 639 | """ 640 | # Anchors 641 | # [anchor_count, (y1, x1, y2, x2)] 642 | anchors = [] 643 | for i in range(len(scales)): 644 | anchors.append(generate_anchors(scales[i], ratios, feature_shapes[i], 645 | feature_strides[i], anchor_stride)) 646 | return np.concatenate(anchors, axis=0) 647 | 648 | 649 | ############################################################ 650 | # Miscellaneous 651 | ############################################################ 652 | 653 | def trim_zeros(x): 654 | """It's common to have tensors larger than the available data and 655 | pad with zeros. This function removes rows that are all zeros. 656 | 657 | x: [rows, columns]. 658 | """ 659 | assert len(x.shape) == 2 660 | return x[~np.all(x == 0, axis=1)] 661 | 662 | 663 | def compute_matches(gt_boxes, gt_class_ids, gt_masks, 664 | pred_boxes, pred_class_ids, pred_scores, pred_masks, 665 | iou_threshold=0.5, score_threshold=0.0): 666 | """Finds matches between prediction and ground truth instances. 667 | 668 | Returns: 669 | gt_match: 1-D array. For each GT box it has the index of the matched 670 | predicted box. 671 | pred_match: 1-D array. For each predicted box, it has the index of 672 | the matched ground truth box. 673 | overlaps: [pred_boxes, gt_boxes] IoU overlaps. 
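    For example (an illustrative sketch): with 2 ground-truth boxes and 3
    predictions where only the top-scoring prediction overlaps GT box 0 with
    a matching class, the outputs would look like:

        gt_match   = [0., -1.]        # GT 0 matched by prediction 0; GT 1 unmatched
        pred_match = [0., -1., -1.]   # prediction 0 matched to GT 0; the rest unmatched
        overlaps.shape == (3, 2)      # IoU of every prediction against every GT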
674 |     """
675 |     # Trim zero padding
676 |     # TODO: cleaner to do zero unpadding upstream
677 |     gt_boxes = trim_zeros(gt_boxes)
678 |     gt_masks = gt_masks[..., :gt_boxes.shape[0]]
679 |     pred_boxes = trim_zeros(pred_boxes)
680 |     pred_scores = pred_scores[:pred_boxes.shape[0]]
681 |     # Sort predictions by score from high to low
682 |     indices = np.argsort(pred_scores)[::-1]
683 |     pred_boxes = pred_boxes[indices]
684 |     pred_class_ids = pred_class_ids[indices]
685 |     pred_scores = pred_scores[indices]
686 |     pred_masks = pred_masks[..., indices]
687 | 
688 |     # Compute IoU overlaps [pred_masks, gt_masks]
689 |     overlaps = compute_overlaps_masks(pred_masks, gt_masks)
690 | 
691 |     # Loop through predictions and find matching ground truth boxes
692 |     match_count = 0
693 |     pred_match = -1 * np.ones([pred_boxes.shape[0]])
694 |     gt_match = -1 * np.ones([gt_boxes.shape[0]])
695 |     for i in range(len(pred_boxes)):
696 |         # Find best matching ground truth box
697 |         # 1. Sort matches by score
698 |         sorted_ixs = np.argsort(overlaps[i])[::-1]
699 |         # 2. Remove low scores
700 |         low_score_idx = np.where(overlaps[i, sorted_ixs] < score_threshold)[0]
701 |         if low_score_idx.size > 0:
702 |             sorted_ixs = sorted_ixs[:low_score_idx[0]]
703 |         # 3. Find the match
704 |         for j in sorted_ixs:
705 |             # If ground truth box is already matched, go to next one
706 |             if gt_match[j] > -1:
707 |                 continue
708 |             # If we reach IoU smaller than the threshold, end the loop
709 |             iou = overlaps[i, j]
710 |             if iou < iou_threshold:
711 |                 break
712 |             # Do we have a match?
713 |             if pred_class_ids[i] == gt_class_ids[j]:
714 |                 match_count += 1
715 |                 gt_match[j] = i
716 |                 pred_match[i] = j
717 |                 break
718 | 
719 |     return gt_match, pred_match, overlaps
720 | 
721 | 
722 | def compute_ap(gt_boxes, gt_class_ids, gt_masks,
723 |                pred_boxes, pred_class_ids, pred_scores, pred_masks,
724 |                iou_threshold=0.5):
725 |     """Compute Average Precision at a set IoU threshold (default 0.5).
726 | 
727 |     Returns:
728 |     mAP: Mean Average Precision
729 |     precisions: List of precisions at different class score thresholds.
730 |     recalls: List of recall values at different class score thresholds.
731 |     overlaps: [pred_boxes, gt_boxes] IoU overlaps.
732 |     """
733 |     # Get matches and overlaps
734 |     gt_match, pred_match, overlaps = compute_matches(
735 |         gt_boxes, gt_class_ids, gt_masks,
736 |         pred_boxes, pred_class_ids, pred_scores, pred_masks,
737 |         iou_threshold)
738 | 
739 |     # Compute precision and recall at each prediction box step
740 |     precisions = np.cumsum(pred_match > -1) / (np.arange(len(pred_match)) + 1)
741 |     recalls = np.cumsum(pred_match > -1).astype(np.float32) / len(gt_match)
742 | 
743 |     # Pad with start and end values to simplify the math
744 |     precisions = np.concatenate([[0], precisions, [0]])
745 |     recalls = np.concatenate([[0], recalls, [1]])
746 | 
747 |     # Ensure precision values decrease but don't increase. This way, the
748 |     # precision value at each recall threshold is the maximum it can be
749 |     # for all following recall thresholds, as specified by the VOC paper.
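    # A small worked example (illustrative only): with 2 GT boxes and 3
    # predictions sorted by score as [correct, wrong, correct], the padded
    # arrays are precisions = [0, 1.0, 0.5, 0.67, 0] and
    # recalls = [0, 0.5, 0.5, 1.0, 1]. The backward maximum below turns the
    # precisions into [1.0, 1.0, 0.67, 0.67, 0], and the sum over the points
    # where recall changes gives AP = 0.5 * 1.0 + 0.5 * (2/3) ≈ 0.83.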
750 |     for i in range(len(precisions) - 2, -1, -1):
751 |         precisions[i] = np.maximum(precisions[i], precisions[i + 1])
752 | 
753 |     # Compute mean AP over recall range
754 |     indices = np.where(recalls[:-1] != recalls[1:])[0] + 1
755 |     mAP = np.sum((recalls[indices] - recalls[indices - 1]) *
756 |                  precisions[indices])
757 | 
758 |     return mAP, precisions, recalls, overlaps
759 | 
760 | 
761 | def compute_ap_range(gt_box, gt_class_id, gt_mask,
762 |                      pred_box, pred_class_id, pred_score, pred_mask,
763 |                      iou_thresholds=None, verbose=1):
764 |     """Compute AP over a range of IoU thresholds. Default range is 0.5-0.95."""
765 |     # Default is 0.5 to 0.95 with increments of 0.05
766 |     iou_thresholds = iou_thresholds or np.arange(0.5, 1.0, 0.05)
767 | 
768 |     # Compute AP over range of IoU thresholds
769 |     AP = []
770 |     for iou_threshold in iou_thresholds:
771 |         ap, precisions, recalls, overlaps =\
772 |             compute_ap(gt_box, gt_class_id, gt_mask,
773 |                        pred_box, pred_class_id, pred_score, pred_mask,
774 |                        iou_threshold=iou_threshold)
775 |         if verbose:
776 |             print("AP @{:.2f}:\t {:.3f}".format(iou_threshold, ap))
777 |         AP.append(ap)
778 |     AP = np.array(AP).mean()
779 |     if verbose:
780 |         print("AP @{:.2f}-{:.2f}:\t {:.3f}".format(
781 |             iou_thresholds[0], iou_thresholds[-1], AP))
782 |     return AP
783 | 
784 | 
785 | def compute_recall(pred_boxes, gt_boxes, iou):
786 |     """Compute the recall at the given IoU threshold. It's an indication
787 |     of how many GT boxes were found by the given prediction boxes.
788 | 
789 |     pred_boxes: [N, (y1, x1, y2, x2)] in image coordinates
790 |     gt_boxes: [N, (y1, x1, y2, x2)] in image coordinates
791 |     """
792 |     # Measure overlaps
793 |     overlaps = compute_overlaps(pred_boxes, gt_boxes)
794 |     iou_max = np.max(overlaps, axis=1)
795 |     iou_argmax = np.argmax(overlaps, axis=1)
796 |     positive_ids = np.where(iou_max >= iou)[0]
797 |     matched_gt_boxes = iou_argmax[positive_ids]
798 | 
799 |     recall = len(set(matched_gt_boxes)) / gt_boxes.shape[0]
800 |     return recall, positive_ids
801 | 
802 | 
803 | # ## Batch Slicing
804 | # Some custom layers support a batch size of 1 only, and require a lot of work
805 | # to support batches greater than 1. This function slices an input tensor
806 | # across the batch dimension and feeds batches of size 1. Effectively, it's
807 | # an easy way to support batches > 1 quickly with little code modification.
808 | # In the long run, it's more efficient to modify the code to support large
809 | # batches and get rid of this function. Consider this a temporary solution.
810 | def batch_slice(inputs, graph_fn, batch_size, names=None):
811 |     """Splits inputs into slices and feeds each slice to a copy of the given
812 |     computation graph and then combines the results. It allows you to run a
813 |     graph on a batch of inputs even if the graph is written to support one
814 |     instance only.
815 | 
816 |     inputs: list of tensors. All must have the same first dimension length
817 |     graph_fn: A function that returns a TF tensor that's part of a graph.
818 |     batch_size: number of slices to divide the data into.
819 |     names: If provided, assigns names to the resulting tensors.
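    For example (an illustrative sketch; `boxes` and `scores` are assumed to be
    tensors whose first dimension is the batch, and `process_one_image` is a
    hypothetical per-image graph function):

        outputs = batch_slice(
            [boxes, scores],
            lambda b, s: process_one_image(b, s),
            batch_size=2,
            names=["processed"])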
820 | """ 821 | if not isinstance(inputs, list): 822 | inputs = [inputs] 823 | 824 | outputs = [] 825 | for i in range(batch_size): 826 | inputs_slice = [x[i] for x in inputs] 827 | output_slice = graph_fn(*inputs_slice) 828 | if not isinstance(output_slice, (tuple, list)): 829 | output_slice = [output_slice] 830 | outputs.append(output_slice) 831 | # Change outputs from a list of slices where each is 832 | # a list of outputs to a list of outputs and each has 833 | # a list of slices 834 | outputs = list(zip(*outputs)) 835 | 836 | if names is None: 837 | names = [None] * len(outputs) 838 | 839 | result = [tf.stack(o, axis=0, name=n) 840 | for o, n in zip(outputs, names)] 841 | if len(result) == 1: 842 | result = result[0] 843 | 844 | return result 845 | 846 | 847 | def download_trained_weights(coco_model_path, verbose=1): 848 | """Download COCO trained weights from Releases. 849 | 850 | coco_model_path: local path of COCO trained weights 851 | """ 852 | if verbose > 0: 853 | print("Downloading pretrained model to " + coco_model_path + " ...") 854 | with urllib.request.urlopen(COCO_MODEL_URL) as resp, open(coco_model_path, 'wb') as out: 855 | shutil.copyfileobj(resp, out) 856 | if verbose > 0: 857 | print("... done downloading pretrained model!") 858 | 859 | 860 | def norm_boxes(boxes, shape): 861 | """Converts boxes from pixel coordinates to normalized coordinates. 862 | boxes: [N, (y1, x1, y2, x2)] in pixel coordinates 863 | shape: [..., (height, width)] in pixels 864 | 865 | Note: In pixel coordinates (y2, x2) is outside the box. But in normalized 866 | coordinates it's inside the box. 867 | 868 | Returns: 869 | [N, (y1, x1, y2, x2)] in normalized coordinates 870 | """ 871 | h, w = shape 872 | scale = np.array([h - 1, w - 1, h - 1, w - 1]) 873 | shift = np.array([0, 0, 1, 1]) 874 | return np.divide((boxes - shift), scale).astype(np.float32) 875 | 876 | 877 | def denorm_boxes(boxes, shape): 878 | """Converts boxes from normalized coordinates to pixel coordinates. 879 | boxes: [N, (y1, x1, y2, x2)] in normalized coordinates 880 | shape: [..., (height, width)] in pixels 881 | 882 | Note: In pixel coordinates (y2, x2) is outside the box. But in normalized 883 | coordinates it's inside the box. 884 | 885 | Returns: 886 | [N, (y1, x1, y2, x2)] in pixel coordinates 887 | """ 888 | h, w = shape 889 | scale = np.array([h - 1, w - 1, h - 1, w - 1]) 890 | shift = np.array([0, 0, 1, 1]) 891 | return np.around(np.multiply(boxes, scale) + shift).astype(np.int32) 892 | -------------------------------------------------------------------------------- /Model/visualize.py: -------------------------------------------------------------------------------- 1 | """ 2 | Mask R-CNN 3 | Display and Visualization Functions. 4 | 5 | Copyright (c) 2017 Matterport, Inc. 
6 | Licensed under the MIT License (see LICENSE for details) 7 | Written by Waleed Abdulla 8 | """ 9 | 10 | import os 11 | import sys 12 | import logging 13 | import random 14 | import itertools 15 | import colorsys 16 | 17 | import numpy as np 18 | from skimage.measure import find_contours 19 | import matplotlib.pyplot as plt 20 | from matplotlib import patches, lines 21 | from matplotlib.patches import Polygon 22 | import IPython.display 23 | 24 | # Root directory of the project 25 | ROOT_DIR = os.path.abspath("../") 26 | 27 | # Import Mask RCNN 28 | sys.path.append(ROOT_DIR) # To find local version of the library 29 | from Model import utils 30 | 31 | 32 | ############################################################ 33 | # Visualization 34 | ############################################################ 35 | 36 | def display_images(images, titles=None, cols=4, cmap=None, norm=None, 37 | interpolation=None): 38 | """Display the given set of images, optionally with titles. 39 | images: list or array of image tensors in HWC format. 40 | titles: optional. A list of titles to display with each image. 41 | cols: number of images per row 42 | cmap: Optional. Color map to use. For example, "Blues". 43 | norm: Optional. A Normalize instance to map values to colors. 44 | interpolation: Optional. Image interporlation to use for display. 45 | """ 46 | titles = titles if titles is not None else [""] * len(images) 47 | rows = len(images) // cols + 1 48 | plt.figure(figsize=(14, 14 * rows // cols)) 49 | i = 1 50 | for image, title in zip(images, titles): 51 | plt.subplot(rows, cols, i) 52 | plt.title(title, fontsize=9) 53 | plt.axis('off') 54 | plt.imshow(image.astype(np.uint8), cmap=cmap, 55 | norm=norm, interpolation=interpolation) 56 | i += 1 57 | plt.show() 58 | 59 | 60 | def random_colors(N, bright=True): 61 | """ 62 | Generate random colors. 63 | To get visually distinct colors, generate them in HSV space then 64 | convert to RGB. 65 | """ 66 | brightness = 1.0 if bright else 0.7 67 | hsv = [(i / N, 1, brightness) for i in range(N)] 68 | colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv)) 69 | random.shuffle(colors) 70 | return colors 71 | 72 | 73 | def apply_mask(image, mask, color, alpha=0.5): 74 | """Apply the given mask to the image. 75 | """ 76 | for c in range(3): 77 | image[:, :, c] = np.where(mask == 1, 78 | image[:, :, c] * 79 | (1 - alpha) + alpha * color[c] * 255, 80 | image[:, :, c]) 81 | return image 82 | 83 | 84 | def display_instances(image, boxes, masks, class_ids, class_names, 85 | scores=None, title="", 86 | figsize=(16, 16), ax=None, 87 | show_mask=True, show_bbox=True, 88 | colors=None, captions=None): 89 | """ 90 | boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates. 
91 | masks: [height, width, num_instances] 92 | class_ids: [num_instances] 93 | class_names: list of class names of the dataset 94 | scores: (optional) confidence scores for each box 95 | title: (optional) Figure title 96 | show_mask, show_bbox: To show masks and bounding boxes or not 97 | figsize: (optional) the size of the image 98 | colors: (optional) An array or colors to use with each object 99 | captions: (optional) A list of strings to use as captions for each object 100 | """ 101 | # Number of instances 102 | N = boxes.shape[0] 103 | if not N: 104 | print("\n*** No instances to display *** \n") 105 | else: 106 | assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0] 107 | 108 | # If no axis is passed, create one and automatically call show() 109 | auto_show = False 110 | if not ax: 111 | _, ax = plt.subplots(1, figsize=figsize) 112 | auto_show = True 113 | 114 | # Generate random colors 115 | colors = colors or random_colors(N) 116 | 117 | # Show area outside image boundaries. 118 | height, width = image.shape[:2] 119 | ax.set_ylim(height + 10, -10) 120 | ax.set_xlim(-10, width + 10) 121 | ax.axis('off') 122 | ax.set_title(title) 123 | 124 | masked_image = image.astype(np.uint32).copy() 125 | for i in range(N): 126 | color = colors[i] 127 | 128 | # Bounding box 129 | if not np.any(boxes[i]): 130 | # Skip this instance. Has no bbox. Likely lost in image cropping. 131 | continue 132 | y1, x1, y2, x2 = boxes[i] 133 | if show_bbox: 134 | p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2, 135 | alpha=0.7, linestyle="dashed", 136 | edgecolor=color, facecolor='none') 137 | ax.add_patch(p) 138 | 139 | # Label 140 | if not captions: 141 | class_id = class_ids[i] 142 | score = scores[i] if scores is not None else None 143 | label = class_names[class_id] 144 | x = random.randint(x1, (x1 + x2) // 2) 145 | caption = "{} {:.3f}".format(label, score) if score else label 146 | else: 147 | caption = captions[i] 148 | ax.text(x1, y1 + 8, caption, 149 | color='w', size=11, backgroundcolor="none") 150 | 151 | # Mask 152 | mask = masks[:, :, i] 153 | if show_mask: 154 | masked_image = apply_mask(masked_image, mask, color) 155 | 156 | # Mask Polygon 157 | # Pad to ensure proper polygons for masks that touch image edges. 158 | padded_mask = np.zeros( 159 | (mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8) 160 | padded_mask[1:-1, 1:-1] = mask 161 | contours = find_contours(padded_mask, 0.5) 162 | for verts in contours: 163 | # Subtract the padding and flip (y, x) to (x, y) 164 | verts = np.fliplr(verts) - 1 165 | p = Polygon(verts, facecolor="none", edgecolor=color) 166 | ax.add_patch(p) 167 | ax.imshow(masked_image.astype(np.uint8)) 168 | if auto_show: 169 | plt.show() 170 | 171 | 172 | def display_differences(image, 173 | gt_box, gt_class_id, gt_mask, 174 | pred_box, pred_class_id, pred_score, pred_mask, 175 | class_names, title="", ax=None, 176 | show_mask=True, show_box=True, 177 | iou_threshold=0.5, score_threshold=0.5): 178 | """Display ground truth and prediction instances on the same image.""" 179 | # Match predictions to ground truth 180 | gt_match, pred_match, overlaps = utils.compute_matches( 181 | gt_box, gt_class_id, gt_mask, 182 | pred_box, pred_class_id, pred_score, pred_mask, 183 | iou_threshold=iou_threshold, score_threshold=score_threshold) 184 | # Ground truth = green. 
Predictions = red 185 | colors = [(0, 1, 0, .8)] * len(gt_match)\ 186 | + [(1, 0, 0, 1)] * len(pred_match) 187 | # Concatenate GT and predictions 188 | class_ids = np.concatenate([gt_class_id, pred_class_id]) 189 | scores = np.concatenate([np.zeros([len(gt_match)]), pred_score]) 190 | boxes = np.concatenate([gt_box, pred_box]) 191 | masks = np.concatenate([gt_mask, pred_mask], axis=-1) 192 | # Captions per instance show score/IoU 193 | captions = ["" for m in gt_match] + ["{:.2f} / {:.2f}".format( 194 | pred_score[i], 195 | (overlaps[i, int(pred_match[i])] 196 | if pred_match[i] > -1 else overlaps[i].max())) 197 | for i in range(len(pred_match))] 198 | # Set title if not provided 199 | title = title or "Ground Truth and Detections\n GT=green, pred=red, captions: score/IoU" 200 | # Display 201 | display_instances( 202 | image, 203 | boxes, masks, class_ids, 204 | class_names, scores, ax=ax, 205 | show_bbox=show_box, show_mask=show_mask, 206 | colors=colors, captions=captions, 207 | title=title) 208 | 209 | 210 | def draw_rois(image, rois, refined_rois, mask, class_ids, class_names, limit=10): 211 | """ 212 | anchors: [n, (y1, x1, y2, x2)] list of anchors in image coordinates. 213 | proposals: [n, 4] the same anchors but refined to fit objects better. 214 | """ 215 | masked_image = image.copy() 216 | 217 | # Pick random anchors in case there are too many. 218 | ids = np.arange(rois.shape[0], dtype=np.int32) 219 | ids = np.random.choice( 220 | ids, limit, replace=False) if ids.shape[0] > limit else ids 221 | 222 | fig, ax = plt.subplots(1, figsize=(12, 12)) 223 | if rois.shape[0] > limit: 224 | plt.title("Showing {} random ROIs out of {}".format( 225 | len(ids), rois.shape[0])) 226 | else: 227 | plt.title("{} ROIs".format(len(ids))) 228 | 229 | # Show area outside image boundaries. 230 | ax.set_ylim(image.shape[0] + 20, -20) 231 | ax.set_xlim(-50, image.shape[1] + 20) 232 | ax.axis('off') 233 | 234 | for i, id in enumerate(ids): 235 | color = np.random.rand(3) 236 | class_id = class_ids[id] 237 | # ROI 238 | y1, x1, y2, x2 = rois[id] 239 | p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2, 240 | edgecolor=color if class_id else "gray", 241 | facecolor='none', linestyle="dashed") 242 | ax.add_patch(p) 243 | # Refined ROI 244 | if class_id: 245 | ry1, rx1, ry2, rx2 = refined_rois[id] 246 | p = patches.Rectangle((rx1, ry1), rx2 - rx1, ry2 - ry1, linewidth=2, 247 | edgecolor=color, facecolor='none') 248 | ax.add_patch(p) 249 | # Connect the top-left corners of the anchor and proposal for easy visualization 250 | ax.add_line(lines.Line2D([x1, rx1], [y1, ry1], color=color)) 251 | 252 | # Label 253 | label = class_names[class_id] 254 | ax.text(rx1, ry1 + 8, "{}".format(label), 255 | color='w', size=11, backgroundcolor="none") 256 | 257 | # Mask 258 | m = utils.unmold_mask(mask[id], rois[id] 259 | [:4].astype(np.int32), image.shape) 260 | masked_image = apply_mask(masked_image, m, color) 261 | 262 | ax.imshow(masked_image) 263 | 264 | # Print stats 265 | print("Positive ROIs: ", class_ids[class_ids > 0].shape[0]) 266 | print("Negative ROIs: ", class_ids[class_ids == 0].shape[0]) 267 | print("Positive Ratio: {:.2f}".format( 268 | class_ids[class_ids > 0].shape[0] / class_ids.shape[0])) 269 | 270 | 271 | # TODO: Replace with matplotlib equivalent? 272 | def draw_box(image, box, color): 273 | """Draw 3-pixel width bounding boxes on the given image array. 274 | color: list of 3 int values for RGB. 
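    For example (an illustrative sketch), drawing a red box on an RGB image
    array:

        image = draw_box(image, (10, 10, 60, 90), color=[255, 0, 0])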
275 | """ 276 | y1, x1, y2, x2 = box 277 | image[y1:y1 + 2, x1:x2] = color 278 | image[y2:y2 + 2, x1:x2] = color 279 | image[y1:y2, x1:x1 + 2] = color 280 | image[y1:y2, x2:x2 + 2] = color 281 | return image 282 | 283 | 284 | def display_top_masks(image, mask, class_ids, class_names, limit=4): 285 | """Display the given image and the top few class masks.""" 286 | to_display = [] 287 | titles = [] 288 | to_display.append(image) 289 | titles.append("H x W={}x{}".format(image.shape[0], image.shape[1])) 290 | # Pick top prominent classes in this image 291 | unique_class_ids = np.unique(class_ids) 292 | mask_area = [np.sum(mask[:, :, np.where(class_ids == i)[0]]) 293 | for i in unique_class_ids] 294 | top_ids = [v[0] for v in sorted(zip(unique_class_ids, mask_area), 295 | key=lambda r: r[1], reverse=True) if v[1] > 0] 296 | # Generate images and titles 297 | for i in range(limit): 298 | class_id = top_ids[i] if i < len(top_ids) else -1 299 | # Pull masks of instances belonging to the same class. 300 | m = mask[:, :, np.where(class_ids == class_id)[0]] 301 | m = np.sum(m * np.arange(1, m.shape[-1] + 1), -1) 302 | to_display.append(m) 303 | titles.append(class_names[class_id] if class_id != -1 else "-") 304 | display_images(to_display, titles=titles, cols=limit + 1, cmap="Blues_r") 305 | 306 | 307 | def plot_precision_recall(AP, precisions, recalls): 308 | """Draw the precision-recall curve. 309 | 310 | AP: Average precision at IoU >= 0.5 311 | precisions: list of precision values 312 | recalls: list of recall values 313 | """ 314 | # Plot the Precision-Recall curve 315 | _, ax = plt.subplots(1) 316 | ax.set_title("Precision-Recall Curve. AP@50 = {:.3f}".format(AP)) 317 | ax.set_ylim(0, 1.1) 318 | ax.set_xlim(0, 1.1) 319 | _ = ax.plot(recalls, precisions) 320 | 321 | 322 | def plot_overlaps(gt_class_ids, pred_class_ids, pred_scores, 323 | overlaps, class_names, threshold=0.5): 324 | """Draw a grid showing how ground truth objects are classified. 325 | gt_class_ids: [N] int. Ground truth class IDs 326 | pred_class_id: [N] int. Predicted class IDs 327 | pred_scores: [N] float. The probability scores of predicted classes 328 | overlaps: [pred_boxes, gt_boxes] IoU overlaps of predictins and GT boxes. 329 | class_names: list of all class names in the dataset 330 | threshold: Float. The prediction probability required to predict a class 331 | """ 332 | gt_class_ids = gt_class_ids[gt_class_ids != 0] 333 | pred_class_ids = pred_class_ids[pred_class_ids != 0] 334 | 335 | plt.figure(figsize=(12, 10)) 336 | plt.imshow(overlaps, interpolation='nearest', cmap=plt.cm.Blues) 337 | plt.yticks(np.arange(len(pred_class_ids)), 338 | ["{} ({:.2f})".format(class_names[int(id)], pred_scores[i]) 339 | for i, id in enumerate(pred_class_ids)]) 340 | plt.xticks(np.arange(len(gt_class_ids)), 341 | [class_names[int(id)] for id in gt_class_ids], rotation=90) 342 | 343 | thresh = overlaps.max() / 2. 
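    # Note: `thresh` only controls text color for readability (white text on
    # the darker cells of the Blues colormap, black or grey otherwise); the
    # "match"/"wrong" labels below are decided by the `threshold` argument.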
344 |     for i, j in itertools.product(range(overlaps.shape[0]),
345 |                                    range(overlaps.shape[1])):
346 |         text = ""
347 |         if overlaps[i, j] > threshold:
348 |             text = "match" if gt_class_ids[j] == pred_class_ids[i] else "wrong"
349 |         color = ("white" if overlaps[i, j] > thresh
350 |                  else "black" if overlaps[i, j] > 0
351 |                  else "grey")
352 |         plt.text(j, i, "{:.3f}\n{}".format(overlaps[i, j], text),
353 |                  horizontalalignment="center", verticalalignment="center",
354 |                  fontsize=9, color=color)
355 | 
356 |     plt.tight_layout()
357 |     plt.xlabel("Ground Truth")
358 |     plt.ylabel("Predictions")
359 | 
360 | 
361 | def draw_boxes(image, boxes=None, refined_boxes=None,
362 |                masks=None, captions=None, visibilities=None,
363 |                title="", ax=None):
364 |     """Draw bounding boxes and segmentation masks with different
365 |     customizations.
366 | 
367 |     boxes: [N, (y1, x1, y2, x2, class_id)] in image coordinates.
368 |     refined_boxes: Like boxes, but draw with solid lines to show
369 |         that they're the result of refining 'boxes'.
370 |     masks: [height, width, N]
371 |     captions: List of N titles to display on each box
372 |     visibilities: (optional) List of values of 0, 1, or 2. Determine how
373 |         prominent each bounding box should be.
374 |     title: An optional title to show over the image
375 |     ax: (optional) Matplotlib axis to draw on.
376 |     """
377 |     # Number of boxes
378 |     assert boxes is not None or refined_boxes is not None
379 |     N = boxes.shape[0] if boxes is not None else refined_boxes.shape[0]
380 | 
381 |     # Matplotlib Axis
382 |     if not ax:
383 |         _, ax = plt.subplots(1, figsize=(12, 12))
384 | 
385 |     # Generate random colors
386 |     colors = random_colors(N)
387 | 
388 |     # Show area outside image boundaries.
389 |     margin = image.shape[0] // 10
390 |     ax.set_ylim(image.shape[0] + margin, -margin)
391 |     ax.set_xlim(-margin, image.shape[1] + margin)
392 |     ax.axis('off')
393 | 
394 |     ax.set_title(title)
395 | 
396 |     masked_image = image.astype(np.uint32).copy()
397 |     for i in range(N):
398 |         # Box visibility
399 |         visibility = visibilities[i] if visibilities is not None else 1
400 |         if visibility == 0:
401 |             color = "gray"
402 |             style = "dotted"
403 |             alpha = 0.5
404 |         elif visibility == 1:
405 |             color = colors[i]
406 |             style = "dotted"
407 |             alpha = 1
408 |         elif visibility == 2:
409 |             color = colors[i]
410 |             style = "solid"
411 |             alpha = 1
412 | 
413 |         # Boxes
414 |         if boxes is not None:
415 |             if not np.any(boxes[i]):
416 |                 # Skip this instance. Has no bbox. Likely lost in cropping.
417 | continue 418 | y1, x1, y2, x2 = boxes[i] 419 | p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2, 420 | alpha=alpha, linestyle=style, 421 | edgecolor=color, facecolor='none') 422 | ax.add_patch(p) 423 | 424 | # Refined boxes 425 | if refined_boxes is not None and visibility > 0: 426 | ry1, rx1, ry2, rx2 = refined_boxes[i].astype(np.int32) 427 | p = patches.Rectangle((rx1, ry1), rx2 - rx1, ry2 - ry1, linewidth=2, 428 | edgecolor=color, facecolor='none') 429 | ax.add_patch(p) 430 | # Connect the top-left corners of the anchor and proposal 431 | if boxes is not None: 432 | ax.add_line(lines.Line2D([x1, rx1], [y1, ry1], color=color)) 433 | 434 | # Captions 435 | if captions is not None: 436 | caption = captions[i] 437 | # If there are refined boxes, display captions on them 438 | if refined_boxes is not None: 439 | y1, x1, y2, x2 = ry1, rx1, ry2, rx2 440 | x = random.randint(x1, (x1 + x2) // 2) 441 | ax.text(x1, y1, caption, size=11, verticalalignment='top', 442 | color='w', backgroundcolor="none", 443 | bbox={'facecolor': color, 'alpha': 0.5, 444 | 'pad': 2, 'edgecolor': 'none'}) 445 | 446 | # Masks 447 | if masks is not None: 448 | mask = masks[:, :, i] 449 | masked_image = apply_mask(masked_image, mask, color) 450 | # Mask Polygon 451 | # Pad to ensure proper polygons for masks that touch image edges. 452 | padded_mask = np.zeros( 453 | (mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8) 454 | padded_mask[1:-1, 1:-1] = mask 455 | contours = find_contours(padded_mask, 0.5) 456 | for verts in contours: 457 | # Subtract the padding and flip (y, x) to (x, y) 458 | verts = np.fliplr(verts) - 1 459 | p = Polygon(verts, facecolor="none", edgecolor=color) 460 | ax.add_patch(p) 461 | ax.imshow(masked_image.astype(np.uint8)) 462 | 463 | 464 | def display_table(table): 465 | """Display values in a table format. 466 | table: an iterable of rows, and each row is an iterable of values. 467 | """ 468 | html = "" 469 | for row in table: 470 | row_html = "" 471 | for col in row: 472 | row_html += "