├── CNN_Implementation.ipynb ├── Model ├── a.txt └── model.h5 ├── Project_21C02.ipynb ├── README.md └── requirements.txt /CNN_Implementation.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "nbformat": 4, 3 | "nbformat_minor": 0, 4 | "metadata": { 5 | "colab": { 6 | "name": "CNN_Implementation.ipynb", 7 | "provenance": [], 8 | "authorship_tag": "ABX9TyOeeY6XRSB3SsQGHW7mkKJ8" 9 | }, 10 | "kernelspec": { 11 | "name": "python3", 12 | "display_name": "Python 3" 13 | }, 14 | "accelerator": "GPU" 15 | }, 16 | "cells": [ 17 | { 18 | "cell_type": "markdown", 19 | "metadata": { 20 | "id": "5xVALNYBurlN" 21 | }, 22 | "source": [ 23 | "# **SAR Target/Object Detection Using Deep Learning(CNN)**\n", 24 | "\n", 25 | "The notebook represents real time implementation of data processing, model building and testing of CNN Algorithm for detection of the target in MSTAR dataset." 26 | ] 27 | }, 28 | { 29 | "cell_type": "code", 30 | "metadata": { 31 | "id": "glfdH__9bHpg" 32 | }, 33 | "source": [ 34 | "from keras.layers import Conv2D, MaxPooling2D\n", 35 | "from keras.layers import Activation, Dropout, Flatten, Dense,Input,Multiply\n", 36 | "from keras.optimizers import SGD\n", 37 | "from keras.preprocessing.image import ImageDataGenerator\n", 38 | "from keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D\n", 39 | "from keras.layers import Activation, Dropout, Flatten, Dense\n", 40 | "import numpy as np\n", 41 | "from keras import backend as K\n", 42 | "from keras.models import Model\n", 43 | "from keras.callbacks import EarlyStopping" 44 | ], 45 | "execution_count": null, 46 | "outputs": [] 47 | }, 48 | { 49 | "cell_type": "code", 50 | "metadata": { 51 | "id": "MKrDUjkKbWSX" 52 | }, 53 | "source": [ 54 | "train_data_dir = '/content/gdrive/My Drive/data/train/'\n", 55 | "validation_data_dir = '/content/gdrive/My Drive/data/test/'" 56 | ], 57 | "execution_count": null, 58 | "outputs": [] 59 | }, 60 | { 61 | "cell_type": "code", 
62 | "metadata": { 63 | "id": "62O-IUmibwdU" 64 | }, 65 | "source": [ 66 | "img_height, img_width=100,100\n", 67 | "epochs = 100\n", 68 | "batch_size = 16\n", 69 | "nb_train_samples = 2049\n", 70 | "nb_validation_samples = 1838" 71 | ], 72 | "execution_count": null, 73 | "outputs": [] 74 | }, 75 | { 76 | "cell_type": "code", 77 | "metadata": { 78 | "id": "vHPUTywxb0zk" 79 | }, 80 | "source": [ 81 | "train_datagen = ImageDataGenerator(\n", 82 | " rescale=1./255,\n", 83 | " shear_range=0.2,\n", 84 | " rotation_range=10.,\n", 85 | " zoom_range=0.2,\n", 86 | " horizontal_flip=True)\n", 87 | "test_datagen = ImageDataGenerator(rescale=1./255)" 88 | ], 89 | "execution_count": null, 90 | "outputs": [] 91 | }, 92 | { 93 | "cell_type": "code", 94 | "metadata": { 95 | "colab": { 96 | "base_uri": "https://localhost:8080/" 97 | }, 98 | "id": "SNL-WBLHb4SU", 99 | "outputId": "f5baf38c-cf14-4224-cd43-146ea75dd65a" 100 | }, 101 | "source": [ 102 | "train_generator = train_datagen.flow_from_directory(\n", 103 | " train_data_dir,\n", 104 | " target_size=(img_height, img_width),\n", 105 | " batch_size=batch_size,\n", 106 | " class_mode='categorical')" 107 | ], 108 | "execution_count": null, 109 | "outputs": [ 110 | { 111 | "output_type": "stream", 112 | "text": [ 113 | "Found 2536 images belonging to 10 classes.\n" 114 | ], 115 | "name": "stdout" 116 | } 117 | ] 118 | }, 119 | { 120 | "cell_type": "code", 121 | "metadata": { 122 | "colab": { 123 | "base_uri": "https://localhost:8080/" 124 | }, 125 | "id": "NepEi6I5b-Kc", 126 | "outputId": "926a5e48-ea6a-4da5-adaa-5813e950093f" 127 | }, 128 | "source": [ 129 | "\n", 130 | "validation_generator = test_datagen.flow_from_directory(\n", 131 | " validation_data_dir,\n", 132 | " target_size=(img_height, img_width),\n", 133 | " batch_size=batch_size,\n", 134 | " class_mode='categorical')" 135 | ], 136 | "execution_count": null, 137 | "outputs": [ 138 | { 139 | "output_type": "stream", 140 | "text": [ 141 | "Found 2636 images belonging to 10 
classes.\n" 142 | ], 143 | "name": "stdout" 144 | } 145 | ] 146 | }, 147 | { 148 | "cell_type": "code", 149 | "metadata": { 150 | "id": "xqzfxBFTcLRk" 151 | }, 152 | "source": [ 153 | "inputs = Input(shape=(100,100,3))\n", 154 | "x = Conv2D(8, kernel_size=(5, 5), activation='relu')(inputs)\n", 155 | "x = MaxPooling2D(pool_size=(2, 2))(x)\n", 156 | "x = Conv2D(16, kernel_size=(5, 5), activation='relu')(x)\n", 157 | "x = MaxPooling2D(pool_size=(2, 2))(x)\n", 158 | "x = Conv2D(32, kernel_size=(5, 5), activation='relu')(x)\n", 159 | "x = MaxPooling2D(pool_size=(2, 2))(x)\n", 160 | "x = Flatten()(x)\n", 161 | "x = Dense(120, activation='relu')(x)\n", 162 | "x = Dense(84, activation='relu')(x)\n", 163 | "x = Dense(10, activation='softmax')(x)\n", 164 | "cnn_model = Model(inputs=inputs,outputs = x)" 165 | ], 166 | "execution_count": null, 167 | "outputs": [] 168 | }, 169 | { 170 | "cell_type": "code", 171 | "metadata": { 172 | "id": "LVhSjedycS_l" 173 | }, 174 | "source": [ 175 | "cnn_model.compile(loss='categorical_crossentropy',\n", 176 | " optimizer = SGD(lr=1e-3,momentum=0.9),#SGD(lr=1e-3,momentum=0.9)\n", 177 | " metrics=['accuracy'])" 178 | ], 179 | "execution_count": null, 180 | "outputs": [] 181 | }, 182 | { 183 | "cell_type": "code", 184 | "metadata": { 185 | "colab": { 186 | "base_uri": "https://localhost:8080/" 187 | }, 188 | "id": "ANdHQwHGceVM", 189 | "outputId": "695012aa-98ed-4d20-84fb-89a3f07beea3" 190 | }, 191 | "source": [ 192 | "cnn_model_history = cnn_model.fit_generator(\n", 193 | " train_generator,\n", 194 | " steps_per_epoch=nb_train_samples// batch_size ,\n", 195 | " epochs=epochs,\n", 196 | " validation_data=validation_generator,\n", 197 | " validation_steps=nb_validation_samples// batch_size,\n", 198 | " workers = 4)" 199 | ], 200 | "execution_count": null, 201 | "outputs": [ 202 | { 203 | "output_type": "stream", 204 | "text": [ 205 | "WARNING:tensorflow:From :7: Model.fit_generator (from tensorflow.python.keras.engine.training) is deprecated 
and will be removed in a future version.\n", 206 | "Instructions for updating:\n", 207 | "Please use Model.fit, which supports generators.\n", 208 | "Epoch 1/100\n", 209 | "128/128 [==============================] - 388s 3s/step - loss: 2.2076 - accuracy: 0.1975 - val_loss: 1.8933 - val_accuracy: 0.3361\n", 210 | "Epoch 2/100\n", 211 | "128/128 [==============================] - 83s 647ms/step - loss: 1.5392 - accuracy: 0.4043 - val_loss: 1.2851 - val_accuracy: 0.5115\n", 212 | "Epoch 3/100\n", 213 | "128/128 [==============================] - 24s 186ms/step - loss: 1.1808 - accuracy: 0.5549 - val_loss: 1.0628 - val_accuracy: 0.5927\n", 214 | "Epoch 4/100\n", 215 | "128/128 [==============================] - 11s 89ms/step - loss: 1.0083 - accuracy: 0.6118 - val_loss: 0.8312 - val_accuracy: 0.6694\n", 216 | "Epoch 5/100\n", 217 | "128/128 [==============================] - 9s 72ms/step - loss: 0.8184 - accuracy: 0.6814 - val_loss: 0.6064 - val_accuracy: 0.7615\n", 218 | "Epoch 6/100\n", 219 | "128/128 [==============================] - 8s 66ms/step - loss: 0.6621 - accuracy: 0.7495 - val_loss: 0.5486 - val_accuracy: 0.8021\n", 220 | "Epoch 7/100\n", 221 | "128/128 [==============================] - 8s 60ms/step - loss: 0.6257 - accuracy: 0.7569 - val_loss: 0.4236 - val_accuracy: 0.8410\n", 222 | "Epoch 8/100\n", 223 | "128/128 [==============================] - 8s 60ms/step - loss: 0.5495 - accuracy: 0.7881 - val_loss: 0.4765 - val_accuracy: 0.8279\n", 224 | "Epoch 9/100\n", 225 | "128/128 [==============================] - 8s 59ms/step - loss: 0.5199 - accuracy: 0.8015 - val_loss: 0.4109 - val_accuracy: 0.8459\n", 226 | "Epoch 10/100\n", 227 | "128/128 [==============================] - 8s 61ms/step - loss: 0.4305 - accuracy: 0.8442 - val_loss: 0.4397 - val_accuracy: 0.8317\n", 228 | "Epoch 11/100\n", 229 | "128/128 [==============================] - 8s 60ms/step - loss: 0.4257 - accuracy: 0.8392 - val_loss: 0.3025 - val_accuracy: 0.9030\n", 230 | "Epoch 12/100\n", 
231 | "128/128 [==============================] - 8s 60ms/step - loss: 0.3895 - accuracy: 0.8603 - val_loss: 0.2770 - val_accuracy: 0.9024\n", 232 | "Epoch 13/100\n", 233 | "128/128 [==============================] - 8s 59ms/step - loss: 0.3425 - accuracy: 0.8789 - val_loss: 0.4398 - val_accuracy: 0.8262\n", 234 | "Epoch 14/100\n", 235 | "128/128 [==============================] - 8s 59ms/step - loss: 0.3781 - accuracy: 0.8535 - val_loss: 0.3070 - val_accuracy: 0.8843\n", 236 | "Epoch 15/100\n", 237 | "128/128 [==============================] - 7s 58ms/step - loss: 0.3112 - accuracy: 0.8897 - val_loss: 0.2531 - val_accuracy: 0.9211\n", 238 | "Epoch 16/100\n", 239 | "128/128 [==============================] - 8s 59ms/step - loss: 0.2774 - accuracy: 0.8922 - val_loss: 0.2624 - val_accuracy: 0.9106\n", 240 | "Epoch 17/100\n", 241 | "128/128 [==============================] - 7s 58ms/step - loss: 0.2985 - accuracy: 0.8848 - val_loss: 0.1817 - val_accuracy: 0.9353\n", 242 | "Epoch 18/100\n", 243 | "128/128 [==============================] - 8s 59ms/step - loss: 0.2688 - accuracy: 0.9029 - val_loss: 0.2048 - val_accuracy: 0.9254\n", 244 | "Epoch 19/100\n", 245 | "128/128 [==============================] - 8s 61ms/step - loss: 0.2353 - accuracy: 0.9196 - val_loss: 0.2247 - val_accuracy: 0.9156\n", 246 | "Epoch 20/100\n", 247 | "128/128 [==============================] - 8s 60ms/step - loss: 0.2345 - accuracy: 0.9167 - val_loss: 0.2636 - val_accuracy: 0.8925\n", 248 | "Epoch 21/100\n", 249 | "128/128 [==============================] - 8s 59ms/step - loss: 0.2211 - accuracy: 0.9221 - val_loss: 0.1199 - val_accuracy: 0.9589\n", 250 | "Epoch 22/100\n", 251 | "128/128 [==============================] - 8s 60ms/step - loss: 0.2141 - accuracy: 0.9243 - val_loss: 0.1927 - val_accuracy: 0.9282\n", 252 | "Epoch 23/100\n", 253 | "128/128 [==============================] - 7s 58ms/step - loss: 0.2025 - accuracy: 0.9211 - val_loss: 0.1290 - val_accuracy: 0.9534\n", 254 | "Epoch 
24/100\n", 255 | "128/128 [==============================] - 7s 58ms/step - loss: 0.1849 - accuracy: 0.9338 - val_loss: 0.1184 - val_accuracy: 0.9644\n", 256 | "Epoch 25/100\n", 257 | "128/128 [==============================] - 8s 59ms/step - loss: 0.1799 - accuracy: 0.9319 - val_loss: 0.1898 - val_accuracy: 0.9309\n", 258 | "Epoch 26/100\n", 259 | "128/128 [==============================] - 8s 59ms/step - loss: 0.1656 - accuracy: 0.9500 - val_loss: 0.1159 - val_accuracy: 0.9589\n", 260 | "Epoch 27/100\n", 261 | "128/128 [==============================] - 8s 59ms/step - loss: 0.1538 - accuracy: 0.9407 - val_loss: 0.0873 - val_accuracy: 0.9715\n", 262 | "Epoch 28/100\n", 263 | "128/128 [==============================] - 7s 58ms/step - loss: 0.1424 - accuracy: 0.9505 - val_loss: 0.1055 - val_accuracy: 0.9644\n", 264 | "Epoch 29/100\n", 265 | "128/128 [==============================] - 8s 59ms/step - loss: 0.1683 - accuracy: 0.9426 - val_loss: 0.1317 - val_accuracy: 0.9523\n", 266 | "Epoch 30/100\n", 267 | "128/128 [==============================] - 8s 59ms/step - loss: 0.1466 - accuracy: 0.9451 - val_loss: 0.1339 - val_accuracy: 0.9490\n", 268 | "Epoch 31/100\n", 269 | "128/128 [==============================] - 8s 60ms/step - loss: 0.1388 - accuracy: 0.9546 - val_loss: 0.0982 - val_accuracy: 0.9693\n", 270 | "Epoch 32/100\n", 271 | "128/128 [==============================] - 8s 60ms/step - loss: 0.1285 - accuracy: 0.9578 - val_loss: 0.0587 - val_accuracy: 0.9830\n", 272 | "Epoch 33/100\n", 273 | "128/128 [==============================] - 7s 58ms/step - loss: 0.1058 - accuracy: 0.9652 - val_loss: 0.0838 - val_accuracy: 0.9704\n", 274 | "Epoch 34/100\n", 275 | "128/128 [==============================] - 7s 58ms/step - loss: 0.1750 - accuracy: 0.9422 - val_loss: 0.0972 - val_accuracy: 0.9731\n", 276 | "Epoch 35/100\n", 277 | "128/128 [==============================] - 7s 58ms/step - loss: 0.0867 - accuracy: 0.9676 - val_loss: 0.0629 - val_accuracy: 0.9775\n", 278 | 
"Epoch 36/100\n", 279 | "128/128 [==============================] - 8s 60ms/step - loss: 0.1341 - accuracy: 0.9539 - val_loss: 0.0944 - val_accuracy: 0.9649\n", 280 | "Epoch 37/100\n", 281 | "128/128 [==============================] - 7s 58ms/step - loss: 0.0823 - accuracy: 0.9725 - val_loss: 0.0936 - val_accuracy: 0.9655\n", 282 | "Epoch 38/100\n", 283 | "128/128 [==============================] - 8s 59ms/step - loss: 0.1071 - accuracy: 0.9647 - val_loss: 0.0805 - val_accuracy: 0.9731\n", 284 | "Epoch 39/100\n", 285 | "128/128 [==============================] - 8s 60ms/step - loss: 0.0990 - accuracy: 0.9648 - val_loss: 0.0869 - val_accuracy: 0.9748\n", 286 | "Epoch 40/100\n", 287 | "128/128 [==============================] - 8s 59ms/step - loss: 0.1100 - accuracy: 0.9619 - val_loss: 0.0623 - val_accuracy: 0.9803\n", 288 | "Epoch 41/100\n", 289 | "128/128 [==============================] - 8s 59ms/step - loss: 0.1034 - accuracy: 0.9618 - val_loss: 0.0887 - val_accuracy: 0.9677\n", 290 | "Epoch 42/100\n", 291 | "128/128 [==============================] - 8s 59ms/step - loss: 0.0964 - accuracy: 0.9639 - val_loss: 0.0750 - val_accuracy: 0.9764\n", 292 | "Epoch 43/100\n", 293 | "128/128 [==============================] - 8s 61ms/step - loss: 0.0782 - accuracy: 0.9692 - val_loss: 0.0619 - val_accuracy: 0.9781\n", 294 | "Epoch 44/100\n", 295 | "128/128 [==============================] - 8s 59ms/step - loss: 0.0924 - accuracy: 0.9676 - val_loss: 0.1019 - val_accuracy: 0.9616\n", 296 | "Epoch 45/100\n", 297 | "128/128 [==============================] - 8s 59ms/step - loss: 0.0811 - accuracy: 0.9696 - val_loss: 0.0626 - val_accuracy: 0.9742\n", 298 | "Epoch 46/100\n", 299 | "128/128 [==============================] - 7s 59ms/step - loss: 0.0823 - accuracy: 0.9706 - val_loss: 0.0816 - val_accuracy: 0.9709\n", 300 | "Epoch 47/100\n", 301 | "128/128 [==============================] - 8s 60ms/step - loss: 0.1003 - accuracy: 0.9688 - val_loss: 0.0877 - val_accuracy: 0.9726\n", 
302 | "Epoch 48/100\n", 303 | "128/128 [==============================] - 8s 60ms/step - loss: 0.0806 - accuracy: 0.9701 - val_loss: 0.0501 - val_accuracy: 0.9846\n", 304 | "Epoch 49/100\n", 305 | "128/128 [==============================] - 8s 59ms/step - loss: 0.0697 - accuracy: 0.9780 - val_loss: 0.0932 - val_accuracy: 0.9693\n", 306 | "Epoch 50/100\n", 307 | "128/128 [==============================] - 8s 60ms/step - loss: 0.0624 - accuracy: 0.9775 - val_loss: 0.0496 - val_accuracy: 0.9857\n", 308 | "Epoch 51/100\n", 309 | "128/128 [==============================] - 8s 60ms/step - loss: 0.0727 - accuracy: 0.9725 - val_loss: 0.0756 - val_accuracy: 0.9731\n", 310 | "Epoch 52/100\n", 311 | "128/128 [==============================] - 8s 61ms/step - loss: 0.0770 - accuracy: 0.9750 - val_loss: 0.0591 - val_accuracy: 0.9819\n", 312 | "Epoch 53/100\n", 313 | "128/128 [==============================] - 8s 60ms/step - loss: 0.0384 - accuracy: 0.9893 - val_loss: 0.0526 - val_accuracy: 0.9819\n", 314 | "Epoch 54/100\n", 315 | "128/128 [==============================] - 8s 59ms/step - loss: 0.0754 - accuracy: 0.9736 - val_loss: 0.1516 - val_accuracy: 0.9567\n", 316 | "Epoch 55/100\n", 317 | "128/128 [==============================] - 8s 60ms/step - loss: 0.0661 - accuracy: 0.9789 - val_loss: 0.1552 - val_accuracy: 0.9518\n", 318 | "Epoch 56/100\n", 319 | "128/128 [==============================] - 8s 60ms/step - loss: 0.0554 - accuracy: 0.9794 - val_loss: 0.0563 - val_accuracy: 0.9808\n", 320 | "Epoch 57/100\n", 321 | "128/128 [==============================] - 8s 59ms/step - loss: 0.0699 - accuracy: 0.9725 - val_loss: 0.0522 - val_accuracy: 0.9830\n", 322 | "Epoch 58/100\n", 323 | "128/128 [==============================] - 8s 60ms/step - loss: 0.0601 - accuracy: 0.9824 - val_loss: 0.0534 - val_accuracy: 0.9819\n", 324 | "Epoch 59/100\n", 325 | "128/128 [==============================] - 8s 62ms/step - loss: 0.0524 - accuracy: 0.9868 - val_loss: 0.0385 - val_accuracy: 
0.9852\n", 326 | "Epoch 60/100\n", 327 | "128/128 [==============================] - 8s 62ms/step - loss: 0.0522 - accuracy: 0.9824 - val_loss: 0.0990 - val_accuracy: 0.9655\n", 328 | "Epoch 61/100\n", 329 | "128/128 [==============================] - 8s 59ms/step - loss: 0.0729 - accuracy: 0.9727 - val_loss: 0.0913 - val_accuracy: 0.9688\n", 330 | "Epoch 62/100\n", 331 | "128/128 [==============================] - 8s 60ms/step - loss: 0.0696 - accuracy: 0.9735 - val_loss: 0.1237 - val_accuracy: 0.9529\n", 332 | "Epoch 63/100\n", 333 | "128/128 [==============================] - 8s 60ms/step - loss: 0.0464 - accuracy: 0.9819 - val_loss: 0.0469 - val_accuracy: 0.9852\n", 334 | "Epoch 64/100\n", 335 | "128/128 [==============================] - 8s 60ms/step - loss: 0.0369 - accuracy: 0.9843 - val_loss: 0.0464 - val_accuracy: 0.9836\n", 336 | "Epoch 65/100\n", 337 | "128/128 [==============================] - 8s 60ms/step - loss: 0.0437 - accuracy: 0.9873 - val_loss: 0.0405 - val_accuracy: 0.9857\n", 338 | "Epoch 66/100\n", 339 | "128/128 [==============================] - 8s 59ms/step - loss: 0.0458 - accuracy: 0.9877 - val_loss: 0.0683 - val_accuracy: 0.9770\n", 340 | "Epoch 67/100\n", 341 | "128/128 [==============================] - 8s 60ms/step - loss: 0.0372 - accuracy: 0.9853 - val_loss: 0.0573 - val_accuracy: 0.9803\n", 342 | "Epoch 68/100\n", 343 | "128/128 [==============================] - 8s 61ms/step - loss: 0.0375 - accuracy: 0.9873 - val_loss: 0.0449 - val_accuracy: 0.9819\n", 344 | "Epoch 69/100\n", 345 | "128/128 [==============================] - 8s 61ms/step - loss: 0.0543 - accuracy: 0.9809 - val_loss: 0.0693 - val_accuracy: 0.9709\n", 346 | "Epoch 70/100\n", 347 | "128/128 [==============================] - 8s 59ms/step - loss: 0.0386 - accuracy: 0.9868 - val_loss: 0.0544 - val_accuracy: 0.9797\n", 348 | "Epoch 71/100\n", 349 | "128/128 [==============================] - 8s 60ms/step - loss: 0.0468 - accuracy: 0.9858 - val_loss: 0.0540 - 
val_accuracy: 0.9836\n", 350 | "Epoch 72/100\n", 351 | "128/128 [==============================] - 8s 60ms/step - loss: 0.0588 - accuracy: 0.9794 - val_loss: 0.0641 - val_accuracy: 0.9786\n", 352 | "Epoch 73/100\n", 353 | "128/128 [==============================] - 8s 60ms/step - loss: 0.0438 - accuracy: 0.9863 - val_loss: 0.0459 - val_accuracy: 0.9846\n", 354 | "Epoch 74/100\n", 355 | "128/128 [==============================] - 8s 61ms/step - loss: 0.0478 - accuracy: 0.9843 - val_loss: 0.0612 - val_accuracy: 0.9841\n", 356 | "Epoch 75/100\n", 357 | "128/128 [==============================] - 8s 61ms/step - loss: 0.0461 - accuracy: 0.9843 - val_loss: 0.0410 - val_accuracy: 0.9852\n", 358 | "Epoch 76/100\n", 359 | "128/128 [==============================] - 8s 61ms/step - loss: 0.0304 - accuracy: 0.9897 - val_loss: 0.0403 - val_accuracy: 0.9896\n", 360 | "Epoch 77/100\n", 361 | "128/128 [==============================] - 8s 61ms/step - loss: 0.0207 - accuracy: 0.9946 - val_loss: 0.0429 - val_accuracy: 0.9885\n", 362 | "Epoch 78/100\n", 363 | "128/128 [==============================] - 8s 61ms/step - loss: 0.0299 - accuracy: 0.9892 - val_loss: 0.0386 - val_accuracy: 0.9885\n", 364 | "Epoch 79/100\n", 365 | "128/128 [==============================] - 8s 60ms/step - loss: 0.0314 - accuracy: 0.9897 - val_loss: 0.0617 - val_accuracy: 0.9775\n", 366 | "Epoch 80/100\n", 367 | "128/128 [==============================] - 8s 60ms/step - loss: 0.0435 - accuracy: 0.9843 - val_loss: 0.0403 - val_accuracy: 0.9874\n", 368 | "Epoch 81/100\n", 369 | "128/128 [==============================] - 8s 61ms/step - loss: 0.0438 - accuracy: 0.9863 - val_loss: 0.0891 - val_accuracy: 0.9720\n", 370 | "Epoch 82/100\n", 371 | "128/128 [==============================] - 8s 61ms/step - loss: 0.0389 - accuracy: 0.9863 - val_loss: 0.0583 - val_accuracy: 0.9797\n", 372 | "Epoch 83/100\n", 373 | "128/128 [==============================] - 8s 61ms/step - loss: 0.0334 - accuracy: 0.9897 - val_loss: 
0.0445 - val_accuracy: 0.9846\n", 374 | "Epoch 84/100\n", 375 | "128/128 [==============================] - 8s 61ms/step - loss: 0.0413 - accuracy: 0.9902 - val_loss: 0.0895 - val_accuracy: 0.9655\n", 376 | "Epoch 85/100\n", 377 | "128/128 [==============================] - 8s 60ms/step - loss: 0.0408 - accuracy: 0.9882 - val_loss: 0.0440 - val_accuracy: 0.9863\n", 378 | "Epoch 86/100\n", 379 | "128/128 [==============================] - 8s 60ms/step - loss: 0.0223 - accuracy: 0.9927 - val_loss: 0.0289 - val_accuracy: 0.9896\n", 380 | "Epoch 87/100\n", 381 | "128/128 [==============================] - 8s 61ms/step - loss: 0.0261 - accuracy: 0.9917 - val_loss: 0.0606 - val_accuracy: 0.9803\n", 382 | "Epoch 88/100\n", 383 | "128/128 [==============================] - 8s 61ms/step - loss: 0.0294 - accuracy: 0.9912 - val_loss: 0.0395 - val_accuracy: 0.9885\n", 384 | "Epoch 89/100\n", 385 | "128/128 [==============================] - 8s 61ms/step - loss: 0.0229 - accuracy: 0.9936 - val_loss: 0.0494 - val_accuracy: 0.9841\n", 386 | "Epoch 90/100\n", 387 | "128/128 [==============================] - 8s 61ms/step - loss: 0.0306 - accuracy: 0.9882 - val_loss: 0.0658 - val_accuracy: 0.9808\n", 388 | "Epoch 91/100\n", 389 | "128/128 [==============================] - 8s 60ms/step - loss: 0.0623 - accuracy: 0.9814 - val_loss: 0.0496 - val_accuracy: 0.9857\n", 390 | "Epoch 92/100\n", 391 | "128/128 [==============================] - 8s 60ms/step - loss: 0.0393 - accuracy: 0.9877 - val_loss: 0.0401 - val_accuracy: 0.9846\n", 392 | "Epoch 93/100\n", 393 | "128/128 [==============================] - 8s 61ms/step - loss: 0.0348 - accuracy: 0.9868 - val_loss: 0.0902 - val_accuracy: 0.9715\n", 394 | "Epoch 94/100\n", 395 | "128/128 [==============================] - 8s 61ms/step - loss: 0.0224 - accuracy: 0.9926 - val_loss: 0.0463 - val_accuracy: 0.9797\n", 396 | "Epoch 95/100\n", 397 | "128/128 [==============================] - 8s 60ms/step - loss: 0.0297 - accuracy: 0.9897 - 
val_loss: 0.0645 - val_accuracy: 0.9770\n", 398 | "Epoch 96/100\n", 399 | "128/128 [==============================] - 8s 60ms/step - loss: 0.0405 - accuracy: 0.9858 - val_loss: 0.0602 - val_accuracy: 0.9770\n", 400 | "Epoch 97/100\n", 401 | "128/128 [==============================] - 8s 60ms/step - loss: 0.0194 - accuracy: 0.9927 - val_loss: 0.0500 - val_accuracy: 0.9857\n", 402 | "Epoch 98/100\n", 403 | "128/128 [==============================] - 8s 62ms/step - loss: 0.0311 - accuracy: 0.9897 - val_loss: 0.0332 - val_accuracy: 0.9896\n", 404 | "Epoch 99/100\n", 405 | "128/128 [==============================] - 8s 61ms/step - loss: 0.0231 - accuracy: 0.9931 - val_loss: 0.0556 - val_accuracy: 0.9825\n", 406 | "Epoch 100/100\n", 407 | "128/128 [==============================] - 8s 59ms/step - loss: 0.0223 - accuracy: 0.9926 - val_loss: 0.0526 - val_accuracy: 0.9819\n" 408 | ], 409 | "name": "stdout" 410 | } 411 | ] 412 | } 413 | ] 414 | } 415 | -------------------------------------------------------------------------------- /Model/a.txt: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /Model/model.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rohitgit1/Target-Detection-in-MSTAR-Images/25bb14f82b0c799699cac6bae9e9e54bb903f03a/Model/model.h5 -------------------------------------------------------------------------------- /Project_21C02.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "nbformat": 4, 3 | "nbformat_minor": 0, 4 | "metadata": { 5 | "colab": { 6 | "name": "Project_21C02", 7 | "provenance": [], 8 | "collapsed_sections": [] 9 | }, 10 | "kernelspec": { 11 | "name": "python3", 12 | "display_name": "Python 3" 13 | }, 14 | "accelerator": "GPU" 15 | }, 16 | "cells": [ 17 | { 18 | "cell_type": "markdown", 19 | "metadata": { 
20 | "id": "5xVALNYBurlN", 21 | "colab_type": "text" 22 | }, 23 | "source": [ 24 | "# **SAR Target/Object Detection Using Machine Learning**\n", 25 | "\n", 26 | "The notebook represents real time implementation of data processing, model building and testing of multiple Machine Learning Algorithms for detection of the target in MSTAR dataset." 27 | ] 28 | }, 29 | { 30 | "cell_type": "markdown", 31 | "metadata": { 32 | "id": "unN2bCKpIDq4", 33 | "colab_type": "text" 34 | }, 35 | "source": [ 36 | "**Necassary Packages Installation**" 37 | ] 38 | }, 39 | { 40 | "cell_type": "code", 41 | "metadata": { 42 | "id": "GzaMFWkDhmEL", 43 | "colab_type": "code", 44 | "colab": { 45 | "base_uri": "https://localhost:8080/", 46 | "height": 241 47 | }, 48 | "outputId": "b4228afa-8a66-4b24-e17c-68768f1701b5" 49 | }, 50 | "source": [ 51 | "!pip install scipy==1.2.1" 52 | ], 53 | "execution_count": null, 54 | "outputs": [ 55 | { 56 | "output_type": "stream", 57 | "text": [ 58 | "Collecting scipy==1.2.1\n", 59 | "\u001b[?25l Downloading https://files.pythonhosted.org/packages/7f/5f/c48860704092933bf1c4c1574a8de1ffd16bf4fde8bab190d747598844b2/scipy-1.2.1-cp36-cp36m-manylinux1_x86_64.whl (24.8MB)\n", 60 | "\u001b[K |████████████████████████████████| 24.8MB 128kB/s \n", 61 | "\u001b[?25hRequirement already satisfied: numpy>=1.8.2 in /usr/local/lib/python3.6/dist-packages (from scipy==1.2.1) (1.18.5)\n", 62 | "\u001b[31mERROR: umap-learn 0.4.6 has requirement scipy>=1.3.1, but you'll have scipy 1.2.1 which is incompatible.\u001b[0m\n", 63 | "\u001b[31mERROR: tensorflow 2.3.0 has requirement scipy==1.4.1, but you'll have scipy 1.2.1 which is incompatible.\u001b[0m\n", 64 | "\u001b[31mERROR: albumentations 0.1.12 has requirement imgaug<0.2.7,>=0.2.5, but you'll have imgaug 0.2.9 which is incompatible.\u001b[0m\n", 65 | "Installing collected packages: scipy\n", 66 | " Found existing installation: scipy 1.4.1\n", 67 | " Uninstalling scipy-1.4.1:\n", 68 | " Successfully uninstalled 
scipy-1.4.1\n", 69 | "Successfully installed scipy-1.2.1\n" 70 | ], 71 | "name": "stdout" 72 | } 73 | ] 74 | }, 75 | { 76 | "cell_type": "code", 77 | "metadata": { 78 | "id": "Dkm_A64Ph1sC", 79 | "colab_type": "code", 80 | "colab": {} 81 | }, 82 | "source": [ 83 | "import numpy as np\n", 84 | "from PIL import Image\n", 85 | "import scipy.misc as im\n", 86 | "\n", 87 | "import os\n", 88 | "from sklearn.preprocessing import OneHotEncoder\n", 89 | "from sklearn.decomposition import PCA\n", 90 | "from skimage.transform import resize\n", 91 | "from skimage import data" 92 | ], 93 | "execution_count": null, 94 | "outputs": [] 95 | }, 96 | { 97 | "cell_type": "code", 98 | "metadata": { 99 | "id": "wgEg-TV1h9_t", 100 | "colab_type": "code", 101 | "colab": {} 102 | }, 103 | "source": [ 104 | "import PIL #resizing the images" 105 | ], 106 | "execution_count": null, 107 | "outputs": [] 108 | }, 109 | { 110 | "cell_type": "markdown", 111 | "metadata": { 112 | "id": "isTLNXTDyWwP", 113 | "colab_type": "text" 114 | }, 115 | "source": [ 116 | "**Data Processing**" 117 | ] 118 | }, 119 | { 120 | "cell_type": "markdown", 121 | "metadata": { 122 | "id": "x4uLqaMEGCCE", 123 | "colab_type": "text" 124 | }, 125 | "source": [ 126 | "![alt text](https://drive.google.com/uc?export=view&id=11u7TILbn_1a5OvI97Zrv4nzGke8LSC88)" 127 | ] 128 | }, 129 | { 130 | "cell_type": "markdown", 131 | "metadata": { 132 | "id": "DGZYIzbCHNWK", 133 | "colab_type": "text" 134 | }, 135 | "source": [ 136 | "![alt text](https://drive.google.com/uc?export=view&id=1LtSjZGAiqjl6UPZYBJjiJupzPyyA6YGN)" 137 | ] 138 | }, 139 | { 140 | "cell_type": "code", 141 | "metadata": { 142 | "id": "bZDCGwImiAOd", 143 | "colab_type": "code", 144 | "colab": {} 145 | }, 146 | "source": [ 147 | "def get_mstar_data(stage, width=128, height=128, crop_size=128, aug=False):\n", 148 | " data_dir = \"/content/gdrive/My Drive/PSIT Files/MSTAR_Machine_learning/MSTAR-10/train/\" if stage == \"train\" else \"/content/gdrive/My Drive/PSIT 
Files/MSTAR_Machine_learning/MSTAR-10/test/\" if stage == \"test\" else None\n", 149 | " print(\"------ \" + stage + \" ------\")\n", 150 | " sub_dir = [\"2S1\", \"BMP2\", \"BRDM_2\", \"BTR60\", \"BTR70\", \"D7\", \"T62\", \"T72\", \"ZIL131\", \"ZSU_23_4\"]\n", 151 | " X = []\n", 152 | " y = []\n", 153 | "\n", 154 | " for i in range(len(sub_dir)):\n", 155 | " tmp_dir = data_dir + sub_dir[i] + \"/\"\n", 156 | " img_idx = [x for x in os.listdir(tmp_dir) if x.endswith(\".jpeg\")]\n", 157 | " print(sub_dir[i], len(img_idx))\n", 158 | " y += [i] * len(img_idx)\n", 159 | " for j in range(len(img_idx)):\n", 160 | " img = im.imresize(im.imread((tmp_dir + img_idx[j])), [height, width])\n", 161 | " img = img[(height - crop_size) // 2 : height - (height - crop_size) // 2, \\\n", 162 | " (width - crop_size) // 2: width - (width - crop_size) // 2]\n", 163 | " # img = img[16:112, 16:112] # crop\n", 164 | " X.append(img)\n", 165 | "\n", 166 | " return np.asarray(X), np.asarray(y)" 167 | ], 168 | "execution_count": null, 169 | "outputs": [] 170 | }, 171 | { 172 | "cell_type": "code", 173 | "metadata": { 174 | "id": "ioONgNWqiRrq", 175 | "colab_type": "code", 176 | "colab": {} 177 | }, 178 | "source": [ 179 | "def data_shuffle(X, y, seed=0):\n", 180 | " data = np.hstack([X, y[:, np.newaxis]])\n", 181 | " np.random.shuffle(data)\n", 182 | " return data[:, :-1], data[:, -1]" 183 | ], 184 | "execution_count": null, 185 | "outputs": [] 186 | }, 187 | { 188 | "cell_type": "code", 189 | "metadata": { 190 | "id": "_Sk_-cl3iUjV", 191 | "colab_type": "code", 192 | "colab": {} 193 | }, 194 | "source": [ 195 | "def one_hot(y_train, y_test):\n", 196 | " one_hot_trans = OneHotEncoder().fit(y_train[:, np.newaxis])\n", 197 | " return one_hot_trans.transform(y_train[:, np.newaxis]).toarray(), one_hot_trans.transform(y_test[:, np.newaxis]).toarray()" 198 | ], 199 | "execution_count": null, 200 | "outputs": [] 201 | }, 202 | { 203 | "cell_type": "code", 204 | "metadata": { 205 | "id": 
"drM-FP2jiXoF", 206 | "colab_type": "code", 207 | "colab": {} 208 | }, 209 | "source": [ 210 | "def mean_wise(X):\n", 211 | " return (X.T - np.mean(X, axis=1)).T" 212 | ], 213 | "execution_count": null, 214 | "outputs": [] 215 | }, 216 | { 217 | "cell_type": "markdown", 218 | "metadata": { 219 | "id": "9unVSh6NINkP", 220 | "colab_type": "text" 221 | }, 222 | "source": [ 223 | "**Principal Component Analysis for Optimized Processing**" 224 | ] 225 | }, 226 | { 227 | "cell_type": "code", 228 | "metadata": { 229 | "id": "jSheEJ9fibF1", 230 | "colab_type": "code", 231 | "colab": {} 232 | }, 233 | "source": [ 234 | "def pca(X_train, X_test, n):\n", 235 | " pca_trans = PCA(n_components=n).fit(X_train)\n", 236 | " return pca_trans.transform(X_train), pca_trans.transform(X_test)" 237 | ], 238 | "execution_count": null, 239 | "outputs": [] 240 | }, 241 | { 242 | "cell_type": "markdown", 243 | "metadata": { 244 | "id": "lraJ7zHjyt2e", 245 | "colab_type": "text" 246 | }, 247 | "source": [ 248 | "**Analyzing Various Machine Learning Algorithms**" 249 | ] 250 | }, 251 | { 252 | "cell_type": "code", 253 | "metadata": { 254 | "id": "5tBV5voEiekV", 255 | "colab_type": "code", 256 | "colab": {} 257 | }, 258 | "source": [ 259 | "from sklearn.tree import DecisionTreeClassifier\n", 260 | "from sklearn.ensemble import RandomForestClassifier\n", 261 | "from sklearn.ensemble import GradientBoostingClassifier\n", 262 | "from sklearn.linear_model import LogisticRegression\n", 263 | "from sklearn.neural_network import MLPClassifier\n", 264 | "from sklearn.svm import SVC\n", 265 | "from sklearn.neighbors import KNeighborsClassifier\n", 266 | "from sklearn.naive_bayes import GaussianNB" 267 | ], 268 | "execution_count": null, 269 | "outputs": [] 270 | }, 271 | { 272 | "cell_type": "markdown", 273 | "metadata": { 274 | "id": "qk0nPc6Dy4af", 275 | "colab_type": "text" 276 | }, 277 | "source": [ 278 | "**Decision Tree Classifier**" 279 | ] 280 | }, 281 | { 282 | "cell_type": "code", 283 | 
"metadata": { 284 | "id": "ZzNuEACjiiA2", 285 | "colab_type": "code", 286 | "colab": {} 287 | }, 288 | "source": [ 289 | "def dt(criterion=\"entropy\", max_features=\"sqrt\"):\n", 290 | " return DecisionTreeClassifier(criterion=criterion, max_features=max_features, max_depth=None, random_state=0)" 291 | ], 292 | "execution_count": null, 293 | "outputs": [] 294 | }, 295 | { 296 | "cell_type": "markdown", 297 | "metadata": { 298 | "id": "4hazypO0zCwP", 299 | "colab_type": "text" 300 | }, 301 | "source": [ 302 | "**Random Forest Classification**" 303 | ] 304 | }, 305 | { 306 | "cell_type": "code", 307 | "metadata": { 308 | "id": "CMrTs8KoilXu", 309 | "colab_type": "code", 310 | "colab": {} 311 | }, 312 | "source": [ 313 | "def rf(n_tree=100, max_features=\"sqrt\"):\n", 314 | " return RandomForestClassifier(n_estimators=n_tree, max_features=max_features, min_samples_split=2, \\\n", 315 | " max_depth=None, bootstrap=True, oob_score=False, random_state=0, n_jobs=4)" 316 | ], 317 | "execution_count": null, 318 | "outputs": [] 319 | }, 320 | { 321 | "cell_type": "markdown", 322 | "metadata": { 323 | "id": "Kjvr4aCJzMc8", 324 | "colab_type": "text" 325 | }, 326 | "source": [ 327 | "**Gradient Boosting Classification**" 328 | ] 329 | }, 330 | { 331 | "cell_type": "code", 332 | "metadata": { 333 | "id": "jSrFiKg6inw1", 334 | "colab_type": "code", 335 | "colab": {} 336 | }, 337 | "source": [ 338 | "def gbdt(n_tree=100, max_features=\"sqrt\"):\n", 339 | " return GradientBoostingClassifier(n_estimators=n_tree, learning_rate=0.005, \\\n", 340 | " max_features=max_features, max_depth=None, random_state=0)" 341 | ], 342 | "execution_count": null, 343 | "outputs": [] 344 | }, 345 | { 346 | "cell_type": "markdown", 347 | "metadata": { 348 | "id": "JIPB4ULRzYLG", 349 | "colab_type": "text" 350 | }, 351 | "source": [ 352 | "**Logistic Regression**" 353 | ] 354 | }, 355 | { 356 | "cell_type": "code", 357 | "metadata": { 358 | "id": "TObK0n1oiqwt", 359 | "colab_type": "code", 360 | 
"colab": {} 361 | }, 362 | "source": [ 363 | "def logit(C=1.0):\n", 364 | " return LogisticRegression(C=1.0, solver=\"lbfgs\", max_iter=1000, random_state=0)" 365 | ], 366 | "execution_count": null, 367 | "outputs": [] 368 | }, 369 | { 370 | "cell_type": "markdown", 371 | "metadata": { 372 | "id": "BDOcVlbZ0DyH", 373 | "colab_type": "text" 374 | }, 375 | "source": [ 376 | "**Multi-layer Perceptron Classifier**" 377 | ] 378 | }, 379 | { 380 | "cell_type": "code", 381 | "metadata": { 382 | "id": "qNUrR6QFitGu", 383 | "colab_type": "code", 384 | "colab": {} 385 | }, 386 | "source": [ 387 | "def mlp(hidden=(100), act=\"logistic\", batch=32):\n", 388 | " return MLPClassifier(hidden_layer_sizes=hidden, activation=act, solver=\"sgd\", batch_size=batch, \\\n", 389 | " learning_rate=\"constant\", learning_rate_init=0.1, early_stopping=False, max_iter=1000, random_state=0)" 390 | ], 391 | "execution_count": null, 392 | "outputs": [] 393 | }, 394 | { 395 | "cell_type": "markdown", 396 | "metadata": { 397 | "id": "JQ53ALJvCbLP", 398 | "colab_type": "text" 399 | }, 400 | "source": [ 401 | "**Support Vector Machine Classifier**" 402 | ] 403 | }, 404 | { 405 | "cell_type": "markdown", 406 | "metadata": { 407 | "id": "pyQsAp1V0Pcf", 408 | "colab_type": "text" 409 | }, 410 | "source": [ 411 | "![alt text](https://drive.google.com/uc?export=view&id=1ABtElBwMDn1dmSOenCGok6i5rhsaq4Ql)" 412 | ] 413 | }, 414 | { 415 | "cell_type": "code", 416 | "metadata": { 417 | "id": "bEAsvZ5iiwjk", 418 | "colab_type": "code", 419 | "colab": {} 420 | }, 421 | "source": [ 422 | "def svm(C=1.0, kernel=\"rbf\"):\n", 423 | " return SVC(C=C, kernel=kernel, max_iter=-1, random_state=0)" 424 | ], 425 | "execution_count": null, 426 | "outputs": [] 427 | }, 428 | { 429 | "cell_type": "markdown", 430 | "metadata": { 431 | "id": "zXmNhiwe0WHW", 432 | "colab_type": "text" 433 | }, 434 | "source": [ 435 | "**K Nearest Neighbours Classification**" 436 | ] 437 | }, 438 | { 439 | "cell_type": "code", 440 | 
"metadata": { 441 | "id": "pyFO4JwVi15N", 442 | "colab_type": "code", 443 | "colab": {} 444 | }, 445 | "source": [ 446 | "def knn(n_neighbors=10, weights=\"distance\"):\n", 447 | " return KNeighborsClassifier(n_neighbors=n_neighbors, weights=weights, algorithm=\"auto\")" 448 | ], 449 | "execution_count": null, 450 | "outputs": [] 451 | }, 452 | { 453 | "cell_type": "markdown", 454 | "metadata": { 455 | "id": "Q6vUw42D0h9s", 456 | "colab_type": "text" 457 | }, 458 | "source": [ 459 | "**Naive Bayes Classification**" 460 | ] 461 | }, 462 | { 463 | "cell_type": "code", 464 | "metadata": { 465 | "id": "rW1Em8c6i4R8", 466 | "colab_type": "code", 467 | "colab": {} 468 | }, 469 | "source": [ 470 | "def bayes():\n", 471 | " return GaussianNB()" 472 | ], 473 | "execution_count": null, 474 | "outputs": [] 475 | }, 476 | { 477 | "cell_type": "markdown", 478 | "metadata": { 479 | "id": "Am6qP2aN0rd0", 480 | "colab_type": "text" 481 | }, 482 | "source": [ 483 | "**Trainer Fuction**" 484 | ] 485 | }, 486 | { 487 | "cell_type": "code", 488 | "metadata": { 489 | "id": "WdX5EV74i7Qd", 490 | "colab_type": "code", 491 | "colab": {} 492 | }, 493 | "source": [ 494 | "def train(X, y, classifier):\n", 495 | " return classifier.fit(X, y)" 496 | ], 497 | "execution_count": null, 498 | "outputs": [] 499 | }, 500 | { 501 | "cell_type": "markdown", 502 | "metadata": { 503 | "id": "Df-y3hxS0vhm", 504 | "colab_type": "text" 505 | }, 506 | "source": [ 507 | "**Testing Function**" 508 | ] 509 | }, 510 | { 511 | "cell_type": "code", 512 | "metadata": { 513 | "id": "rfdBkPVKi9nk", 514 | "colab_type": "code", 515 | "colab": {} 516 | }, 517 | "source": [ 518 | "def test(X, classifier):\n", 519 | " return classifier.predict(X)" 520 | ], 521 | "execution_count": null, 522 | "outputs": [] 523 | }, 524 | { 525 | "cell_type": "markdown", 526 | "metadata": { 527 | "id": "LhwEiL5m02J1", 528 | "colab_type": "text" 529 | }, 530 | "source": [ 531 | "**Accuracy Function**" 532 | ] 533 | }, 534 | { 535 | 
"cell_type": "code", 536 | "metadata": { 537 | "id": "MWUJS3YMi_wE", 538 | "colab_type": "code", 539 | "colab": {} 540 | }, 541 | "source": [ 542 | "def acc(X, y, classifier):\n", 543 | " return classifier.score(X, y)" 544 | ], 545 | "execution_count": null, 546 | "outputs": [] 547 | }, 548 | { 549 | "cell_type": "markdown", 550 | "metadata": { 551 | "id": "JziS0Uol1Au8", 552 | "colab_type": "text" 553 | }, 554 | "source": [ 555 | "**Implementation of Algorithms real time in order to find the most optimized solution**" 556 | ] 557 | }, 558 | { 559 | "cell_type": "code", 560 | "metadata": { 561 | "id": "cTUW-zbFjCaN", 562 | "colab_type": "code", 563 | "colab": {} 564 | }, 565 | "source": [ 566 | "import numpy as np\n", 567 | "import matplotlib.pyplot as plt\n", 568 | "import matplotlib.cm as cm" 569 | ], 570 | "execution_count": null, 571 | "outputs": [] 572 | }, 573 | { 574 | "cell_type": "markdown", 575 | "metadata": { 576 | "id": "wgISV6Bf1La1", 577 | "colab_type": "text" 578 | }, 579 | "source": [ 580 | "*Let's load the processed M-STAR dataset*" 581 | ] 582 | }, 583 | { 584 | "cell_type": "code", 585 | "metadata": { 586 | "id": "Qbm2lAWajFQs", 587 | "colab_type": "code", 588 | "colab": { 589 | "base_uri": "https://localhost:8080/", 590 | "height": 561 591 | }, 592 | "outputId": "d57895d1-0867-402f-a57a-83988bbfe223" 593 | }, 594 | "source": [ 595 | "print(\"loading ... \")\n", 596 | "X_train, y_train = get_mstar_data(\"train\", 128, 128, 96)\n", 597 | "X_test, y_test = get_mstar_data(\"test\", 128, 128, 96)\n", 598 | "X_train = np.reshape(X_train, [X_train.shape[0], X_train.shape[1] * X_train.shape[2]])\n", 599 | "X_test = np.reshape(X_test, [X_test.shape[0], X_test.shape[1] * X_test.shape[2]])\n", 600 | "print(X_train.shape, y_train.shape, X_test.shape, y_test.shape)" 601 | ], 602 | "execution_count": null, 603 | "outputs": [ 604 | { 605 | "output_type": "stream", 606 | "text": [ 607 | "loading ... 
\n", 608 | "------ train ------\n", 609 | "2S1 299\n" 610 | ], 611 | "name": "stdout" 612 | }, 613 | { 614 | "output_type": "stream", 615 | "text": [ 616 | "/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:14: DeprecationWarning: `imread` is deprecated!\n", 617 | "`imread` is deprecated in SciPy 1.0.0, and will be removed in 1.2.0.\n", 618 | "Use ``imageio.imread`` instead.\n", 619 | " \n", 620 | "/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:14: DeprecationWarning: `imresize` is deprecated!\n", 621 | "`imresize` is deprecated in SciPy 1.0.0, and will be removed in 1.3.0.\n", 622 | "Use Pillow instead: ``numpy.array(Image.fromarray(arr).resize())``.\n", 623 | " \n" 624 | ], 625 | "name": "stderr" 626 | }, 627 | { 628 | "output_type": "stream", 629 | "text": [ 630 | "BMP2 233\n", 631 | "BRDM_2 298\n", 632 | "BTR60 256\n", 633 | "BTR70 233\n", 634 | "D7 299\n", 635 | "T62 298\n", 636 | "T72 232\n", 637 | "ZIL131 299\n", 638 | "ZSU_23_4 299\n", 639 | "------ test ------\n", 640 | "2S1 274\n", 641 | "BMP2 195\n", 642 | "BRDM_2 274\n", 643 | "BTR60 195\n", 644 | "BTR70 196\n", 645 | "D7 274\n", 646 | "T62 273\n", 647 | "T72 196\n", 648 | "ZIL131 274\n", 649 | "ZSU_23_4 274\n", 650 | "(2746, 9216) (2746,) (2425, 9216) (2425,)\n" 651 | ], 652 | "name": "stdout" 653 | } 654 | ] 655 | }, 656 | { 657 | "cell_type": "markdown", 658 | "metadata": { 659 | "id": "qEWH9fBX1ZLM", 660 | "colab_type": "text" 661 | }, 662 | "source": [ 663 | "*Shuffling the loaded dataset to eliminate any bias*" 664 | ] 665 | }, 666 | { 667 | "cell_type": "code", 668 | "metadata": { 669 | "id": "DeWdKdt9jIi9", 670 | "colab_type": "code", 671 | "colab": { 672 | "base_uri": "https://localhost:8080/", 673 | "height": 34 674 | }, 675 | "outputId": "0b09c09b-6bc7-4b45-fe18-603ca8e997e0" 676 | }, 677 | "source": [ 678 | "print(\"shuffling ... 
\")\n", 679 | "X_train, y_train = data_shuffle(X_train, y_train)\n", 680 | "X_test, y_test = data_shuffle(X_test, y_test)" 681 | ], 682 | "execution_count": null, 683 | "outputs": [ 684 | { 685 | "output_type": "stream", 686 | "text": [ 687 | "shuffling ... \n" 688 | ], 689 | "name": "stdout" 690 | } 691 | ] 692 | }, 693 | { 694 | "cell_type": "markdown", 695 | "metadata": { 696 | "id": "mDp06Ipi1hZn", 697 | "colab_type": "text" 698 | }, 699 | "source": [ 700 | "*Preprocessing the dataset*" 701 | ] 702 | }, 703 | { 704 | "cell_type": "code", 705 | "metadata": { 706 | "id": "Gem5U7RoxFv_", 707 | "colab_type": "code", 708 | "colab": { 709 | "base_uri": "https://localhost:8080/", 710 | "height": 34 711 | }, 712 | "outputId": "95568cc5-599b-4f9e-fb8f-e126ecea474b" 713 | }, 714 | "source": [ 715 | "print(\"preprocessing ...\")\n", 716 | "X_train = X_train / 255.0\n", 717 | "X_test = X_test / 255.0\n", 718 | "X_train = mean_wise(X_train)\n", 719 | "X_test = mean_wise(X_test)\n", 720 | "X_train, X_test = pca(X_train, X_test, 80)\n", 721 | "# y_train, y_test = data.one_hot(y_train, y_test)" 722 | ], 723 | "execution_count": null, 724 | "outputs": [ 725 | { 726 | "output_type": "stream", 727 | "text": [ 728 | "preprocessing ...\n" 729 | ], 730 | "name": "stdout" 731 | } 732 | ] 733 | }, 734 | { 735 | "cell_type": "markdown", 736 | "metadata": { 737 | "id": "owaJixtu10Iu", 738 | "colab_type": "text" 739 | }, 740 | "source": [ 741 | "*Finally Training the model*" 742 | ] 743 | }, 744 | { 745 | "cell_type": "code", 746 | "metadata": { 747 | "id": "R0yEQFMpxOTz", 748 | "colab_type": "code", 749 | "colab": { 750 | "base_uri": "https://localhost:8080/", 751 | "height": 34 752 | }, 753 | "outputId": "2bd97ed2-88d5-4652-ce7c-9b9bb9c2a35f" 754 | }, 755 | "source": [ 756 | "print(\"training ...\")\n", 757 | "# classifier = train(X_train, y_train, dt(\"entropy\", 0.8)) # 70.68%\n", 758 | "# classifier = train(X_train, y_train, rf(1000, \"sqrt\")) # 96.49%\n", 759 | "# classifier = 
train(X_train, y_train, gbdt(1000, \"sqrt\")) # 95.17%\n", 760 | "# classifier = train(X_train, y_train, logit(1.0)) # 90.14%\n", 761 | "# classifier = train(X_train, y_train, mlp(1000, \"logistic\")) # 93.36%\n", 762 | "# classifier = train(X_train, y_train, svm(1.0, \"rbf\")) # 96.82%\n", 763 | "# classifier = train(X_train, y_train, knn(10, \"uniform\")) # 95.34%\n", 764 | "classifier = train(X_train, y_train, svm()) # 97.81%" 765 | ], 766 | "execution_count": null, 767 | "outputs": [ 768 | { 769 | "output_type": "stream", 770 | "text": [ 771 | "training ...\n" 772 | ], 773 | "name": "stdout" 774 | } 775 | ] 776 | }, 777 | { 778 | "cell_type": "markdown", 779 | "metadata": { 780 | "id": "E3V5CtQS2DM2", 781 | "colab_type": "text" 782 | }, 783 | "source": [ 784 | "*As seen while analyzing the above algorithms, Support Vector machine was able to provide higest accuracy for determining the vehicle images from the test data, that is 97.8% accuracy.*" 785 | ] 786 | }, 787 | { 788 | "cell_type": "code", 789 | "metadata": { 790 | "id": "BSZGPQRoxj4h", 791 | "colab_type": "code", 792 | "colab": { 793 | "base_uri": "https://localhost:8080/", 794 | "height": 68 795 | }, 796 | "outputId": "90763604-c30c-48ea-849f-081bcb2a52e7" 797 | }, 798 | "source": [ 799 | "print(\"testing ...\")\n", 800 | "print(acc(X_train, y_train, classifier))\n", 801 | "print(acc(X_test, y_test, classifier))" 802 | ], 803 | "execution_count": null, 804 | "outputs": [ 805 | { 806 | "output_type": "stream", 807 | "text": [ 808 | "testing ...\n", 809 | "0.9996358339402768\n", 810 | "0.9781443298969072\n" 811 | ], 812 | "name": "stdout" 813 | } 814 | ] 815 | }, 816 | { 817 | "cell_type": "markdown", 818 | "metadata": { 819 | "id": "QmjN0SNS3LMA", 820 | "colab_type": "text" 821 | }, 822 | "source": [ 823 | "**The END**" 824 | ] 825 | }, 826 | { 827 | "cell_type": "markdown", 828 | "metadata": { 829 | "id": "j0Rsh1NhIeem", 830 | "colab_type": "text" 831 | }, 832 | "source": [ 833 | "**Thank You**" 834 | ] 
835 | } 836 | ] 837 | } 838 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # **Target Detection In MSTAR Images** 2 | 3 | >## ✍  Problem Statement 4 | ## Target Detector :dart:! 5 | Synthetic Aperture Radar (SAR) is an imaging radar that transmits microwaves and generates imagery from the microwaves reflected by objects, with high resolution in both the range and azimuth directions. The good thing about SAR is that it works in all weather conditions, day or night, so it has many application areas like navigation, guidance, remote sensing, reconnaissance, resource exploration, etc. It is difficult to recognize an object in SAR imagery due to the absence of colour information and because the shape of the reflection from a target changes. So, the problem statement here is to recognize targets in SAR imagery automatically, which has always been a challenge in the research community. 6 | 7 | 8 |

9 | 10 |

11 | 12 | >## 📂  RELEVANT TECHNOLOGY STACK 13 | * ML 14 | * Python 3 15 | * Tensorflow 16 | * Image processing 17 | * Spacy 18 | 19 | >## 💻  GETTING STARTED 20 | 21 | => **Fork this repository to start contributing.** 22 | 23 | => Open your Git Bash command window and in the root directory type the following commands : 24 | ```bash 25 | 1) git init - initializes an empty local git repository. 26 | 2) git clone - clones the repository to your local machine 27 | (git clone https://github.com/rohitgit1/Target-Detection-in-MSTAR-Images.git) 28 | ``` 29 | 30 | References and Credits : 1. https://github.com/hunterlew/mstar_with_machine_learning 31 | 2. https://github.com/shuibao/CNN_MSTAR 32 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | flask==2.0.1 2 | tensorflow-cpu==2.5.0 3 | tensorflow_hub==0.12.0 4 | matplotlib==3.4.2 5 | numpy==1.19.5 6 | scipy==1.7.0 7 | pandas==1.1.1 8 | gunicorn==20.0.4 9 | --------------------------------------------------------------------------------