├── Dataset
│   ├── Clay
│   ├── Water
│   ├── Wheat
│   ├── EuFinal
│   ├── Dalforest
│   ├── Grassland
│   ├── subfebformosat2
│   └── subfebformosat2.hdr
├── Results
│   ├── Hard_classified.PNG
│   ├── Soft_classification1.PNG
│   └── Soft_classification2.PNG
├── LICENSE
├── utils.py
├── README.md
└── notebooks
    └── Hybrid CNN- RNN Model.ipynb

/Dataset/Clay:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/anumitgarg/Hybrid-CNN-RNN-Model-for-Hyperspectral-Satellite-Image-Classification/HEAD/Dataset/Clay
--------------------------------------------------------------------------------
/Dataset/Water:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/anumitgarg/Hybrid-CNN-RNN-Model-for-Hyperspectral-Satellite-Image-Classification/HEAD/Dataset/Water
--------------------------------------------------------------------------------
/Dataset/Wheat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/anumitgarg/Hybrid-CNN-RNN-Model-for-Hyperspectral-Satellite-Image-Classification/HEAD/Dataset/Wheat
--------------------------------------------------------------------------------
/Dataset/EuFinal:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/anumitgarg/Hybrid-CNN-RNN-Model-for-Hyperspectral-Satellite-Image-Classification/HEAD/Dataset/EuFinal
--------------------------------------------------------------------------------
/Dataset/Dalforest:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/anumitgarg/Hybrid-CNN-RNN-Model-for-Hyperspectral-Satellite-Image-Classification/HEAD/Dataset/Dalforest
--------------------------------------------------------------------------------
/Dataset/Grassland:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/anumitgarg/Hybrid-CNN-RNN-Model-for-Hyperspectral-Satellite-Image-Classification/HEAD/Dataset/Grassland
--------------------------------------------------------------------------------
/Dataset/subfebformosat2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/anumitgarg/Hybrid-CNN-RNN-Model-for-Hyperspectral-Satellite-Image-Classification/HEAD/Dataset/subfebformosat2
--------------------------------------------------------------------------------
/Results/Hard_classified.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/anumitgarg/Hybrid-CNN-RNN-Model-for-Hyperspectral-Satellite-Image-Classification/HEAD/Results/Hard_classified.PNG
--------------------------------------------------------------------------------
/Results/Soft_classification1.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/anumitgarg/Hybrid-CNN-RNN-Model-for-Hyperspectral-Satellite-Image-Classification/HEAD/Results/Soft_classification1.PNG
--------------------------------------------------------------------------------
/Results/Soft_classification2.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/anumitgarg/Hybrid-CNN-RNN-Model-for-Hyperspectral-Satellite-Image-Classification/HEAD/Results/Soft_classification2.PNG
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2019 anumitgarg

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
/Dataset/subfebformosat2.hdr:
--------------------------------------------------------------------------------
BANDS: 4
ROWS: 436
COLS: 506
INTERLEAVING: BIL
DATATYPE: U8
BYTE_ORDER: NA
UL_X_COORDINATE: 226448.000000
UL_Y_COORDINATE: 3306128.000000
LR_X_COORDINATE: 230488.000000
LR_Y_COORDINATE: 3302648.000000
PIXEL_WIDTH: 8.0000000
PIXEL_HEIGHT: 8.0000000
MAP_UNITS: meters
PROJECTION_NAME: UTM
PROJECTION_ZONE: 44
PROJECTION_PARAMETERS:
0.000000
0.000000
0.000000
1.000000
-0.000000
214896290070074160000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.000000
1144398331246043300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.000000
0.000000
0.000000
-0.000000
0.000000
0.000000
0.000000
0.000000
-0.000000
SPHEROID_NAME: WGS 84
DATUM_NAME: WGS 84
SEMI_MAJOR_AXIS: 6378137.000000
SEMI_MINOR_AXIS: 6356752.314245
E_SQUARED: 0.006694
RADIUS: 6371007.180918
--------------------------------------------------------------------------------
/utils.py:
--------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from osgeo import gdal
from PIL import Image
import array
import sys
import os


# Reading Header file: returns the image dimensions and datatype
def hdr_read(path):
    row = 0
    col = 0
    bands = 0
    datatype = None
    with open(path, "r") as f:
        for l in f:
            k = l.split()
            if k[0] == "BANDS:":
                bands = k[1]
            elif k[0] == 'ROWS:':
                row = k[1]
            elif k[0] == 'COLS:':
                col = k[1]
            elif k[0] == 'DATATYPE:':
                datatype = k[1]
    mul, D_type = (255, 'uint8') if datatype == 'U8' else ((2**16 - 1), 'uint16')
    print(mul, D_type)
    row = int(row)
    col = int(col)
    bands = int(bands)
    return row, col, bands, datatype
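
# NOTE: the image is a raw band-interleaved-by-line (BIL) file; GDAL's
# 'EHdr' driver reads it using the metadata in the accompanying .hdr header
# parsed above.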
# Reading Image file: returns a (pixels, bands) uint16 array, one spectrum per pixel
def ReadBilFile(bil, bands, pixels):
    extract_band = 1
    image = np.zeros([pixels, bands], dtype=np.uint16)
    gdal.GetDriverByName('EHdr').Register()
    img = gdal.Open(bil)
    while bands >= extract_band:
        bandx = img.GetRasterBand(extract_band)
        datax = bandx.ReadAsArray()
        image[:, extract_band - 1] = datax.reshape(pixels)
        extract_band = extract_band + 1
    return image


# Returns a numpy array after thresholding: membership values below the
# threshold are set to 0, the rest are kept unchanged
def thresholding(Y, threshold):
    for i in range(Y.shape[0]):
        if Y[i, :] < threshold:
            Y[i, :] = 0
    return Y


# Returns a numpy array after linearly stretching its pixels to the 0-255 range
def linear_stretch(y_test):
    minima = np.amin(y_test)
    maxima = np.amax(y_test)
    den = maxima - minima
    Y = ((y_test - minima) / den) * (2**8 - 1)
    return Y


# Takes a matrix, the number of rows/cols, and the name under which the image
# is to be saved; displays the image on screen and saves it with the given name
def display_save_image(y, row, col, name):
    img = y.reshape(row, col)
    plt.imshow(img)
    plt.savefig(name + '.png')  # save before show(), which clears the figure
    plt.show()


# SOFT CLASSIFIER: extracts the membership column of the selected class
def extract_membership(y, class_no):
    Y = y[:, class_no].reshape(y.shape[0], 1)
    return Y

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Hybrid CNN-RNN Model for Hyperspectral Satellite Image Classification

Satellite image processing is one of the key research areas in remote sensing. In hyperspectral satellite imaging, information from across a wide range of the electromagnetic spectrum is collected by satellites. The goal of remote sensing is to obtain the spectrum for each pixel in the image of a scene, for purposes such as terrain analysis and topographic mapping.

Traditional statistical approaches have had reasonable success in this field, but their efficiency is limited by the robustness of their results. Statistical approaches are parametric, built on an assumed statistical distribution, so the efficiency and correctness of the results depend closely on how well the data match that assumed distribution. Today, advances in artificial intelligence have made autonomous, large-scale analysis of imagery possible: AI can be taught to process satellite imagery with a small degree of error.

In the following model we use a hybrid CNN-RNN model to classify each pixel into its corresponding class. The code supports both soft and hard classification techniques (a minimal sketch of the difference follows the list below):
1.) HARD CLASSIFICATION: A pixel strictly belongs to a single class.
2.) SOFT CLASSIFICATION: A pixel may belong to more than one class, based on its membership value.
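
To make the distinction concrete, here is a minimal sketch with hypothetical membership values (the helper functions are the ones defined in `utils.py`; taking the argmax stands in for the hard classifier):

```python
import numpy as np
from utils import extract_membership, thresholding, linear_stretch

# Hypothetical network output: one row per pixel, one membership per class
memberships = np.array([[0.05, 0.90, 0.10],
                        [0.45, 0.40, 0.85]], dtype=np.float32)

# Hard classification: each pixel is assigned exactly one label
hard_labels = np.argmax(memberships, axis=1)        # -> [1, 2]

# Soft classification: keep the memberships of one chosen class, zero out
# pixels below the threshold, then stretch to 0-255 for display
class_memberships = extract_membership(memberships, class_no=2)
kept = thresholding(class_memberships, threshold=0.8)
display_values = linear_stretch(kept)               # -> [[0.], [255.]]
```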


## Libraries used
* Keras: Keras is an open-source neural-network library written in Python. Designed to enable fast experimentation with deep neural networks, it focuses on being user-friendly, modular, and extensible.
* Tensorflow: TensorFlow is a free and open-source software library for dataflow and differentiable programming across a range of tasks. It is a symbolic math library, and is also used for machine learning applications such as neural networks.
* GDAL: The Geospatial Data Abstraction Library is a computer software library for reading and writing raster and vector geospatial data formats, released under the permissive X/MIT-style free software license by the Open Source Geospatial Foundation.
* Pillow: The Python Imaging Library is a free library for the Python programming language that adds support for opening, manipulating, and saving many different image file formats.
* Tkinter: Tkinter is the standard GUI library for Python. Python combined with Tkinter provides a fast and easy way to create GUI applications.
* Pickle: The pickle module implements binary protocols for serializing and de-serializing a Python object structure. “Pickling” is the process whereby a Python object hierarchy is converted into a byte stream, and “unpickling” is the inverse operation, whereby a byte stream (from a binary file or bytes-like object) is converted back into an object hierarchy.
* Numpy: NumPy is a library for the Python programming language, adding support for large, multi-dimensional arrays and matrices, along with a large collection of high-level mathematical functions to operate on these arrays.
* Matplotlib: Matplotlib is a plotting library for the Python programming language and its numerical mathematics extension NumPy.
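
As an illustration of how Keras and TensorFlow are used here, the network built in the notebook interleaves 1D convolutions with LSTM layers (a condensed sketch of the notebook's model-building cell; the 4 input bands and 6 classes match the included dataset):

```python
from keras import Sequential
from keras.layers import Conv1D, LSTM, MaxPooling1D, Dense

n_classes = 6  # six training classes ship with the repository

model = Sequential()
model.add(Conv1D(32, 4, activation='relu', padding='same', input_shape=(4, 1)))
model.add(LSTM(32, return_sequences=True))
model.add(MaxPooling1D(2))
model.add(Conv1D(16, 8, activation='relu', padding='same'))
model.add(LSTM(64, return_sequences=True))
model.add(MaxPooling1D(2))
model.add(Conv1D(16, 8, activation='relu', padding='same'))
model.add(LSTM(128))
model.add(Dense(n_classes, activation='sigmoid'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
```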

## Description
The repository consists of the following files and folders:
* Notebooks: contains two Jupyter notebooks
  - Hybrid CNN- RNN Model
  - Classification Tool (Hybrid CNN- RNN Model)
* utils.py: contains the supporting functions used in the notebook
* Dataset:
  - Header and image file (subfebformosat2)
  - Training sets for 6 classes
* Results: contains the resulting image files
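
For instance, the bundled `subfebformosat2` image can be loaded into a pixels-by-bands matrix with the helpers from `utils.py` (a sketch; the paths assume the repository root is the working directory):

```python
from utils import hdr_read, ReadBilFile

# Parse the header, then read the 4-band BIL image: one spectrum per pixel
row, col, bands, datatype = hdr_read("Dataset/subfebformosat2.hdr")
image = ReadBilFile("Dataset/subfebformosat2", bands, row * col)
print(image.shape)  # (220616, 4): 436 x 506 pixels, 4 bands each
```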

## Results

![htr](./Results/Hard_classified.PNG)
HARD CLASSIFICATION


![htr](./Results/Soft_classification1.PNG)
SOFT CLASSIFICATION

--------------------------------------------------------------------------------
/notebooks/Hybrid CNN- RNN Model.ipynb:
--------------------------------------------------------------------------------
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Hybrid CNN-RNN Model for Classification of Hyperspectral Satellite Images \n",
    "The following model uses a hybrid CNN-RNN model to classify each pixel into its corresponding class. The code supports both soft and hard classification techniques. \n",
\n", 9 | "1.) HARD CLASSIFICATION : A pixel strictly belongs to a single class.
\n", 10 | "2.) SOFT CLASSIFICATION : A pixel may belong to more than one class based on its membership value.

" 11 | ] 12 | }, 13 | { 14 | "cell_type": "markdown", 15 | "metadata": {}, 16 | "source": [ 17 | "## Importing necessary libraries and supporting files \n", 18 | "NOTE- 'utils' is a supporting .py file which contains all the functions used in the notebook" 19 | ] 20 | }, 21 | { 22 | "cell_type": "code", 23 | "execution_count": 13, 24 | "metadata": {}, 25 | "outputs": [], 26 | "source": [ 27 | "from __future__ import absolute_import\n", 28 | "from __future__ import division\n", 29 | "from __future__ import print_function\n", 30 | "import numpy as np\n", 31 | "import matplotlib.pyplot as plt\n", 32 | "from osgeo import gdal\n", 33 | "from keras.layers import Dense, Conv1D, Input, MaxPooling1D, Flatten, Dropout, LSTM\n", 34 | "from keras import Sequential\n", 35 | "from keras.utils import np_utils\n", 36 | "from PIL import Image\n", 37 | "import array\n", 38 | "import sys\n", 39 | "import os\n", 40 | "from utils import hdr_read, ReadBilFile, thresholding, linear_stretch, display_save_image, extract_membership\n", 41 | "seed = 7\n", 42 | "np.random.seed(seed)\n" 43 | ] 44 | }, 45 | { 46 | "cell_type": "markdown", 47 | "metadata": {}, 48 | "source": [ 49 | "## User Defined Parameters - \n", 50 | "1.)Header File: Contains the information about the image file such as - Datatype, Number of Rows and Columns, Total number of bands etc
\n", 51 | "2.)Image File: The information extracted from header file(with extension .hdr) is used to extract the hyperspectral image from the Image File
\n", 52 | "3.)Training Data: Training data consists of the value of pixel from each band for a particular class
\n", 53 | "4.)Epochs(model training): Number of iterations required to train the model. A lower value of epochs may lead to underfitting while a very high value may lead to overfitting of dataset, thus the choise of epochs is a crusial factor
\n", 54 | "5.)Selected class and Threshold value(soft classification)): For the purpose of soft classification user needs to specify the threshold value (it acts as a limit, the pixels having values above that value are only classifies while pixels below it are assigned a NULL value)
" 55 | ] 56 | }, 57 | { 58 | "cell_type": "code", 59 | "execution_count": 14, 60 | "metadata": {}, 61 | "outputs": [], 62 | "source": [ 63 | "# User defined inputs\n", 64 | "header_name = r\"C:\\Users\\HP DV-6\\Desktop\\final_project\\subfebformosat2.hdr\"\n", 65 | "file_name = r\"C:\\Users\\HP DV-6\\Desktop\\final_project\\subfebformosat2\"\n", 66 | "\n", 67 | "directory = r'C:\\Users\\HP DV-6\\Desktop\\final_project\\Data'\n", 68 | "\n", 69 | "epoch = '50'\n", 70 | "\n", 71 | "threshold = '0.8'\n", 72 | "\n", 73 | "category = 'Clay'" 74 | ] 75 | }, 76 | { 77 | "cell_type": "code", 78 | "execution_count": 15, 79 | "metadata": {}, 80 | "outputs": [], 81 | "source": [ 82 | "# The functions are used for the conversion of string to integer and float values\n", 83 | "# Further the directory path is corrected \n", 84 | "epoch = int(epoch)\n", 85 | "threshold = float(threshold)\n", 86 | "\n", 87 | "directory = directory.replace(os.sep, '/')\n", 88 | "directory = directory + '/'\n", 89 | "\n", 90 | "header_name = header_name.replace(os.sep, '/')\n", 91 | "\n", 92 | "file_name = file_name.replace(os.sep, '/')" 93 | ] 94 | }, 95 | { 96 | "cell_type": "code", 97 | "execution_count": 16, 98 | "metadata": {}, 99 | "outputs": [ 100 | { 101 | "name": "stdout", 102 | "output_type": "stream", 103 | "text": [ 104 | "255 uint8\n" 105 | ] 106 | } 107 | ], 108 | "source": [ 109 | "row, col, bands, datatype = hdr_read(header_name)\n", 110 | "x_test = ReadBilFile(file_name, bands, row*col)\n", 111 | "x_test = x_test.reshape(row*col, bands, 1)\n", 112 | "x_test = x_test / 2**8-1\n", 113 | "\n", 114 | "y_test = np.zeros([row*col], dtype=np.uint8) #Each pixel will belong to one class, so total no. of outputs=rows*columns" 115 | ] 116 | }, 117 | { 118 | "cell_type": "code", 119 | "execution_count": 17, 120 | "metadata": {}, 121 | "outputs": [ 122 | { 123 | "name": "stdout", 124 | "output_type": "stream", 125 | "text": [ 126 | "Clay ==> 22\n", 127 | "Dalforest ==> 23\n", 128 | "EuFinal ==> 8\n", 129 | "Grassland ==> 27\n", 130 | "Water ==> 36\n", 131 | "Wheat ==> 27\n" 132 | ] 133 | } 134 | ], 135 | "source": [ 136 | "items = os.listdir(directory)\n", 137 | "path = items\n", 138 | "c_c = len(path)\n", 139 | "c_l = {items[i] : i for i in range(0, len(items))}\n", 140 | "\n", 141 | "values = []\n", 142 | "clicks={}\n", 143 | "\n", 144 | "for address in path: # function to know the total number of clicks of each class\n", 145 | " with open(directory+address, \"rb\") as f:\n", 146 | " k = len(f.read())\n", 147 | " clicks[address] = (k // 2 // bands) if (k // 2 // bands) < 400 else (k // 2 // bands) // 4\n", 148 | " print('{} ==> {}'.format(address, clicks[address]))\n", 149 | "\n", 150 | "for address in path: # function (VALUES) returns the value of all the pixels (len(values) = total_clicks * 4)\n", 151 | " with open(directory+address, \"rb\") as f:\n", 152 | " b = array.array(\"H\")\n", 153 | " b.fromfile(f, clicks[address]*bands)\n", 154 | " if sys.byteorder == \"little\":\n", 155 | " b.byteswap()\n", 156 | " for v in b:\n", 157 | " values.append(v)" 158 | ] 159 | }, 160 | { 161 | "cell_type": "code", 162 | "execution_count": 18, 163 | "metadata": {}, 164 | "outputs": [], 165 | "source": [ 166 | "# Creation of training set\n", 167 | "ll = (len(values)) # = total_clicks * 4\n", 168 | "rex = ll // bands # = total clicks\n", 169 | "\n", 170 | "\n", 171 | "# Features\n", 172 | "f_in = np.zeros([ll], dtype=np.uint8)\n", 173 | "x = 0\n", 174 | "for i in range(ll):\n", 175 | " f_in[x] = values[i]\n", 176 | " x += 1\n", 177 
| " \n", 178 | "x_train = f_in.reshape(rex, bands)\n", 179 | "x_train = x_train / 2**8-1 # Normalising dataset to facilitate fast training\n", 180 | "\n", 181 | "# Labels\n", 182 | "y_train = np.zeros([rex], dtype=np.uint8)\n", 183 | "mark = 0 # Creating label file (Mark does bifurcation of each class)\n", 184 | "for add in path: # outer loop iterates over each class\n", 185 | " for i in range(clicks[add]): # Inner loop assigns 1, 2, 3 based on c_1 dictionary to each pixel location\n", 186 | " y_train[mark+i] = c_l[add]\n", 187 | " mark = mark + clicks[add]" 188 | ] 189 | }, 190 | { 191 | "cell_type": "code", 192 | "execution_count": 19, 193 | "metadata": {}, 194 | "outputs": [], 195 | "source": [ 196 | "y_train = np_utils.to_categorical(y_train)\n", 197 | "y_test = np_utils.to_categorical(y_test)\n", 198 | "n_classes = c_c\n", 199 | "y_test_new = np.zeros([row * col, c_c], dtype=np.uint8)\n", 200 | "\n", 201 | "\n", 202 | "X = x_train.reshape(x_train.shape[0], bands, 1)" 203 | ] 204 | }, 205 | { 206 | "cell_type": "markdown", 207 | "metadata": {}, 208 | "source": [ 209 | "## Structuring Model and defining model parameters\n", 210 | "KERAS FRAMEWORK is used for the implementation of the model. The model consists of successive 1 dimensional Convolution and LSTM layers followed by Max Pooling layers.

\n", 211 | "Convolution Neural Network: Convolutional Neural Networks, or CNNs were originally designed to map image data to an output variable.\n", 212 | "They have proven so effective that they are the go-to method for any type of prediction problem involving image data as an input. More generally, CNNs work well with data that has a spatial relationship.
\n", 213 | "Recurrent Neural Network: Recurrent Neural Networks, or RNNs, were designed to work with sequence prediction problems. RNNs in general and LSTMs in particular have received the most success when working with sequences of words and paragraphs, generally called natural language processing. However they can be used for variety of other aplications such as image processing as well" 214 | ] 215 | }, 216 | { 217 | "cell_type": "code", 218 | "execution_count": 20, 219 | "metadata": {}, 220 | "outputs": [], 221 | "source": [ 222 | "model = Sequential()\n", 223 | "\n", 224 | "model.add(Conv1D(32, 4, activation='relu', padding='same', input_shape=(4, 1)))\n", 225 | "model.add(LSTM(32, return_sequences=True))\n", 226 | "model.add(MaxPooling1D(2))\n", 227 | "model.add(Conv1D(16, 8, activation=\"relu\", padding='same'))\n", 228 | "model.add(LSTM(64, return_sequences=True))\n", 229 | "model.add(MaxPooling1D(2))\n", 230 | "model.add(Conv1D(16, 8, activation=\"relu\", padding='same'))\n", 231 | "model.add(LSTM(128))\n", 232 | "model.add(Dense(n_classes, activation='sigmoid'))" 233 | ] 234 | }, 235 | { 236 | "cell_type": "markdown", 237 | "metadata": {}, 238 | "source": [ 239 | "### Model compilation and summary" 240 | ] 241 | }, 242 | { 243 | "cell_type": "code", 244 | "execution_count": 21, 245 | "metadata": {}, 246 | "outputs": [ 247 | { 248 | "name": "stdout", 249 | "output_type": "stream", 250 | "text": [ 251 | "_________________________________________________________________\n", 252 | "Layer (type) Output Shape Param # \n", 253 | "=================================================================\n", 254 | "conv1d_4 (Conv1D) (None, 4, 32) 160 \n", 255 | "_________________________________________________________________\n", 256 | "lstm_4 (LSTM) (None, 4, 32) 8320 \n", 257 | "_________________________________________________________________\n", 258 | "max_pooling1d_3 (MaxPooling1 (None, 2, 32) 0 \n", 259 | "_________________________________________________________________\n", 260 | "conv1d_5 (Conv1D) (None, 2, 16) 4112 \n", 261 | "_________________________________________________________________\n", 262 | "lstm_5 (LSTM) (None, 2, 64) 20736 \n", 263 | "_________________________________________________________________\n", 264 | "max_pooling1d_4 (MaxPooling1 (None, 1, 64) 0 \n", 265 | "_________________________________________________________________\n", 266 | "conv1d_6 (Conv1D) (None, 1, 16) 8208 \n", 267 | "_________________________________________________________________\n", 268 | "lstm_6 (LSTM) (None, 128) 74240 \n", 269 | "_________________________________________________________________\n", 270 | "dense_2 (Dense) (None, 6) 774 \n", 271 | "=================================================================\n", 272 | "Total params: 116,550\n", 273 | "Trainable params: 116,550\n", 274 | "Non-trainable params: 0\n", 275 | "_________________________________________________________________\n" 276 | ] 277 | } 278 | ], 279 | "source": [ 280 | "model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n", 281 | "model.summary()" 282 | ] 283 | }, 284 | { 285 | "cell_type": "markdown", 286 | "metadata": {}, 287 | "source": [ 288 | "### Training the model" 289 | ] 290 | }, 291 | { 292 | "cell_type": "code", 293 | "execution_count": 22, 294 | "metadata": {}, 295 | "outputs": [ 296 | { 297 | "name": "stdout", 298 | "output_type": "stream", 299 | "text": [ 300 | "Epoch 1/50\n", 301 | "143/143 [==============================] - 6s 45ms/step - loss: 1.7908 - acc: 0.2448\n", 302 
| "Epoch 2/50\n", 303 | "143/143 [==============================] - 0s 2ms/step - loss: 1.7872 - acc: 0.2517\n", 304 | "Epoch 3/50\n", 305 | "143/143 [==============================] - 0s 2ms/step - loss: 1.7827 - acc: 0.2517\n", 306 | "Epoch 4/50\n", 307 | "143/143 [==============================] - 0s 2ms/step - loss: 1.7694 - acc: 0.2517\n", 308 | "Epoch 5/50\n", 309 | "143/143 [==============================] - 0s 2ms/step - loss: 1.7414 - acc: 0.2517\n", 310 | "Epoch 6/50\n", 311 | "143/143 [==============================] - 0s 2ms/step - loss: 1.7119 - acc: 0.2517\n", 312 | "Epoch 7/50\n", 313 | "143/143 [==============================] - 0s 2ms/step - loss: 1.7009 - acc: 0.2517\n", 314 | "Epoch 8/50\n", 315 | "143/143 [==============================] - ETA: 0s - loss: 1.6771 - acc: 0.261 - 0s 2ms/step - loss: 1.6779 - acc: 0.2517\n", 316 | "Epoch 9/50\n", 317 | "143/143 [==============================] - 0s 2ms/step - loss: 1.6418 - acc: 0.2517\n", 318 | "Epoch 10/50\n", 319 | "143/143 [==============================] - 0s 2ms/step - loss: 1.5920 - acc: 0.2517\n", 320 | "Epoch 11/50\n", 321 | "143/143 [==============================] - 0s 2ms/step - loss: 1.5711 - acc: 0.3497\n", 322 | "Epoch 12/50\n", 323 | "143/143 [==============================] - 0s 2ms/step - loss: 1.5557 - acc: 0.4056\n", 324 | "Epoch 13/50\n", 325 | "143/143 [==============================] - 0s 2ms/step - loss: 1.5404 - acc: 0.4056\n", 326 | "Epoch 14/50\n", 327 | "143/143 [==============================] - 0s 2ms/step - loss: 1.5168 - acc: 0.4056\n", 328 | "Epoch 15/50\n", 329 | "143/143 [==============================] - 0s 2ms/step - loss: 1.4643 - acc: 0.4056\n", 330 | "Epoch 16/50\n", 331 | "143/143 [==============================] - 0s 2ms/step - loss: 1.3757 - acc: 0.4056\n", 332 | "Epoch 17/50\n", 333 | "143/143 [==============================] - 0s 2ms/step - loss: 1.2489 - acc: 0.4056\n", 334 | "Epoch 18/50\n", 335 | "143/143 [==============================] - 0s 2ms/step - loss: 1.1285 - acc: 0.4056\n", 336 | "Epoch 19/50\n", 337 | "143/143 [==============================] - 0s 2ms/step - loss: 1.0402 - acc: 0.4056\n", 338 | "Epoch 20/50\n", 339 | "143/143 [==============================] - 0s 2ms/step - loss: 0.9595 - acc: 0.4126\n", 340 | "Epoch 21/50\n", 341 | "143/143 [==============================] - 0s 2ms/step - loss: 0.8530 - acc: 0.5524\n", 342 | "Epoch 22/50\n", 343 | "143/143 [==============================] - 0s 2ms/step - loss: 0.7579 - acc: 0.7832A: 0s - loss: 0.7912 - acc: 0.79\n", 344 | "Epoch 23/50\n", 345 | "143/143 [==============================] - 0s 2ms/step - loss: 0.6705 - acc: 0.7552\n", 346 | "Epoch 24/50\n", 347 | "143/143 [==============================] - 0s 2ms/step - loss: 0.6076 - acc: 0.7692\n", 348 | "Epoch 25/50\n", 349 | "143/143 [==============================] - 0s 2ms/step - loss: 0.5725 - acc: 0.7552\n", 350 | "Epoch 26/50\n", 351 | "143/143 [==============================] - 0s 2ms/step - loss: 0.5272 - acc: 0.7552\n", 352 | "Epoch 27/50\n", 353 | "143/143 [==============================] - 0s 2ms/step - loss: 0.4996 - acc: 0.7552\n", 354 | "Epoch 28/50\n", 355 | "143/143 [==============================] - 0s 2ms/step - loss: 0.4748 - acc: 0.7552\n", 356 | "Epoch 29/50\n", 357 | "143/143 [==============================] - 0s 2ms/step - loss: 0.4528 - acc: 0.7552\n", 358 | "Epoch 30/50\n", 359 | "143/143 [==============================] - 0s 2ms/step - loss: 0.4332 - acc: 0.7552\n", 360 | "Epoch 31/50\n", 361 | "143/143 [==============================] - 
      "Epoch 32/50\n",
      "143/143 [==============================] - 0s 2ms/step - loss: 0.4071 - acc: 0.7552\n",
      "Epoch 33/50\n",
      "143/143 [==============================] - 0s 2ms/step - loss: 0.3947 - acc: 0.7552\n",
      "Epoch 34/50\n",
      "143/143 [==============================] - 0s 2ms/step - loss: 0.3765 - acc: 0.7552\n",
      "Epoch 35/50\n",
      "143/143 [==============================] - 0s 2ms/step - loss: 0.3726 - acc: 0.7692\n",
      "Epoch 36/50\n",
      "143/143 [==============================] - 0s 2ms/step - loss: 0.3584 - acc: 0.7762\n",
      "Epoch 37/50\n",
      "143/143 [==============================] - 0s 2ms/step - loss: 0.3408 - acc: 0.7972\n",
      "Epoch 38/50\n",
      "143/143 [==============================] - 0s 2ms/step - loss: 0.3344 - acc: 0.8042\n",
      "Epoch 39/50\n",
      "143/143 [==============================] - 0s 2ms/step - loss: 0.3259 - acc: 0.7902\n",
      "Epoch 40/50\n",
      "143/143 [==============================] - 0s 2ms/step - loss: 0.3869 - acc: 0.8322\n",
      "Epoch 41/50\n",
      "143/143 [==============================] - 0s 2ms/step - loss: 0.3753 - acc: 0.8462\n",
      "Epoch 42/50\n",
      "143/143 [==============================] - 0s 2ms/step - loss: 0.3337 - acc: 0.8671\n",
      "Epoch 43/50\n",
      "143/143 [==============================] - 0s 2ms/step - loss: 0.3104 - acc: 0.9161\n",
      "Epoch 44/50\n",
      "143/143 [==============================] - 0s 2ms/step - loss: 0.2984 - acc: 0.9161\n",
      "Epoch 45/50\n",
      "143/143 [==============================] - 0s 2ms/step - loss: 0.2915 - acc: 0.9371\n",
      "Epoch 46/50\n",
      "143/143 [==============================] - 0s 2ms/step - loss: 0.2841 - acc: 0.9441\n",
      "Epoch 47/50\n",
      "143/143 [==============================] - 0s 2ms/step - loss: 0.2839 - acc: 0.9441\n",
      "Epoch 48/50\n",
      "143/143 [==============================] - 0s 2ms/step - loss: 0.2650 - acc: 0.9441\n",
      "Epoch 49/50\n",
      "143/143 [==============================] - 0s 2ms/step - loss: 0.2484 - acc: 0.9441\n",
      "Epoch 50/50\n",
      "143/143 [==============================] - 0s 2ms/step - loss: 0.2392 - acc: 0.9441\n"
     ]
    }
   ],
   "source": [
    "history = model.fit(X, y_train, batch_size=10, epochs=epoch)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Hard Classification \n",
    "In hard classification each pixel belongs to one category only."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    "... interactive matplotlib (nbagg) JavaScript/HTML figure output omitted ..."
   ],
' +\n", 435 | " 'Firefox 4 and 5 are also supported but you ' +\n", 436 | " 'have to enable WebSockets in about:config.');\n", 437 | " };\n", 438 | "}\n", 439 | "\n", 440 | "mpl.figure = function(figure_id, websocket, ondownload, parent_element) {\n", 441 | " this.id = figure_id;\n", 442 | "\n", 443 | " this.ws = websocket;\n", 444 | "\n", 445 | " this.supports_binary = (this.ws.binaryType != undefined);\n", 446 | "\n", 447 | " if (!this.supports_binary) {\n", 448 | " var warnings = document.getElementById(\"mpl-warnings\");\n", 449 | " if (warnings) {\n", 450 | " warnings.style.display = 'block';\n", 451 | " warnings.textContent = (\n", 452 | " \"This browser does not support binary websocket messages. \" +\n", 453 | " \"Performance may be slow.\");\n", 454 | " }\n", 455 | " }\n", 456 | "\n", 457 | " this.imageObj = new Image();\n", 458 | "\n", 459 | " this.context = undefined;\n", 460 | " this.message = undefined;\n", 461 | " this.canvas = undefined;\n", 462 | " this.rubberband_canvas = undefined;\n", 463 | " this.rubberband_context = undefined;\n", 464 | " this.format_dropdown = undefined;\n", 465 | "\n", 466 | " this.image_mode = 'full';\n", 467 | "\n", 468 | " this.root = $('
');\n", 469 | " this._root_extra_style(this.root)\n", 470 | " this.root.attr('style', 'display: inline-block');\n", 471 | "\n", 472 | " $(parent_element).append(this.root);\n", 473 | "\n", 474 | " this._init_header(this);\n", 475 | " this._init_canvas(this);\n", 476 | " this._init_toolbar(this);\n", 477 | "\n", 478 | " var fig = this;\n", 479 | "\n", 480 | " this.waiting = false;\n", 481 | "\n", 482 | " this.ws.onopen = function () {\n", 483 | " fig.send_message(\"supports_binary\", {value: fig.supports_binary});\n", 484 | " fig.send_message(\"send_image_mode\", {});\n", 485 | " if (mpl.ratio != 1) {\n", 486 | " fig.send_message(\"set_dpi_ratio\", {'dpi_ratio': mpl.ratio});\n", 487 | " }\n", 488 | " fig.send_message(\"refresh\", {});\n", 489 | " }\n", 490 | "\n", 491 | " this.imageObj.onload = function() {\n", 492 | " if (fig.image_mode == 'full') {\n", 493 | " // Full images could contain transparency (where diff images\n", 494 | " // almost always do), so we need to clear the canvas so that\n", 495 | " // there is no ghosting.\n", 496 | " fig.context.clearRect(0, 0, fig.canvas.width, fig.canvas.height);\n", 497 | " }\n", 498 | " fig.context.drawImage(fig.imageObj, 0, 0);\n", 499 | " };\n", 500 | "\n", 501 | " this.imageObj.onunload = function() {\n", 502 | " fig.ws.close();\n", 503 | " }\n", 504 | "\n", 505 | " this.ws.onmessage = this._make_on_message_function(this);\n", 506 | "\n", 507 | " this.ondownload = ondownload;\n", 508 | "}\n", 509 | "\n", 510 | "mpl.figure.prototype._init_header = function() {\n", 511 | " var titlebar = $(\n", 512 | " '
');\n", 514 | " var titletext = $(\n", 515 | " '
');\n", 517 | " titlebar.append(titletext)\n", 518 | " this.root.append(titlebar);\n", 519 | " this.header = titletext[0];\n", 520 | "}\n", 521 | "\n", 522 | "\n", 523 | "\n", 524 | "mpl.figure.prototype._canvas_extra_style = function(canvas_div) {\n", 525 | "\n", 526 | "}\n", 527 | "\n", 528 | "\n", 529 | "mpl.figure.prototype._root_extra_style = function(canvas_div) {\n", 530 | "\n", 531 | "}\n", 532 | "\n", 533 | "mpl.figure.prototype._init_canvas = function() {\n", 534 | " var fig = this;\n", 535 | "\n", 536 | " var canvas_div = $('
');\n", 537 | "\n", 538 | " canvas_div.attr('style', 'position: relative; clear: both; outline: 0');\n", 539 | "\n", 540 | " function canvas_keyboard_event(event) {\n", 541 | " return fig.key_event(event, event['data']);\n", 542 | " }\n", 543 | "\n", 544 | " canvas_div.keydown('key_press', canvas_keyboard_event);\n", 545 | " canvas_div.keyup('key_release', canvas_keyboard_event);\n", 546 | " this.canvas_div = canvas_div\n", 547 | " this._canvas_extra_style(canvas_div)\n", 548 | " this.root.append(canvas_div);\n", 549 | "\n", 550 | " var canvas = $('');\n", 551 | " canvas.addClass('mpl-canvas');\n", 552 | " canvas.attr('style', \"left: 0; top: 0; z-index: 0; outline: 0\")\n", 553 | "\n", 554 | " this.canvas = canvas[0];\n", 555 | " this.context = canvas[0].getContext(\"2d\");\n", 556 | "\n", 557 | " var backingStore = this.context.backingStorePixelRatio ||\n", 558 | "\tthis.context.webkitBackingStorePixelRatio ||\n", 559 | "\tthis.context.mozBackingStorePixelRatio ||\n", 560 | "\tthis.context.msBackingStorePixelRatio ||\n", 561 | "\tthis.context.oBackingStorePixelRatio ||\n", 562 | "\tthis.context.backingStorePixelRatio || 1;\n", 563 | "\n", 564 | " mpl.ratio = (window.devicePixelRatio || 1) / backingStore;\n", 565 | "\n", 566 | " var rubberband = $('');\n", 567 | " rubberband.attr('style', \"position: absolute; left: 0; top: 0; z-index: 1;\")\n", 568 | "\n", 569 | " var pass_mouse_events = true;\n", 570 | "\n", 571 | " canvas_div.resizable({\n", 572 | " start: function(event, ui) {\n", 573 | " pass_mouse_events = false;\n", 574 | " },\n", 575 | " resize: function(event, ui) {\n", 576 | " fig.request_resize(ui.size.width, ui.size.height);\n", 577 | " },\n", 578 | " stop: function(event, ui) {\n", 579 | " pass_mouse_events = true;\n", 580 | " fig.request_resize(ui.size.width, ui.size.height);\n", 581 | " },\n", 582 | " });\n", 583 | "\n", 584 | " function mouse_event_fn(event) {\n", 585 | " if (pass_mouse_events)\n", 586 | " return fig.mouse_event(event, event['data']);\n", 587 | " }\n", 588 | "\n", 589 | " rubberband.mousedown('button_press', mouse_event_fn);\n", 590 | " rubberband.mouseup('button_release', mouse_event_fn);\n", 591 | " // Throttle sequential mouse events to 1 every 20ms.\n", 592 | " rubberband.mousemove('motion_notify', mouse_event_fn);\n", 593 | "\n", 594 | " rubberband.mouseenter('figure_enter', mouse_event_fn);\n", 595 | " rubberband.mouseleave('figure_leave', mouse_event_fn);\n", 596 | "\n", 597 | " canvas_div.on(\"wheel\", function (event) {\n", 598 | " event = event.originalEvent;\n", 599 | " event['data'] = 'scroll'\n", 600 | " if (event.deltaY < 0) {\n", 601 | " event.step = 1;\n", 602 | " } else {\n", 603 | " event.step = -1;\n", 604 | " }\n", 605 | " mouse_event_fn(event);\n", 606 | " });\n", 607 | "\n", 608 | " canvas_div.append(canvas);\n", 609 | " canvas_div.append(rubberband);\n", 610 | "\n", 611 | " this.rubberband = rubberband;\n", 612 | " this.rubberband_canvas = rubberband[0];\n", 613 | " this.rubberband_context = rubberband[0].getContext(\"2d\");\n", 614 | " this.rubberband_context.strokeStyle = \"#000000\";\n", 615 | "\n", 616 | " this._resize_canvas = function(width, height) {\n", 617 | " // Keep the size of the canvas, canvas container, and rubber band\n", 618 | " // canvas in synch.\n", 619 | " canvas_div.css('width', width)\n", 620 | " canvas_div.css('height', height)\n", 621 | "\n", 622 | " canvas.attr('width', width * mpl.ratio);\n", 623 | " canvas.attr('height', height * mpl.ratio);\n", 624 | " canvas.attr('style', 'width: ' + width + 'px; height: 
' + height + 'px;');\n", 625 | "\n", 626 | " rubberband.attr('width', width);\n", 627 | " rubberband.attr('height', height);\n", 628 | " }\n", 629 | "\n", 630 | " // Set the figure to an initial 600x600px, this will subsequently be updated\n", 631 | " // upon first draw.\n", 632 | " this._resize_canvas(600, 600);\n", 633 | "\n", 634 | " // Disable right mouse context menu.\n", 635 | " $(this.rubberband_canvas).bind(\"contextmenu\",function(e){\n", 636 | " return false;\n", 637 | " });\n", 638 | "\n", 639 | " function set_focus () {\n", 640 | " canvas.focus();\n", 641 | " canvas_div.focus();\n", 642 | " }\n", 643 | "\n", 644 | " window.setTimeout(set_focus, 100);\n", 645 | "}\n", 646 | "\n", 647 | "mpl.figure.prototype._init_toolbar = function() {\n", 648 | " var fig = this;\n", 649 | "\n", 650 | " var nav_element = $('
');\n", 651 | " nav_element.attr('style', 'width: 100%');\n", 652 | " this.root.append(nav_element);\n", 653 | "\n", 654 | " // Define a callback function for later on.\n", 655 | " function toolbar_event(event) {\n", 656 | " return fig.toolbar_button_onclick(event['data']);\n", 657 | " }\n", 658 | " function toolbar_mouse_event(event) {\n", 659 | " return fig.toolbar_button_onmouseover(event['data']);\n", 660 | " }\n", 661 | "\n", 662 | " for(var toolbar_ind in mpl.toolbar_items) {\n", 663 | " var name = mpl.toolbar_items[toolbar_ind][0];\n", 664 | " var tooltip = mpl.toolbar_items[toolbar_ind][1];\n", 665 | " var image = mpl.toolbar_items[toolbar_ind][2];\n", 666 | " var method_name = mpl.toolbar_items[toolbar_ind][3];\n", 667 | "\n", 668 | " if (!name) {\n", 669 | " // put a spacer in here.\n", 670 | " continue;\n", 671 | " }\n", 672 | " var button = $('');\n", 1918 | " button.click(method_name, toolbar_event);\n", 1919 | " button.mouseover(tooltip, toolbar_mouse_event);\n", 1920 | " nav_element.append(button);\n", 1921 | " }\n", 1922 | "\n", 1923 | " // Add the status bar.\n", 1924 | " var status_bar = $('');\n", 1925 | " nav_element.append(status_bar);\n", 1926 | " this.message = status_bar[0];\n", 1927 | "\n", 1928 | " // Add the close button to the window.\n", 1929 | " var buttongrp = $('
');\n", 1930 | " var button = $('');\n", 1931 | " button.click(function (evt) { fig.handle_close(fig, {}); } );\n", 1932 | " button.mouseover('Stop Interaction', toolbar_mouse_event);\n", 1933 | " buttongrp.append(button);\n", 1934 | " var titlebar = this.root.find($('.ui-dialog-titlebar'));\n", 1935 | " titlebar.prepend(buttongrp);\n", 1936 | "}\n", 1937 | "\n", 1938 | "mpl.figure.prototype._root_extra_style = function(el){\n", 1939 | " var fig = this\n", 1940 | " el.on(\"remove\", function(){\n", 1941 | "\tfig.close_ws(fig, {});\n", 1942 | " });\n", 1943 | "}\n", 1944 | "\n", 1945 | "mpl.figure.prototype._canvas_extra_style = function(el){\n", 1946 | " // this is important to make the div 'focusable\n", 1947 | " el.attr('tabindex', 0)\n", 1948 | " // reach out to IPython and tell the keyboard manager to turn it's self\n", 1949 | " // off when our div gets focus\n", 1950 | "\n", 1951 | " // location in version 3\n", 1952 | " if (IPython.notebook.keyboard_manager) {\n", 1953 | " IPython.notebook.keyboard_manager.register_events(el);\n", 1954 | " }\n", 1955 | " else {\n", 1956 | " // location in version 2\n", 1957 | " IPython.keyboard_manager.register_events(el);\n", 1958 | " }\n", 1959 | "\n", 1960 | "}\n", 1961 | "\n", 1962 | "mpl.figure.prototype._key_event_extra = function(event, name) {\n", 1963 | " var manager = IPython.notebook.keyboard_manager;\n", 1964 | " if (!manager)\n", 1965 | " manager = IPython.keyboard_manager;\n", 1966 | "\n", 1967 | " // Check for shift+enter\n", 1968 | " if (event.shiftKey && event.which == 13) {\n", 1969 | " this.canvas_div.blur();\n", 1970 | " event.shiftKey = false;\n", 1971 | " // Send a \"J\" for go to next cell\n", 1972 | " event.which = 74;\n", 1973 | " event.keyCode = 74;\n", 1974 | " manager.command_mode();\n", 1975 | " manager.handle_keydown(event);\n", 1976 | " }\n", 1977 | "}\n", 1978 | "\n", 1979 | "mpl.figure.prototype.handle_save = function(fig, msg) {\n", 1980 | " fig.ondownload(fig, null);\n", 1981 | "}\n", 1982 | "\n", 1983 | "\n", 1984 | "mpl.find_output_cell = function(html_output) {\n", 1985 | " // Return the cell and output element which can be found *uniquely* in the notebook.\n", 1986 | " // Note - this is a bit hacky, but it is done because the \"notebook_saving.Notebook\"\n", 1987 | " // IPython event is triggered only after the cells have been serialised, which for\n", 1988 | " // our purposes (turning an active figure into a static one), is too late.\n", 1989 | " var cells = IPython.notebook.get_cells();\n", 1990 | " var ncells = cells.length;\n", 1991 | " for (var i=0; i= 3 moved mimebundle to data attribute of output\n", 1998 | " data = data.data;\n", 1999 | " }\n", 2000 | " if (data['text/html'] == html_output) {\n", 2001 | " return [cell, data, j];\n", 2002 | " }\n", 2003 | " }\n", 2004 | " }\n", 2005 | " }\n", 2006 | "}\n", 2007 | "\n", 2008 | "// Register the function which deals with the matplotlib target/channel.\n", 2009 | "// The kernel may be null if the page has been refreshed.\n", 2010 | "if (IPython.notebook.kernel != null) {\n", 2011 | " IPython.notebook.kernel.comm_manager.register_target('matplotlib', mpl.mpl_figure_comm);\n", 2012 | "}\n" 2013 | ], 2014 | "text/plain": [ 2015 | "" 2016 | ] 2017 | }, 2018 | "metadata": {}, 2019 | "output_type": "display_data" 2020 | }, 2021 | { 2022 | "data": { 2023 | "text/html": [ 2024 | "" 2025 | ], 2026 | "text/plain": [ 2027 | "" 2028 | ] 2029 | }, 2030 | "metadata": {}, 2031 | "output_type": "display_data" 2032 | } 2033 | ], 2034 | "source": [ 2035 | "%matplotlib 
notebook\n", 2036 | "arg = c_l[category]\n", 2037 | "print('Class number: ', arg)\n", 2038 | "y1 = extract_membership(y_test_new, arg)\n", 2039 | "y1_th = thresholding(y1, threshold)\n", 2040 | "y1_std = linear_stretch(y1_th)\n", 2041 | "\n", 2042 | "strg = category\n", 2043 | "name = 'soft'+ strg\n", 2044 | "\n", 2045 | "display_save_image(y1_std, row, col, name)" 2046 | ] 2047 | }, 2048 | { 2049 | "cell_type": "code", 2050 | "execution_count": null, 2051 | "metadata": {}, 2052 | "outputs": [], 2053 | "source": [] 2054 | } 2055 | ], 2056 | "metadata": { 2057 | "kernelspec": { 2058 | "display_name": "Python 3", 2059 | "language": "python", 2060 | "name": "python3" 2061 | }, 2062 | "language_info": { 2063 | "codemirror_mode": { 2064 | "name": "ipython", 2065 | "version": 3 2066 | }, 2067 | "file_extension": ".py", 2068 | "mimetype": "text/x-python", 2069 | "name": "python", 2070 | "nbconvert_exporter": "python", 2071 | "pygments_lexer": "ipython3", 2072 | "version": "3.7.1" 2073 | } 2074 | }, 2075 | "nbformat": 4, 2076 | "nbformat_minor": 2 2077 | } 2078 | --------------------------------------------------------------------------------