├── README.md └── MorphCNN.ipynb /README.md: -------------------------------------------------------------------------------- 1 | The repository contains the implementations for [Morphological Convolutional Neural Networks for Hyperspectral Image Classification](https://ieeexplore.ieee.org/document/9451651). 2 | 3 | ### Datasets 4 | 5 | You can get the dataset from our [HSI-Traditional-to-Deep-Models](https://github.com/AnkurDeria/HSI-Traditional-to-Deep-Models) repository (don't forget to give a star). Put the dataset folder in the root directory before running the notebook. 6 | 7 | 8 | If you have questions or suggestions, please feel free to open an issue. Please cite as: 9 | ``` 10 | @article{roy2021morphological, 11 | title={Morphological Convolutional Neural Networks for Hyperspectral Image Classification}, 12 | author={Swalpa Kumar Roy and Ranjan Mondal and Mercedes E Paoletti and Juan M Haut and Antonio Plaza}, 13 | journal={IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing}, 14 | volume={14}, 15 | pages={8689--8702}, 16 | year={2021}, 17 | publisher={IEEE} 18 | } 19 | ``` 20 | ### Acknowledgement 21 | 22 | Part of this code is from an implementation of 2D Morphological Network CNN by [ranjanZ](https://github.com/ranjanZ/2D-Morphological-Network). 23 | -------------------------------------------------------------------------------- /MorphCNN.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "id": "e45d67ea", 7 | "metadata": { 8 | "ExecuteTime": { 9 | "start_time": "2021-11-11T11:40:04.505Z" 10 | }, 11 | "code_folding": [] 12 | }, 13 | "outputs": [ 14 | { 15 | "name": "stderr", 16 | "output_type": "stream", 17 | "text": [ 18 | "2021-11-11 03:40:36.060458: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudart.so.10.2\n" 19 | ] 20 | }, 21 | { 22 | "name": "stdout", 23 | "output_type": "stream", 24 | "text": [ 25 | "Trento\n", 26 | "True\n", 27 | "(819, 11, 11, 63)\n", 28 | "Test patch 1 shape torch.Size([29395, 11, 11, 63])\n", 29 | "6\n", 30 | "WARNING:tensorflow:From /tmp/ipykernel_10929/2684546465.py:136: The name tf.keras.backend.set_session is deprecated. Please use tf.compat.v1.keras.backend.set_session instead.\n", 31 | "\n", 32 | "WARNING:tensorflow:From /home/users/jgecvision/.conda/envs/purb37/lib/python3.7/site-packages/tensorflow/python/ops/resource_variable_ops.py:1666: calling BaseResourceVariable.__init__ (from tensorflow.python.ops.resource_variable_ops) with constraint is deprecated and will be removed in a future version.\n", 33 | "Instructions for updating:\n", 34 | "If using Keras pass *_constraint arguments to layers.\n" 35 | ] 36 | }, 37 | { 38 | "name": "stderr", 39 | "output_type": "stream", 40 | "text": [ 41 | "2021-11-11 03:40:39.359299: I tensorflow/core/platform/profile_utils/cpu_utils.cc:102] CPU Frequency: 3783000000 Hz\n", 42 | "2021-11-11 03:40:39.366322: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x148881220 initialized for platform Host (this does not guarantee that XLA will be used). 
Devices:\n", 43 | "2021-11-11 03:40:39.366350: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (0): Host, Default Version\n", 44 | "2021-11-11 03:40:39.370349: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcuda.so.1\n", 45 | "2021-11-11 03:40:39.399778: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1105] Device interconnect StreamExecutor with strength 1 edge matrix:\n", 46 | "2021-11-11 03:40:39.399798: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1111] \n", 47 | "2021-11-11 03:40:39.466478: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1564] Found device 0 with properties: \n", 48 | "pciBusID: 0035:04:00.0 name: Tesla V100-SXM2-32GB computeCapability: 7.0\n", 49 | "coreClock: 1.53GHz coreCount: 80 deviceMemorySize: 31.50GiB deviceMemoryBandwidth: 836.37GiB/s\n", 50 | "2021-11-11 03:40:39.466520: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudart.so.10.2\n", 51 | "2021-11-11 03:40:39.466581: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcublas.so.10\n", 52 | "2021-11-11 03:40:39.466619: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcufft.so.10\n", 53 | "2021-11-11 03:40:39.466656: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcurand.so.10\n", 54 | "2021-11-11 03:40:39.469092: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcusolver.so.10\n", 55 | "2021-11-11 03:40:39.469141: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcusparse.so.10\n", 56 | "2021-11-11 03:40:39.469179: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudnn.so.7\n", 57 | "2021-11-11 03:40:39.473393: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1706] Adding visible gpu devices: 0\n" 58 | ] 59 | }, 60 | { 61 | "name": "stdout", 62 | "output_type": "stream", 63 | "text": [ 64 | "Model: \"model\"\n", 65 | "_________________________________________________________________\n", 66 | "Layer (type) Output Shape Param # \n", 67 | "=================================================================\n", 68 | "input_1 (InputLayer) [(None, 11, 11, 63)] 0 \n", 69 | "_________________________________________________________________\n", 70 | "conv2d (Conv2D) (None, 11, 11, 64) 36352 \n", 71 | "_________________________________________________________________\n", 72 | "batch_normalization (BatchNo (None, 11, 11, 64) 256 \n", 73 | "_________________________________________________________________\n", 74 | "activation (Activation) (None, 11, 11, 64) 0 \n", 75 | "_________________________________________________________________\n", 76 | "spectral_morph (SpectralMorp (None, 11, 11, 64) 0 \n", 77 | "_________________________________________________________________\n", 78 | "batch_normalization_1 (Batch (None, 11, 11, 64) 256 \n", 79 | "_________________________________________________________________\n", 80 | "activation_1 (Activation) (None, 11, 11, 64) 0 \n", 81 | "_________________________________________________________________\n", 82 | "conv2d_3 (Conv2D) (None, 11, 11, 64) 36928 \n", 83 | "_________________________________________________________________\n", 84 | "batch_normalization_2 (Batch (None, 11, 11, 64) 256 \n", 85 | 
"_________________________________________________________________\n", 86 | "activation_2 (Activation) (None, 11, 11, 64) 0 \n", 87 | "_________________________________________________________________\n", 88 | "spatial_morph (SpatialMorph) (None, 11, 11, 64) 0 \n", 89 | "_________________________________________________________________\n", 90 | "batch_normalization_3 (Batch (None, 11, 11, 64) 256 \n", 91 | "_________________________________________________________________\n", 92 | "activation_3 (Activation) (None, 11, 11, 64) 0 \n", 93 | "_________________________________________________________________\n", 94 | "conv2d_6 (Conv2D) (None, 11, 11, 128) 73856 \n", 95 | "_________________________________________________________________\n", 96 | "batch_normalization_4 (Batch (None, 11, 11, 128) 512 \n", 97 | "_________________________________________________________________\n", 98 | "activation_4 (Activation) (None, 11, 11, 128) 0 \n", 99 | "_________________________________________________________________\n", 100 | "max_pooling2d (MaxPooling2D) (None, 6, 6, 128) 0 \n", 101 | "_________________________________________________________________\n", 102 | "conv2d_7 (Conv2D) (None, 6, 6, 512) 590336 \n", 103 | "_________________________________________________________________\n", 104 | "batch_normalization_5 (Batch (None, 6, 6, 512) 2048 \n", 105 | "_________________________________________________________________\n", 106 | "activation_5 (Activation) (None, 6, 6, 512) 0 \n", 107 | "_________________________________________________________________\n", 108 | "global_average_pooling2d (Gl (None, 512) 0 \n", 109 | "_________________________________________________________________\n", 110 | "dense (Dense) (None, 128) 65664 \n", 111 | "_________________________________________________________________\n", 112 | "batch_normalization_6 (Batch (None, 128) 512 \n", 113 | "_________________________________________________________________\n", 114 | "dropout (Dropout) (None, 128) 0 \n", 115 | "_________________________________________________________________\n", 116 | "dense_1 (Dense) (None, 64) 8256 \n", 117 | "_________________________________________________________________\n", 118 | "batch_normalization_7 (Batch (None, 64) 256 \n", 119 | "_________________________________________________________________\n", 120 | "dropout_1 (Dropout) (None, 64) 0 \n", 121 | "_________________________________________________________________\n", 122 | "dense_2 (Dense) (None, 6) 390 \n", 123 | "=================================================================\n", 124 | "Total params: 816,134\n", 125 | "Trainable params: 813,958\n", 126 | "Non-trainable params: 2,176\n", 127 | "_________________________________________________________________\n", 128 | "Train on 819 samples, validate on 29395 samples\n", 129 | "WARNING:tensorflow:OMP_NUM_THREADS is no longer used by the default Keras config. To configure the number of threads, use tf.config.threading APIs.\n" 130 | ] 131 | }, 132 | { 133 | "name": "stderr", 134 | "output_type": "stream", 135 | "text": [ 136 | "2021-11-11 03:40:42.212697: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x14a497b20 initialized for platform CUDA (this does not guarantee that XLA will be used). 
Devices:\n", 137 | "2021-11-11 03:40:42.212828: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (0): Tesla V100-SXM2-32GB, Compute Capability 7.0\n", 138 | "2021-11-11 03:40:42.215144: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1564] Found device 0 with properties: \n", 139 | "pciBusID: 0035:04:00.0 name: Tesla V100-SXM2-32GB computeCapability: 7.0\n", 140 | "coreClock: 1.53GHz coreCount: 80 deviceMemorySize: 31.50GiB deviceMemoryBandwidth: 836.37GiB/s\n", 141 | "2021-11-11 03:40:42.215177: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudart.so.10.2\n", 142 | "2021-11-11 03:40:42.215193: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcublas.so.10\n", 143 | "2021-11-11 03:40:42.215206: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcufft.so.10\n", 144 | "2021-11-11 03:40:42.215218: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcurand.so.10\n", 145 | "2021-11-11 03:40:42.215267: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcusolver.so.10\n", 146 | "2021-11-11 03:40:42.215283: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcusparse.so.10\n", 147 | "2021-11-11 03:40:42.215296: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudnn.so.7\n", 148 | "2021-11-11 03:40:42.219447: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1706] Adding visible gpu devices: 0\n", 149 | "2021-11-11 03:40:44.122556: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1105] Device interconnect StreamExecutor with strength 1 edge matrix:\n", 150 | "2021-11-11 03:40:44.122753: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1111] 0 \n", 151 | "2021-11-11 03:40:44.122772: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1124] 0: N \n", 152 | "2021-11-11 03:40:44.127540: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1250] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 29696 MB memory) -> physical GPU (device: 0, name: Tesla V100-SXM2-32GB, pci bus id: 0035:04:00.0, compute capability: 7.0)\n" 153 | ] 154 | }, 155 | { 156 | "name": "stdout", 157 | "output_type": "stream", 158 | "text": [ 159 | "Epoch 1/200\n", 160 | "819/819 [==============================] - 11s 13ms/sample - loss: 1.0501 - accuracy: 0.6654 - val_loss: 1.6717 - val_accuracy: 0.4547\n", 161 | "Epoch 2/200\n", 162 | "819/819 [==============================] - 9s 11ms/sample - loss: 0.3356 - accuracy: 0.9170 - val_loss: 1.5281 - val_accuracy: 0.5744\n", 163 | "Epoch 3/200\n", 164 | "819/819 [==============================] - 9s 11ms/sample - loss: 0.2160 - accuracy: 0.9475 - val_loss: 1.2191 - val_accuracy: 0.4414\n", 165 | "Epoch 4/200\n", 166 | "819/819 [==============================] - 9s 11ms/sample - loss: 0.1403 - accuracy: 0.9634 - val_loss: 1.2573 - val_accuracy: 0.5101\n", 167 | "Epoch 5/200\n", 168 | "819/819 [==============================] - 9s 11ms/sample - loss: 0.0801 - accuracy: 0.9878 - val_loss: 1.7978 - val_accuracy: 0.2250\n", 169 | "Epoch 6/200\n", 170 | "819/819 [==============================] - 12s 14ms/sample - loss: 0.0538 - accuracy: 0.9939 - val_loss: 2.0616 - val_accuracy: 0.2258\n", 171 | "Epoch 7/200\n", 172 | "819/819 [==============================] - ETA: 0s - 
loss: 0.0506 - accuracy: 0.9927" 173 | ] 174 | } 175 | ], 176 | "source": [ 177 | "'''\n", 178 | "Re-implementation for paper \"Morphological Convolutional Neural Networks for Hyperspectral Image Classification\"\n", 179 | "The official implementation is in https://github.com/ranjanZ/2D-Morphological-Network\n", 180 | "'''\n", 181 | "\n", 182 | "############ IMPORTS ####################\n", 183 | "\n", 184 | "# import sys\n", 185 | "# sys.path.insert(0,'/home/users/jgecvision/.conda/envs/ankur01/lib/python3.8/site-packages')\n", 186 | "import os\n", 187 | "import numpy as np\n", 188 | "import torch.utils.data as dataf\n", 189 | "from scipy import io\n", 190 | "import sys\n", 191 | "import spectral\n", 192 | "import tensorflow as tf\n", 193 | "from sklearn.decomposition import PCA\n", 194 | "sys.path.append(\"./../\")\n", 195 | "from PIL import Image\n", 196 | "import numpy as np\n", 197 | "from sklearn.metrics import confusion_matrix, accuracy_score, classification_report, cohen_kappa_score\n", 198 | "import torch\n", 199 | "from operator import truediv\n", 200 | "import record\n", 201 | "from tensorflow import keras\n", 202 | "import tensorflow.keras.backend as K\n", 203 | "from tensorflow.keras.callbacks import ModelCheckpoint\n", 204 | "from keras.engine.topology import Layer\n", 205 | "from tensorflow.keras import initializers, constraints\n", 206 | "from tensorflow.keras.models import load_model\n", 207 | "from tensorflow.keras.models import Sequential\n", 208 | "from keras.utils import conv_utils\n", 209 | "from tensorflow.keras.losses import categorical_crossentropy\n", 210 | "from tensorflow.keras.layers import InputSpec, GlobalAveragePooling2D\n", 211 | "from tensorflow.keras.layers import Dense, Dropout, Flatten\n", 212 | "from tensorflow.keras.layers import Conv2D, MaxPooling2D\n", 213 | "from tensorflow.keras.layers import Conv2D, Conv1D, MaxPooling2D, Flatten, Dense\n", 214 | "from tensorflow.keras.layers import Reshape, BatchNormalization, Layer, Dropout\n", 215 | "from tensorflow.keras.layers import Dropout, Input, LeakyReLU, Multiply\n", 216 | "from tensorflow.keras.layers import GlobalAveragePooling2D, AveragePooling2D\n", 217 | "from tensorflow.keras.layers import Add, Activation, GlobalMaxPooling2D\n", 218 | "from tensorflow.keras.models import Model\n", 219 | "from tensorflow.keras.optimizers import Adam, SGD\n", 220 | "from tensorflow.keras import regularizers\n", 221 | "from tensorflow.keras.utils import to_categorical as keras_to_categorical\n", 222 | "\n", 223 | "\n", 224 | "############# CONFIGS ##########################\n", 225 | "\n", 226 | "# tf.config.experimental_run_functions_eagerly(True)\n", 227 | "# os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"3\"\n", 228 | "\n", 229 | "datasetNames = [\"Trento\"]\n", 230 | "testSizeNumber = 5000\n", 231 | "patchsize1 = 11\n", 232 | "patchsize2 = 11\n", 233 | "batchsize = 64\n", 234 | "EPOCH = 200\n", 235 | "LR = 0.001\n", 236 | "\n", 237 | "def AA_andEachClassAccuracy(confusion_matrix):\n", 238 | " counter = confusion_matrix.shape[0]\n", 239 | " list_diag = np.diag(confusion_matrix)\n", 240 | " list_raw_sum = np.sum(confusion_matrix, axis=1)\n", 241 | " each_acc = np.nan_to_num(truediv(list_diag, list_raw_sum))\n", 242 | " average_acc = np.mean(each_acc)\n", 243 | " return each_acc*100, average_acc*100\n", 244 | "\n", 245 | "\n", 246 | "\n", 247 | "\n", 248 | "for datasetName in datasetNames:\n", 249 | " \n", 250 | " print(\"----------------------------------Training for 
\",datasetName,\"---------------------------------------------\")\n", 251 | "\n", 252 | " try:\n", 253 | " os.makedirs(datasetName)\n", 254 | " except FileExistsError:\n", 255 | " pass\n", 256 | " # Train data\n", 257 | " HSI = io.loadmat('./../'+datasetName+'11x11/HSI_Tr.mat')\n", 258 | " TrainPatch = HSI['Data']\n", 259 | " TrainPatch = TrainPatch.astype(np.float32)\n", 260 | " NC = TrainPatch.shape[3] # NC is number of bands\n", 261 | "\n", 262 | " label = io.loadmat('./../'+datasetName+'11x11/TrLabel.mat')\n", 263 | " TrLabel = label['Data']\n", 264 | "\n", 265 | " # Test data\n", 266 | " HSI = io.loadmat('./../'+datasetName+'11x11/HSI_Te.mat')\n", 267 | " TestPatch = HSI['Data']\n", 268 | " TestPatch = TestPatch.astype(np.float32)\n", 269 | "\n", 270 | " label = io.loadmat('./../'+datasetName+'11x11/TeLabel.mat')\n", 271 | " TsLabel = label['Data']\n", 272 | "\n", 273 | "\n", 274 | " TrainPatch1 = torch.from_numpy(TrainPatch)\n", 275 | " # TrainPatch1 = TrainPatch1.permute(0,3,1,2)\n", 276 | " TrainLabel1 = torch.from_numpy(TrLabel)-1\n", 277 | " TrainLabel1 = TrainLabel1.long()\n", 278 | "\n", 279 | "\n", 280 | " TestPatch1 = torch.from_numpy(TestPatch)\n", 281 | " # TestPatch1 = TestPatch1.permute(0,3,1,2)\n", 282 | " TestLabel1 = torch.from_numpy(TsLabel)-1\n", 283 | " TestLabel1 = TestLabel1.long()\n", 284 | "\n", 285 | " Classes = len(np.unique(TrainLabel1))\n", 286 | " TrainPatch1 = TrainPatch1.reshape(TrainPatch1.shape[0],TrainPatch1.shape[1]*TrainPatch1.shape[2],TrainPatch1.shape[3])\n", 287 | " TestPatch1 = TestPatch1.reshape(TestPatch1.shape[0],TestPatch1.shape[1]*TestPatch1.shape[2],TestPatch1.shape[3])\n", 288 | " print(\"Train data shape = \", TrainPatch1.shape)\n", 289 | " print(\"Train label shape = \", TrainLabel1.shape)\n", 290 | " print(\"Test data shape = \", TestPatch1.shape)\n", 291 | " print(\"Test label shape = \", TestLabel1.shape)\n", 292 | " \n", 293 | " \n", 294 | " \n", 295 | " KAPPA = []\n", 296 | " OA = []\n", 297 | " AA = []\n", 298 | " ELEMENT_ACC = np.zeros((3, Classes))\n", 299 | " tf.compat.v1.keras.backend.clear_session()\n", 300 | " config = tf.compat.v1.ConfigProto( device_count = {'GPU': 0} ) \n", 301 | " config.gpu_options.allow_growth = True\n", 302 | " sess = tf.compat.v1.Session(config=config) \n", 303 | " tf.compat.v1.keras.backend.set_session(sess)\n", 304 | " g = tf.Graph()\n", 305 | " with g.as_default():\n", 306 | " filters = 64\n", 307 | " class Erosion2D(Layer):\n", 308 | " '''\n", 309 | " Erosion 2D Layer\n", 310 | " for now assuming channel last\n", 311 | " '''\n", 312 | "\n", 313 | " def __init__(self, num_filters, kernel_size, strides=(1, 1),\n", 314 | " padding='same', kernel_initializer='glorot_uniform',\n", 315 | " kernel_constraint=None,\n", 316 | " **kwargs):\n", 317 | " super(Erosion2D, self).__init__(**kwargs)\n", 318 | " self.num_filters = num_filters\n", 319 | " self.kernel_size = kernel_size\n", 320 | " self.strides = strides\n", 321 | " self.padding = padding\n", 322 | "\n", 323 | " self.kernel_initializer = initializers.get(kernel_initializer)\n", 324 | " self.kernel_constraint = constraints.get(kernel_constraint)\n", 325 | " # for we are assuming channel last\n", 326 | " self.channel_axis = -1\n", 327 | "\n", 328 | " # self.output_dim = output_dim\n", 329 | "\n", 330 | " def build(self, input_shape):\n", 331 | " if input_shape[self.channel_axis] is None:\n", 332 | " raise ValueError('The channel dimension of the inputs '\n", 333 | " 'should be defined. 
Found `None`.')\n", 334 | "\n", 335 | " input_dim = input_shape[self.channel_axis]\n", 336 | " kernel_shape = self.kernel_size + (input_dim, self.num_filters)\n", 337 | "\n", 338 | " self.kernel = self.add_weight(shape=kernel_shape,\n", 339 | " initializer=self.kernel_initializer,\n", 340 | " name='kernel',\n", 341 | " constraint=self.kernel_constraint)\n", 342 | "\n", 343 | " # Be sure to call this at the end\n", 344 | " super(Erosion2D, self).build(input_shape)\n", 345 | " self.input_spec = InputSpec(ndim=4,\n", 346 | " axes={3: input_shape[-1]})\n", 347 | " self.built = True\n", 348 | "\n", 349 | " def call(self, x):\n", 350 | " outputs = self.erosion2d(x, self.kernel[...,1],\n", 351 | " self.strides, self.padding)\n", 352 | "\n", 353 | " return outputs\n", 354 | "\n", 355 | " def erosion2d(self, x, st_element, strides, padding,\n", 356 | " rates=(1, 1, 1, 1)):\n", 357 | " # tf.nn.erosion2d(input, filter, strides, rates, padding, name=None)\n", 358 | " x = tf.compat.v1.nn.erosion2d(x, st_element, (1, ) + strides + (1, ),\n", 359 | " rates, padding.upper())\n", 360 | " return x\n", 361 | "\n", 362 | " class Dilation2D(Layer):\n", 363 | " '''\n", 364 | " Dilation 2D Layer\n", 365 | " for now assuming channel last\n", 366 | " '''\n", 367 | "\n", 368 | " def __init__(self, num_filters, kernel_size, strides=(1, 1),\n", 369 | " padding='same', kernel_initializer='glorot_uniform',\n", 370 | " kernel_constraint=None,\n", 371 | " **kwargs):\n", 372 | " super(Dilation2D, self).__init__(**kwargs)\n", 373 | " self.num_filters = num_filters\n", 374 | " self.kernel_size = kernel_size\n", 375 | " self.strides = strides\n", 376 | " self.padding = padding\n", 377 | "\n", 378 | " self.kernel_initializer = initializers.get(kernel_initializer)\n", 379 | " self.kernel_constraint = constraints.get(kernel_constraint)\n", 380 | " # for we are assuming channel last\n", 381 | " self.channel_axis = -1\n", 382 | "\n", 383 | " # self.output_dim = output_dim\n", 384 | "\n", 385 | " def build(self, input_shape):\n", 386 | " if input_shape[self.channel_axis] is None:\n", 387 | " raise ValueError('The channel dimension of the inputs '\n", 388 | " 'should be defined. 
Found `None`.')\n", 389 | "\n", 390 | " input_dim = input_shape[self.channel_axis]\n", 391 | " kernel_shape = self.kernel_size + (input_dim, self.num_filters)\n", 392 | "\n", 393 | " self.kernel = self.add_weight(shape=kernel_shape,\n", 394 | " initializer=self.kernel_initializer,\n", 395 | " name='kernel',\n", 396 | " constraint=self.kernel_constraint)\n", 397 | "\n", 398 | " # Be sure to call this at the end\n", 399 | " super(Dilation2D, self).build(input_shape)\n", 400 | " self.input_spec = InputSpec(ndim=4,\n", 401 | " axes={3: input_shape[-1]})\n", 402 | " self.built = True\n", 403 | "\n", 404 | " def call(self, x):\n", 405 | " outputs = self.dilation2d(x, self.kernel[...,1],\n", 406 | " self.strides, self.padding)\n", 407 | "\n", 408 | " return outputs\n", 409 | "\n", 410 | " def dilation2d(self, x, st_element, strides, padding,\n", 411 | " rates=(1, 1, 1, 1)):\n", 412 | " # tf.nn.erosion2d(input, filter, strides, rates, padding, name=None)\n", 413 | " x = tf.compat.v1.nn.dilation2d(x, st_element, (1, ) + strides + (1, ),\n", 414 | " rates, padding.upper())\n", 415 | " return x\n", 416 | "\n", 417 | "\n", 418 | " class SpectralMorph(Layer):\n", 419 | " def __init__(self):\n", 420 | "\n", 421 | " super(SpectralMorph, self).__init__()\n", 422 | " self.filters = filters\n", 423 | " num_filters = filters\n", 424 | " #self.init = RandomNormal()\n", 425 | " def call(self, x):\n", 426 | " z1 = Erosion2D(num_filters = self.filters, kernel_size = (3,3),padding=\"same\", strides=(1,1))(x)\n", 427 | " z1 = Conv2D(filters = self.filters, kernel_size = (1,1), padding='same')(z1)\n", 428 | " z2 = Dilation2D(num_filters = self.filters, kernel_size = (3,3), padding=\"same\", strides=(1,1))(x)\n", 429 | " z2 = Conv2D(filters = self.filters, kernel_size = (1,1), padding='same')(z2)\n", 430 | " x = Add()([z1, z2])\n", 431 | " return x\n", 432 | "\n", 433 | "\n", 434 | " class SpatialMorph(Layer):\n", 435 | " def __init__(self):\n", 436 | "\n", 437 | " super(SpatialMorph, self).__init__()\n", 438 | " self.filters = filters\n", 439 | " num_filters = filters\n", 440 | " #self.init = RandomNormal()\n", 441 | " def call(self, x):\n", 442 | " z1 = Erosion2D(num_filters = self.filters, kernel_size = (3,3),padding=\"same\", strides=(1,1))(x)\n", 443 | " z1 = Conv2D(filters = self.filters, kernel_size = (3,3), padding='same')(z1)\n", 444 | " z2 = Dilation2D(num_filters = self.filters, kernel_size = (3,3), padding=\"same\", strides=(1,1))(x)\n", 445 | " z2 = Conv2D(filters = self.filters, kernel_size = (3,3), padding='same')(z2)\n", 446 | " x = Add()([z1, z2])\n", 447 | " return x\n", 448 | "\n", 449 | "\n", 450 | "\n", 451 | "\n", 452 | " for iter in range(3):\n", 453 | " # tf.compat.v1.set_random_seed(43)\n", 454 | " # np.random.seed(43)\n", 455 | "\n", 456 | " input_layer = Input((11, 11,NC))\n", 457 | "\n", 458 | " z = Conv2D(filters, use_bias = True, kernel_size = 3, padding = 'same')(input_layer)\n", 459 | " z = BatchNormalization()(z)\n", 460 | " z = Activation('relu')(z)\n", 461 | "\n", 462 | " z = SpectralMorph()(z)\n", 463 | " z = BatchNormalization()(z)\n", 464 | " z = Activation('relu')(z)\n", 465 | "\n", 466 | " z = Conv2D(64, use_bias = True, kernel_size = 3, padding = 'same')(z)\n", 467 | " z = BatchNormalization()(z)\n", 468 | " z = Activation('relu')(z)\n", 469 | "\n", 470 | "\n", 471 | " z = SpatialMorph()(z)\n", 472 | " z = BatchNormalization()(z)\n", 473 | " z = Activation('relu')(z)\n", 474 | "\n", 475 | " # z = MaxPooling2D(pool_size=(2,2), strides =1, padding = 'same')(z)\n", 476 | "\n", 
477 | " z = Conv2D(128, use_bias = True, kernel_size = 3, padding = 'same')(z)\n", 478 | " z = BatchNormalization()(z)\n", 479 | " z = Activation('relu')(z)\n", 480 | " # z = keras.layers.Concatenate(axis=3)([zspec,zspat])\n", 481 | "\n", 482 | " z = MaxPooling2D(pool_size=(2,2), strides =2, padding = 'same')(z)\n", 483 | "\n", 484 | " z = Conv2D(512, use_bias = True, kernel_size = 3, padding = 'same')(z)\n", 485 | " z = BatchNormalization()(z)\n", 486 | " z = Activation('relu')(z)\n", 487 | " #z = Dropout(0.5)(z)\n", 488 | "\n", 489 | " #z = Flatten()(z)\n", 490 | " z = GlobalAveragePooling2D()(z)\n", 491 | " #z = Dropout(0.3)(z)\n", 492 | " z = Dense(128, activation=\"relu\")(z)\n", 493 | " z = BatchNormalization()(z)\n", 494 | " z = Dropout(0.5)(z)\n", 495 | " z = Dense(64, activation=\"relu\")(z)\n", 496 | " z = BatchNormalization()(z)\n", 497 | " z = Dropout(0.5)(z)\n", 498 | " output_layer = Dense(Classes, activation=\"softmax\")(z)\n", 499 | "\n", 500 | "\n", 501 | "\n", 502 | "\n", 503 | " clf = Model(inputs=input_layer, outputs=output_layer)\n", 504 | " clf.summary()\n", 505 | " clf.compile(loss='categorical_crossentropy', optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy'])\n", 506 | "\n", 507 | "\n", 508 | " valdata = (TestPatch1.cpu().detach().numpy(), keras_to_categorical(TestLabel1.reshape(-1).cpu().detach().numpy(),Classes))\n", 509 | " h = clf.fit(TrainPatch1.cpu().detach().numpy(), keras_to_categorical(TrainLabel1.reshape(-1).cpu().detach().numpy(),Classes),\n", 510 | " batch_size=batchsize,\n", 511 | " epochs=EPOCH,\n", 512 | " verbose=True,\n", 513 | " validation_data=valdata,\n", 514 | " workers=4,\n", 515 | " callbacks = [ModelCheckpoint(datasetName+\"/best_model_HSIOnly.h5\", monitor='val_accuracy', verbose=0, save_best_only=True,save_weights_only=True)])\n", 516 | "\n", 517 | "\n", 518 | "\n", 519 | " clf.load_weights(datasetName+\"/best_model_HSIOnly.h5\")\n", 520 | " pred_y = np.argmax(clf.predict(TestPatch1.cpu().detach().numpy()), axis=1)\n", 521 | "\n", 522 | " y_test = TestLabel1.reshape(-1).cpu().detach().numpy()\n", 523 | " oa = accuracy_score(y_test, pred_y)*100\n", 524 | " confusion = confusion_matrix(y_test, pred_y)\n", 525 | " each_acc, aa = AA_andEachClassAccuracy(confusion)\n", 526 | " kappa = cohen_kappa_score(y_test, pred_y)*100\n", 527 | " KAPPA.append(kappa)\n", 528 | " OA.append(oa)\n", 529 | " AA.append(aa)\n", 530 | " ELEMENT_ACC[iter, :] = each_acc\n", 531 | "\n", 532 | " print(\"--------\" + datasetName + \" Training Finished-----------\")\n", 533 | " record.record_output(OA, AA, KAPPA, ELEMENT_ACC,'./' + datasetName +'/MorphCNN_Report_' + datasetName +'.txt')\n" 534 | ] 535 | }, 536 | { 537 | "cell_type": "code", 538 | "execution_count": 2, 539 | "id": "fa271c91", 540 | "metadata": { 541 | "ExecuteTime": { 542 | "end_time": "2021-11-11T14:12:00.148736Z", 543 | "start_time": "2021-11-11T14:11:58.592494Z" 544 | } 545 | }, 546 | "outputs": [ 547 | { 548 | "name": "stderr", 549 | "output_type": "stream", 550 | "text": [ 551 | "2021-11-11 06:11:58.642248: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1564] Found device 0 with properties: \n", 552 | "pciBusID: 0035:04:00.0 name: Tesla V100-SXM2-32GB computeCapability: 7.0\n", 553 | "coreClock: 1.53GHz coreCount: 80 deviceMemorySize: 31.50GiB deviceMemoryBandwidth: 836.37GiB/s\n", 554 | "2021-11-11 06:11:58.661769: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudart.so.10.2\n", 555 | "2021-11-11 06:11:58.667346: I 
tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcublas.so.10\n", 556 | "2021-11-11 06:11:58.667395: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcufft.so.10\n", 557 | "2021-11-11 06:11:58.667419: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcurand.so.10\n", 558 | "2021-11-11 06:11:58.758397: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcusolver.so.10\n", 559 | "2021-11-11 06:11:58.758568: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcusparse.so.10\n", 560 | "2021-11-11 06:11:58.758607: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudnn.so.7\n", 561 | "2021-11-11 06:11:58.761774: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1706] Adding visible gpu devices: 0\n", 562 | "2021-11-11 06:11:58.762037: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1105] Device interconnect StreamExecutor with strength 1 edge matrix:\n", 563 | "2021-11-11 06:11:58.762055: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1111] 0 \n", 564 | "2021-11-11 06:11:58.762068: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1124] 0: N \n", 565 | "2021-11-11 06:11:58.764657: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1250] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 29696 MB memory) -> physical GPU (device: 0, name: Tesla V100-SXM2-32GB, pci bus id: 0035:04:00.0, compute capability: 7.0)\n" 566 | ] 567 | }, 568 | { 569 | "ename": "TypeError", 570 | "evalue": "An op outside of the function building code is being passed\na \"Graph\" tensor. 
It is possible to have Graph tensors\nleak out of the function building context by including a\ntf.init_scope in your function building code.\nFor example, the following function will fail:\n @tf.function\n def has_init_scope():\n my_constant = tf.constant(1.)\n with tf.init_scope():\n added = my_constant * 2\nThe graph tensor has name: conv2d_16/kernel:0", 571 | "output_type": "error", 572 | "traceback": [ 573 | "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", 574 | "\u001b[0;31m_FallbackException\u001b[0m Traceback (most recent call last)", 575 | "\u001b[0;32m~/.conda/envs/purb37/lib/python3.7/site-packages/tensorflow/python/ops/gen_resource_variable_ops.py\u001b[0m in \u001b[0;36massign_variable_op\u001b[0;34m(resource, value, name)\u001b[0m\n\u001b[1;32m 141\u001b[0m \u001b[0m_ctx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_context_handle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtld\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdevice_name\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"AssignVariableOp\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 142\u001b[0;31m tld.op_callbacks, resource, value)\n\u001b[0m\u001b[1;32m 143\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0m_result\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", 576 | "\u001b[0;31m_FallbackException\u001b[0m: This function does not handle the case of the path where all inputs are not already EagerTensors.", 577 | "\nDuring handling of the above exception, another exception occurred:\n", 578 | "\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)", 579 | "\u001b[0;32m/tmp/ipykernel_10929/1305973327.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mclf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mload_weights\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdatasetName\u001b[0m\u001b[0;34m+\u001b[0m\u001b[0;34m\"/best_model_HSIOnly_Deviation.h5\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", 580 | "\u001b[0;32m~/.conda/envs/purb37/lib/python3.7/site-packages/tensorflow/python/keras/engine/training_v1.py\u001b[0m in \u001b[0;36mload_weights\u001b[0;34m(self, filepath, by_name, skip_mismatch)\u001b[0m\n\u001b[1;32m 231\u001b[0m raise ValueError('Load weights is not yet supported with TPUStrategy '\n\u001b[1;32m 232\u001b[0m 'with steps_per_run greater than 1.')\n\u001b[0;32m--> 233\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mModel\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mload_weights\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfilepath\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mby_name\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mskip_mismatch\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 234\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 235\u001b[0m \u001b[0;34m@\u001b[0m\u001b[0mtrackable\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mno_automatic_dependency_tracking\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", 581 | "\u001b[0;32m~/.conda/envs/purb37/lib/python3.7/site-packages/tensorflow/python/keras/engine/training.py\u001b[0m in \u001b[0;36mload_weights\u001b[0;34m(self, filepath, by_name, skip_mismatch)\u001b[0m\n\u001b[1;32m 248\u001b[0m raise ValueError('Load weights is not yet supported with TPUStrategy 
'\n\u001b[1;32m 249\u001b[0m 'with steps_per_run greater than 1.')\n\u001b[0;32m--> 250\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mModel\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mload_weights\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfilepath\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mby_name\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mskip_mismatch\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 251\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 252\u001b[0m def compile(self,\n", 582 | "\u001b[0;32m~/.conda/envs/purb37/lib/python3.7/site-packages/tensorflow/python/keras/engine/network.py\u001b[0m in \u001b[0;36mload_weights\u001b[0;34m(self, filepath, by_name, skip_mismatch)\u001b[0m\n\u001b[1;32m 1270\u001b[0m f, self.layers, skip_mismatch=skip_mismatch)\n\u001b[1;32m 1271\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1272\u001b[0;31m \u001b[0mhdf5_format\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mload_weights_from_hdf5_group\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mf\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlayers\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1273\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1274\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_updated_config\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", 583 | "\u001b[0;32m~/.conda/envs/purb37/lib/python3.7/site-packages/tensorflow/python/keras/saving/hdf5_format.py\u001b[0m in \u001b[0;36mload_weights_from_hdf5_group\u001b[0;34m(f, layers)\u001b[0m\n\u001b[1;32m 705\u001b[0m str(len(weight_values)) + ' elements.')\n\u001b[1;32m 706\u001b[0m \u001b[0mweight_value_tuples\u001b[0m \u001b[0;34m+=\u001b[0m \u001b[0mzip\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msymbolic_weights\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mweight_values\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 707\u001b[0;31m \u001b[0mK\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbatch_set_value\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mweight_value_tuples\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 708\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 709\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", 584 | "\u001b[0;32m~/.conda/envs/purb37/lib/python3.7/site-packages/tensorflow/python/keras/backend.py\u001b[0m in \u001b[0;36mbatch_set_value\u001b[0;34m(tuples)\u001b[0m\n\u001b[1;32m 3382\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexecuting_eagerly_outside_functions\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3383\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvalue\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mtuples\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 3384\u001b[0;31m 
\u001b[0mx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0massign\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0masarray\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mvalue\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 3385\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3386\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mget_graph\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mas_default\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", 585 | "\u001b[0;32m~/.conda/envs/purb37/lib/python3.7/site-packages/tensorflow/python/ops/resource_variable_ops.py\u001b[0m in \u001b[0;36massign\u001b[0;34m(self, value, use_locking, name, read_value)\u001b[0m\n\u001b[1;32m 846\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_shape\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0massert_is_compatible_with\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mvalue_tensor\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 847\u001b[0m assign_op = gen_resource_variable_ops.assign_variable_op(\n\u001b[0;32m--> 848\u001b[0;31m self.handle, value_tensor, name=name)\n\u001b[0m\u001b[1;32m 849\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mread_value\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 850\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_lazy_read\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0massign_op\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", 586 | "\u001b[0;32m~/.conda/envs/purb37/lib/python3.7/site-packages/tensorflow/python/ops/gen_resource_variable_ops.py\u001b[0m in \u001b[0;36massign_variable_op\u001b[0;34m(resource, value, name)\u001b[0m\n\u001b[1;32m 145\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 146\u001b[0m return assign_variable_op_eager_fallback(\n\u001b[0;32m--> 147\u001b[0;31m resource, value, name=name, ctx=_ctx)\n\u001b[0m\u001b[1;32m 148\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0m_core\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_SymbolicException\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 149\u001b[0m \u001b[0;32mpass\u001b[0m \u001b[0;31m# Add nodes to the TensorFlow graph.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", 587 | "\u001b[0;32m~/.conda/envs/purb37/lib/python3.7/site-packages/tensorflow/python/ops/gen_resource_variable_ops.py\u001b[0m in \u001b[0;36massign_variable_op_eager_fallback\u001b[0;34m(resource, value, name, ctx)\u001b[0m\n\u001b[1;32m 163\u001b[0m \u001b[0m_attrs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0;34m\"dtype\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0m_attr_dtype\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 164\u001b[0m _result = _execute.execute(b\"AssignVariableOp\", 0, inputs=_inputs_flat,\n\u001b[0;32m--> 165\u001b[0;31m attrs=_attrs, ctx=ctx, name=name)\n\u001b[0m\u001b[1;32m 166\u001b[0m 
\u001b[0m_result\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 167\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0m_result\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", 588 | "\u001b[0;32m~/.conda/envs/purb37/lib/python3.7/site-packages/tensorflow/python/eager/execute.py\u001b[0m in \u001b[0;36mquick_execute\u001b[0;34m(op_name, num_outputs, inputs, attrs, ctx, name)\u001b[0m\n\u001b[1;32m 73\u001b[0m \u001b[0;34m\"Inputs to eager execution function cannot be Keras symbolic \"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 74\u001b[0m \"tensors, but found {}\".format(keras_symbolic_tensors))\n\u001b[0;32m---> 75\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 76\u001b[0m \u001b[0;31m# pylint: enable=protected-access\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 77\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mtensors\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", 589 | "\u001b[0;32m~/.conda/envs/purb37/lib/python3.7/site-packages/tensorflow/python/eager/execute.py\u001b[0m in \u001b[0;36mquick_execute\u001b[0;34m(op_name, num_outputs, inputs, attrs, ctx, name)\u001b[0m\n\u001b[1;32m 58\u001b[0m \u001b[0mctx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mensure_initialized\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 59\u001b[0m tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,\n\u001b[0;32m---> 60\u001b[0;31m inputs, attrs, num_outputs)\n\u001b[0m\u001b[1;32m 61\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_NotOkStatusException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 62\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mname\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", 590 | "\u001b[0;31mTypeError\u001b[0m: An op outside of the function building code is being passed\na \"Graph\" tensor. 
It is possible to have Graph tensors\nleak out of the function building context by including a\ntf.init_scope in your function building code.\nFor example, the following function will fail:\n @tf.function\n def has_init_scope():\n my_constant = tf.constant(1.)\n with tf.init_scope():\n added = my_constant * 2\nThe graph tensor has name: conv2d_16/kernel:0" 591 | ] 592 | } 593 | ], 594 | "source": [ 595 | " clf.load_weights(datasetName+\"/best_model_HSIOnly_Deviation.h5\")" 596 | ] 597 | }, 598 | { 599 | "cell_type": "code", 600 | "execution_count": 10, 601 | "id": "2f0f65ae", 602 | "metadata": { 603 | "ExecuteTime": { 604 | "end_time": "2021-11-11T14:25:53.038521Z", 605 | "start_time": "2021-11-11T14:25:52.954438Z" 606 | } 607 | }, 608 | "outputs": [], 609 | "source": [ 610 | "with g.as_default():\n", 611 | " clf.save_weights(datasetName+\"/best_model_HSIOnly_Deviation.h5\")" 612 | ] 613 | }, 614 | { 615 | "cell_type": "code", 616 | "execution_count": 8, 617 | "id": "c69e2476", 618 | "metadata": { 619 | "ExecuteTime": { 620 | "end_time": "2021-11-11T14:13:07.671867Z", 621 | "start_time": "2021-11-11T14:13:06.636678Z" 622 | } 623 | }, 624 | "outputs": [], 625 | "source": [ 626 | "with g.as_default():\n", 627 | " clf.save(datasetName+\"/best_model_HSIOnly_Deviation.h5\")" 628 | ] 629 | }, 630 | { 631 | "cell_type": "code", 632 | "execution_count": 7, 633 | "id": "94ffc7be", 634 | "metadata": { 635 | "ExecuteTime": { 636 | "end_time": "2021-11-11T14:13:00.816471Z", 637 | "start_time": "2021-11-11T14:13:00.786897Z" 638 | } 639 | }, 640 | "outputs": [ 641 | { 642 | "ename": "ValueError", 643 | "evalue": "You are trying to load a weight file containing 0 layers into a model with 15 layers.", 644 | "output_type": "error", 645 | "traceback": [ 646 | "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", 647 | "\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)", 648 | "\u001b[0;32m/tmp/ipykernel_10929/4100435584.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mg\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mas_default\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0mclf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mload_weights\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdatasetName\u001b[0m\u001b[0;34m+\u001b[0m\u001b[0;34m\"/best_model_HSIOnly_Deviation.h5\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", 649 | "\u001b[0;32m~/.conda/envs/purb37/lib/python3.7/site-packages/tensorflow/python/keras/engine/training_v1.py\u001b[0m in \u001b[0;36mload_weights\u001b[0;34m(self, filepath, by_name, skip_mismatch)\u001b[0m\n\u001b[1;32m 231\u001b[0m raise ValueError('Load weights is not yet supported with TPUStrategy '\n\u001b[1;32m 232\u001b[0m 'with steps_per_run greater than 1.')\n\u001b[0;32m--> 233\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mModel\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mload_weights\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfilepath\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mby_name\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mskip_mismatch\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 234\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 235\u001b[0m 
\u001b[0;34m@\u001b[0m\u001b[0mtrackable\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mno_automatic_dependency_tracking\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", 650 | "\u001b[0;32m~/.conda/envs/purb37/lib/python3.7/site-packages/tensorflow/python/keras/engine/training.py\u001b[0m in \u001b[0;36mload_weights\u001b[0;34m(self, filepath, by_name, skip_mismatch)\u001b[0m\n\u001b[1;32m 248\u001b[0m raise ValueError('Load weights is not yet supported with TPUStrategy '\n\u001b[1;32m 249\u001b[0m 'with steps_per_run greater than 1.')\n\u001b[0;32m--> 250\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mModel\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mload_weights\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfilepath\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mby_name\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mskip_mismatch\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 251\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 252\u001b[0m def compile(self,\n", 651 | "\u001b[0;32m~/.conda/envs/purb37/lib/python3.7/site-packages/tensorflow/python/keras/engine/network.py\u001b[0m in \u001b[0;36mload_weights\u001b[0;34m(self, filepath, by_name, skip_mismatch)\u001b[0m\n\u001b[1;32m 1270\u001b[0m f, self.layers, skip_mismatch=skip_mismatch)\n\u001b[1;32m 1271\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1272\u001b[0;31m \u001b[0mhdf5_format\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mload_weights_from_hdf5_group\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mf\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlayers\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1273\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1274\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_updated_config\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", 652 | "\u001b[0;32m~/.conda/envs/purb37/lib/python3.7/site-packages/tensorflow/python/keras/saving/hdf5_format.py\u001b[0m in \u001b[0;36mload_weights_from_hdf5_group\u001b[0;34m(f, layers)\u001b[0m\n\u001b[1;32m 683\u001b[0m \u001b[0;34m'containing '\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0mstr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlayer_names\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m+\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 684\u001b[0m \u001b[0;34m' layers into a model with '\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0mstr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfiltered_layers\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m+\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 685\u001b[0;31m ' layers.')\n\u001b[0m\u001b[1;32m 686\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 687\u001b[0m \u001b[0;31m# We batch weight value assignments in a single backend call\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", 653 | "\u001b[0;31mValueError\u001b[0m: You are trying to load a weight file containing 0 layers into a model with 15 layers." 
654 | ] 655 | } 656 | ], 657 | "source": [ 658 | "with g.as_default():\n", 659 | " clf.load_weights(datasetName+\"/best_model_HSIOnly_Deviation.h5\")" 660 | ] 661 | } 662 | ], 663 | "metadata": { 664 | "kernelspec": { 665 | "display_name": "Python 3 (ipykernel)", 666 | "language": "python", 667 | "name": "python3" 668 | }, 669 | "language_info": { 670 | "codemirror_mode": { 671 | "name": "ipython", 672 | "version": 3 673 | }, 674 | "file_extension": ".py", 675 | "mimetype": "text/x-python", 676 | "name": "python", 677 | "nbconvert_exporter": "python", 678 | "pygments_lexer": "ipython3", 679 | "version": "3.7.7" 680 | } 681 | }, 682 | "nbformat": 4, 683 | "nbformat_minor": 5 684 | } 685 | --------------------------------------------------------------------------------
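
Note on the custom morphological layers (added sketch). In the notebook above, `SpectralMorph` and `SpatialMorph` create their `Erosion2D`/`Dilation2D`/`Conv2D` sub-layers inside `call`, and the model is built under a `tf.compat.v1` graph/session; that combination is what produces the failures in the last cells (`load_weights` reporting "0 layers" and the "Graph tensor" `TypeError`). Below is a minimal, hedged TF2-style sketch of the same erosion/dilation idea with sub-layers created in `__init__`/`build`, so that `save_weights`/`load_weights` behave normally. It is an illustration under the assumption of a recent TensorFlow 2.x with channels-last inputs, not the notebook's exact code: the weight name `st_element` and the use of a single depthwise structuring element (instead of the notebook's `self.kernel[..., 1]` slice) are choices made here for clarity.

```python
import tensorflow as tf
from tensorflow.keras import layers


class Erosion2D(layers.Layer):
    """Depthwise grayscale erosion with a trainable structuring element."""

    def __init__(self, kernel_size=(3, 3), strides=(1, 1), padding="same", **kwargs):
        super().__init__(**kwargs)
        self.kernel_size = kernel_size
        self.strides = strides
        self.padding = padding

    def build(self, input_shape):
        channels = int(input_shape[-1])
        # One 2-D structuring element per input channel: shape (kh, kw, C).
        self.st_element = self.add_weight(
            name="st_element",
            shape=self.kernel_size + (channels,),
            initializer="glorot_uniform",
            trainable=True,
        )

    def call(self, x):
        return tf.nn.erosion2d(
            x, self.st_element,
            strides=(1,) + self.strides + (1,),
            padding=self.padding.upper(),
            data_format="NHWC",
            dilations=(1, 1, 1, 1),
        )


class Dilation2D(Erosion2D):
    """Same structuring-element layout, but grayscale dilation."""

    def call(self, x):
        return tf.nn.dilation2d(
            x, self.st_element,
            strides=(1,) + self.strides + (1,),
            padding=self.padding.upper(),
            data_format="NHWC",
            dilations=(1, 1, 1, 1),
        )


class SpectralMorph(layers.Layer):
    """Erosion and dilation branches, each mixed across bands by a 1x1
    convolution and then summed, mirroring the notebook's SpectralMorph block."""

    def __init__(self, filters=64, **kwargs):
        super().__init__(**kwargs)
        self.erosion = Erosion2D()
        self.dilation = Dilation2D()
        self.conv_e = layers.Conv2D(filters, kernel_size=1, padding="same")
        self.conv_d = layers.Conv2D(filters, kernel_size=1, padding="same")

    def call(self, x):
        return self.conv_e(self.erosion(x)) + self.conv_d(self.dilation(x))
```

A `SpatialMorph` variant would be identical except that the two mixing convolutions use `kernel_size=3`, as in the notebook. Because every weight now belongs to a tracked sub-layer, `model.save_weights(...)` and `model.load_weights(...)` round-trip without the graph-context workarounds attempted in the notebook's last three cells.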