├── FaultNet.ipynb ├── FaultNet_Machine_Learning_Algorithm.ipynb ├── README.md ├── Testing-CWRU-SNR.ipynb └── images ├── cnn.png ├── results.png └── results_paderborn.png /FaultNet.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "import numpy as np\n", 10 | "import pandas as pd" 11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": 2, 16 | "metadata": {}, 17 | "outputs": [], 18 | "source": [ 19 | "import torch\n", 20 | "import torch.nn as nn\n", 21 | "from torchvision.datasets import CIFAR10\n", 22 | "from torchvision.transforms import transforms\n", 23 | "from torch.utils.data import DataLoader\n", 24 | "from torch.optim import Adam\n", 25 | "from torch.autograd import Variable\n", 26 | "import torch.nn.functional as F" 27 | ] 28 | }, 29 | { 30 | "cell_type": "code", 31 | "execution_count": 3, 32 | "metadata": {}, 33 | "outputs": [], 34 | "source": [ 35 | "data = np.load('CWRU_dataset.npy')\n", 36 | "labels = np.load('CWRU_lables.npy')" 37 | ] 38 | }, 39 | { 40 | "cell_type": "code", 41 | "execution_count": 4, 42 | "metadata": {}, 43 | "outputs": [], 44 | "source": [ 45 | "x=data[:,0:1600]" 46 | ] 47 | }, 48 | { 49 | "cell_type": "code", 50 | "execution_count": 5, 51 | "metadata": {}, 52 | "outputs": [], 53 | "source": [ 54 | "def mean(data,no_elements):\n", 55 | " X=np.zeros((data.shape[0],data.shape[1]))\n", 56 | " for i in range(data.shape[1]-no_elements+1):\n", 57 | " X[:,i]=np.mean(data[:,i:i+no_elements],axis=1)\n", 58 | " return X.astype(np.float16)\n", 59 | "def median(data,no_elements):\n", 60 | " X=np.zeros((data.shape[0],data.shape[1]))\n", 61 | " for i in range(data.shape[1]-no_elements+1):\n", 62 | " X[:,i]=np.median(data[:,i:i+no_elements],axis=1)\n", 63 | " return X.astype(np.float16)\n", 64 | "def sig_image(data,size):\n", 65 | " X=np.zeros((data.shape[0],size,size))\n", 66 | " for i in range(data.shape[0]):\n", 67 | " X[i]=(data[i,:].reshape(size,size))\n", 68 | " return X.astype(np.float16)" 69 | ] 70 | }, 71 | { 72 | "cell_type": "code", 73 | "execution_count": 6, 74 | "metadata": {}, 75 | "outputs": [], 76 | "source": [ 77 | "channel_mean=(mean(x,10)).astype(np.float16)\n", 78 | "x_m=sig_image(channel_mean,40)\n", 79 | "channel_median=(median(x,10)).astype(np.float16)\n", 80 | "x_md=sig_image(x,40)" 81 | ] 82 | }, 83 | { 84 | "cell_type": "code", 85 | "execution_count": 7, 86 | "metadata": {}, 87 | "outputs": [], 88 | "source": [ 89 | "x_n=sig_image(x,40)" 90 | ] 91 | }, 92 | { 93 | "cell_type": "code", 94 | "execution_count": 8, 95 | "metadata": {}, 96 | "outputs": [ 97 | { 98 | "data": { 99 | "text/plain": [ 100 | "(2800, 40, 40)" 101 | ] 102 | }, 103 | "execution_count": 8, 104 | "metadata": {}, 105 | "output_type": "execute_result" 106 | } 107 | ], 108 | "source": [ 109 | "x_n.shape" 110 | ] 111 | }, 112 | { 113 | "cell_type": "code", 114 | "execution_count": 9, 115 | "metadata": {}, 116 | "outputs": [ 117 | { 118 | "data": { 119 | "text/plain": [ 120 | "(2800, 40, 40)" 121 | ] 122 | }, 123 | "execution_count": 9, 124 | "metadata": {}, 125 | "output_type": "execute_result" 126 | } 127 | ], 128 | "source": [ 129 | "x_m.shape" 130 | ] 131 | }, 132 | { 133 | "cell_type": "code", 134 | "execution_count": 10, 135 | "metadata": {}, 136 | "outputs": [], 137 | "source": [ 138 | "X=np.stack((x_n,x_m,x_md),axis=1).astype(np.float16)" 139 | ] 140 | }, 141 | { 142 | "cell_type": "code", 143 | "execution_count": 11, 144 | "metadata": {}, 145 | "outputs": [ 146 | { 147 | "data": { 148 | "text/plain": [ 149 | "(2800, 3, 40, 40)" 150 | ] 151 | }, 152 | "execution_count": 11, 153 | "metadata": {}, 154 | "output_type": "execute_result" 155 | } 156 | ], 157 | "source": [ 158 | "X.shape" 159 | ] 160 | }, 161 | { 162 | "cell_type": "code", 163 | "execution_count": 12, 164 | "metadata": {}, 165 | "outputs": [], 166 | "source": [ 167 | "from sklearn.model_selection import train_test_split\n", 168 | "trainx, testx, trainlabel, testlabel = train_test_split(X, labels, test_size=0.2, random_state=20)" 169 | ] 170 | }, 171 | { 172 | "cell_type": "code", 173 | "execution_count": 13, 174 | "metadata": {}, 175 | "outputs": [], 176 | "source": [ 177 | "sig_train, sig_test = trainx,testx\n", 178 | "lab_train, lab_test = trainlabel,testlabel" 179 | ] 180 | }, 181 | { 182 | "cell_type": "code", 183 | "execution_count": 14, 184 | "metadata": {}, 185 | "outputs": [], 186 | "source": [ 187 | "sig_train = torch.from_numpy(sig_train)\n", 188 | "sig_test = torch.from_numpy(sig_test)\n", 189 | "lab_train= torch.from_numpy(lab_train)\n", 190 | "lab_test = torch.from_numpy(lab_test)" 191 | ] 192 | }, 193 | { 194 | "cell_type": "code", 195 | "execution_count": 15, 196 | "metadata": {}, 197 | "outputs": [], 198 | "source": [ 199 | "import torch.utils.data as data_utils\n", 200 | "batch_size = 128 \n", 201 | "train_tensor = data_utils.TensorDataset(sig_train, lab_train) \n", 202 | "train_loader = data_utils.DataLoader(dataset = train_tensor, batch_size = batch_size, shuffle = True)" 203 | ] 204 | }, 205 | { 206 | "cell_type": "code", 207 | "execution_count": 16, 208 | "metadata": {}, 209 | "outputs": [], 210 | "source": [ 211 | "batch_size = 1024\n", 212 | "test_tensor = data_utils.TensorDataset(sig_test, lab_test) \n", 213 | "test_loader = data_utils.DataLoader(dataset = test_tensor, batch_size = batch_size, shuffle = False)" 214 | ] 215 | }, 216 | { 217 | "cell_type": "code", 218 | "execution_count": 17, 219 | "metadata": {}, 220 | "outputs": [ 221 | { 222 | "data": { 223 | "text/plain": [ 224 | "torch.Size([2240, 3, 40, 40])" 225 | ] 226 | }, 227 | "execution_count": 17, 228 | "metadata": {}, 229 | "output_type": "execute_result" 230 | } 231 | ], 232 | "source": [ 233 | "sig_train.size()" 234 | ] 235 | }, 236 | { 237 | "cell_type": "code", 238 | "execution_count": 18, 239 | "metadata": {}, 240 | "outputs": [ 241 | { 242 | "data": { 243 | "text/plain": [ 244 | "torch.Size([560, 3, 40, 40])" 245 | ] 246 | }, 247 | "execution_count": 18, 248 | "metadata": {}, 249 | "output_type": "execute_result" 250 | } 251 | ], 252 | "source": [ 253 | "sig_test.size()" 254 | ] 255 | }, 256 | { 257 | "cell_type": "code", 258 | "execution_count": 19, 259 | "metadata": {}, 260 | "outputs": [], 261 | "source": [ 262 | "class CNN(nn.Module):\n", 263 | " def __init__(self):\n", 264 | " super(CNN, self).__init__()\n", 265 | " self.conv1 = nn.Conv2d(3, 32, kernel_size=4,stride=1,padding = 1)\n", 266 | " self.mp1 = nn.MaxPool2d(kernel_size=4,stride=2)\n", 267 | " self.conv2 = nn.Conv2d(32,64, kernel_size=4,stride =1)\n", 268 | " self.mp2 = nn.MaxPool2d(kernel_size=4,stride=2)\n", 269 | " self.fc1= nn.Linear(2304,256)\n", 270 | " self.dp1 = nn.Dropout(p=0.2)\n", 271 | " self.fc2 = nn.Linear(256,10)\n", 272 | "\n", 273 | " def forward(self, x):\n", 274 | " in_size = x.size(0)\n", 275 | " x = F.relu(self.mp1(self.conv1(x)))\n", 276 | " x = F.relu(self.mp2(self.conv2(x)))\n", 277 | " x = x.view(in_size,-1)\n", 278 | " x = F.relu(self.fc1(x))\n", 279 | " x = self.dp1(x)\n", 280 | " x = self.fc2(x)\n", 281 | " \n", 282 | " return F.log_softmax(x, dim=1)" 283 | ] 284 | }, 285 | { 286 | "cell_type": "code", 287 | "execution_count": 21, 288 | "metadata": {}, 289 | "outputs": [], 290 | "source": [ 291 | "cnn = CNN().double()" 292 | ] 293 | }, 294 | { 295 | "cell_type": "code", 296 | "execution_count": 22, 297 | "metadata": {}, 298 | "outputs": [], 299 | "source": [ 300 | "criterion = nn.CrossEntropyLoss()\n", 301 | "optimizer = torch.optim.Adam(cnn.parameters(), lr=0.001)" 302 | ] 303 | }, 304 | { 305 | "cell_type": "code", 306 | "execution_count": 23, 307 | "metadata": {}, 308 | "outputs": [], 309 | "source": [ 310 | "num_epochs = 100" 311 | ] 312 | }, 313 | { 314 | "cell_type": "code", 315 | "execution_count": 24, 316 | "metadata": {}, 317 | "outputs": [ 318 | { 319 | "name": "stdout", 320 | "output_type": "stream", 321 | "text": [ 322 | "Epoch [1/100], Step [1/18], Loss: 2.3026, Train Accuracy: 15.62%\n", 323 | "Epoch [1/100], Step [2/18], Loss: 2.1436, Train Accuracy: 12.50%\n", 324 | "Epoch [1/100], Step [3/18], Loss: 2.2310, Train Accuracy: 7.81%\n", 325 | "Epoch [1/100], Step [4/18], Loss: 2.1341, Train Accuracy: 17.19%\n", 326 | "Epoch [1/100], Step [5/18], Loss: 1.9999, Train Accuracy: 17.97%\n", 327 | "Epoch [1/100], Step [6/18], Loss: 2.1688, Train Accuracy: 12.50%\n", 328 | "Epoch [1/100], Step [7/18], Loss: 1.9400, Train Accuracy: 21.09%\n", 329 | "Epoch [1/100], Step [8/18], Loss: 1.9452, Train Accuracy: 21.88%\n", 330 | "Epoch [1/100], Step [9/18], Loss: 1.8811, Train Accuracy: 20.31%\n", 331 | "Epoch [1/100], Step [10/18], Loss: 1.8257, Train Accuracy: 31.25%\n", 332 | "Epoch [1/100], Step [11/18], Loss: 1.8228, Train Accuracy: 29.69%\n", 333 | "Epoch [1/100], Step [12/18], Loss: 1.8597, Train Accuracy: 22.66%\n", 334 | "Epoch [1/100], Step [13/18], Loss: 1.8749, Train Accuracy: 20.31%\n", 335 | "Epoch [1/100], Step [14/18], Loss: 1.6941, Train Accuracy: 30.47%\n", 336 | "Epoch [1/100], Step [15/18], Loss: 1.7534, Train Accuracy: 34.38%\n", 337 | "Epoch [1/100], Step [16/18], Loss: 1.6761, Train Accuracy: 40.62%\n", 338 | "Epoch [1/100], Step [17/18], Loss: 1.6986, Train Accuracy: 34.38%\n", 339 | "Epoch [1/100], Step [18/18], Loss: 1.5173, Train Accuracy: 39.06%\n", 340 | "Epoch [5/100], Step [1/18], Loss: 0.5248, Train Accuracy: 82.03%\n", 341 | "Epoch [5/100], Step [2/18], Loss: 0.5242, Train Accuracy: 77.34%\n", 342 | "Epoch [5/100], Step [3/18], Loss: 0.4709, Train Accuracy: 83.59%\n", 343 | "Epoch [5/100], Step [4/18], Loss: 0.4358, Train Accuracy: 79.69%\n", 344 | "Epoch [5/100], Step [5/18], Loss: 0.3925, Train Accuracy: 82.03%\n", 345 | "Epoch [5/100], Step [6/18], Loss: 0.5974, Train Accuracy: 75.78%\n", 346 | "Epoch [5/100], Step [7/18], Loss: 0.5357, Train Accuracy: 77.34%\n", 347 | "Epoch [5/100], Step [8/18], Loss: 0.5182, Train Accuracy: 78.12%\n", 348 | "Epoch [5/100], Step [9/18], Loss: 0.4237, Train Accuracy: 87.50%\n", 349 | "Epoch [5/100], Step [10/18], Loss: 0.4703, Train Accuracy: 82.03%\n", 350 | "Epoch [5/100], Step [11/18], Loss: 0.5011, Train Accuracy: 78.91%\n", 351 | "Epoch [5/100], Step [12/18], Loss: 0.5537, Train Accuracy: 81.25%\n", 352 | "Epoch [5/100], Step [13/18], Loss: 0.4035, Train Accuracy: 78.91%\n", 353 | "Epoch [5/100], Step [14/18], Loss: 0.3534, Train Accuracy: 84.38%\n", 354 | "Epoch [5/100], Step [15/18], Loss: 0.4169, Train Accuracy: 82.81%\n", 355 | "Epoch [5/100], Step [16/18], Loss: 0.4446, Train Accuracy: 79.69%\n", 356 | "Epoch [5/100], Step [17/18], Loss: 0.4719, Train Accuracy: 81.25%\n", 357 | "Epoch [5/100], Step [18/18], Loss: 0.4198, Train Accuracy: 84.38%\n", 358 | "Epoch [10/100], Step [1/18], Loss: 0.2438, Train Accuracy: 85.16%\n", 359 | "Epoch [10/100], Step [2/18], Loss: 0.2763, Train Accuracy: 84.38%\n", 360 | "Epoch [10/100], Step [3/18], Loss: 0.2925, Train Accuracy: 86.72%\n", 361 | "Epoch [10/100], Step [4/18], Loss: 0.2496, Train Accuracy: 86.72%\n", 362 | "Epoch [10/100], Step [5/18], Loss: 0.2612, Train Accuracy: 86.72%\n", 363 | "Epoch [10/100], Step [6/18], Loss: 0.2786, Train Accuracy: 85.94%\n", 364 | "Epoch [10/100], Step [7/18], Loss: 0.2019, Train Accuracy: 88.28%\n", 365 | "Epoch [10/100], Step [8/18], Loss: 0.2146, Train Accuracy: 89.84%\n", 366 | "Epoch [10/100], Step [9/18], Loss: 0.2667, Train Accuracy: 88.28%\n", 367 | "Epoch [10/100], Step [10/18], Loss: 0.1891, Train Accuracy: 95.31%\n", 368 | "Epoch [10/100], Step [11/18], Loss: 0.2276, Train Accuracy: 89.06%\n", 369 | "Epoch [10/100], Step [12/18], Loss: 0.1835, Train Accuracy: 90.62%\n", 370 | "Epoch [10/100], Step [13/18], Loss: 0.2534, Train Accuracy: 86.72%\n", 371 | "Epoch [10/100], Step [14/18], Loss: 0.2559, Train Accuracy: 89.06%\n", 372 | "Epoch [10/100], Step [15/18], Loss: 0.2870, Train Accuracy: 85.16%\n", 373 | "Epoch [10/100], Step [16/18], Loss: 0.1968, Train Accuracy: 93.75%\n", 374 | "Epoch [10/100], Step [17/18], Loss: 0.1876, Train Accuracy: 89.84%\n", 375 | "Epoch [10/100], Step [18/18], Loss: 0.2809, Train Accuracy: 85.94%\n", 376 | "Epoch [15/100], Step [1/18], Loss: 0.1218, Train Accuracy: 96.09%\n", 377 | "Epoch [15/100], Step [2/18], Loss: 0.1407, Train Accuracy: 96.09%\n", 378 | "Epoch [15/100], Step [3/18], Loss: 0.1623, Train Accuracy: 92.97%\n", 379 | "Epoch [15/100], Step [4/18], Loss: 0.1418, Train Accuracy: 94.53%\n", 380 | "Epoch [15/100], Step [5/18], Loss: 0.1588, Train Accuracy: 93.75%\n", 381 | "Epoch [15/100], Step [6/18], Loss: 0.1397, Train Accuracy: 94.53%\n", 382 | "Epoch [15/100], Step [7/18], Loss: 0.1504, Train Accuracy: 92.19%\n", 383 | "Epoch [15/100], Step [8/18], Loss: 0.1919, Train Accuracy: 89.84%\n", 384 | "Epoch [15/100], Step [9/18], Loss: 0.2122, Train Accuracy: 89.06%\n", 385 | "Epoch [15/100], Step [10/18], Loss: 0.1703, Train Accuracy: 90.62%\n", 386 | "Epoch [15/100], Step [11/18], Loss: 0.1574, Train Accuracy: 92.97%\n", 387 | "Epoch [15/100], Step [12/18], Loss: 0.1435, Train Accuracy: 92.19%\n", 388 | "Epoch [15/100], Step [13/18], Loss: 0.1563, Train Accuracy: 91.41%\n", 389 | "Epoch [15/100], Step [14/18], Loss: 0.2563, Train Accuracy: 88.28%\n", 390 | "Epoch [15/100], Step [15/18], Loss: 0.1192, Train Accuracy: 92.19%\n", 391 | "Epoch [15/100], Step [16/18], Loss: 0.1354, Train Accuracy: 94.53%\n", 392 | "Epoch [15/100], Step [17/18], Loss: 0.1356, Train Accuracy: 95.31%\n", 393 | "Epoch [15/100], Step [18/18], Loss: 0.2137, Train Accuracy: 89.06%\n", 394 | "Epoch [20/100], Step [1/18], Loss: 0.0853, Train Accuracy: 97.66%\n", 395 | "Epoch [20/100], Step [2/18], Loss: 0.0960, Train Accuracy: 97.66%\n", 396 | "Epoch [20/100], Step [3/18], Loss: 0.0753, Train Accuracy: 97.66%\n", 397 | "Epoch [20/100], Step [4/18], Loss: 0.0764, Train Accuracy: 96.88%\n", 398 | "Epoch [20/100], Step [5/18], Loss: 0.1364, Train Accuracy: 94.53%\n", 399 | "Epoch [20/100], Step [6/18], Loss: 0.1054, Train Accuracy: 96.88%\n", 400 | "Epoch [20/100], Step [7/18], Loss: 0.0901, Train Accuracy: 96.88%\n", 401 | "Epoch [20/100], Step [8/18], Loss: 0.0772, Train Accuracy: 98.44%\n", 402 | "Epoch [20/100], Step [9/18], Loss: 0.1278, Train Accuracy: 96.09%\n", 403 | "Epoch [20/100], Step [10/18], Loss: 0.1452, Train Accuracy: 95.31%\n", 404 | "Epoch [20/100], Step [11/18], Loss: 0.0972, Train Accuracy: 96.88%\n", 405 | "Epoch [20/100], Step [12/18], Loss: 0.0929, Train Accuracy: 97.66%\n", 406 | "Epoch [20/100], Step [13/18], Loss: 0.1178, Train Accuracy: 95.31%\n", 407 | "Epoch [20/100], Step [14/18], Loss: 0.1393, Train Accuracy: 94.53%\n", 408 | "Epoch [20/100], Step [15/18], Loss: 0.1091, Train Accuracy: 96.09%\n", 409 | "Epoch [20/100], Step [16/18], Loss: 0.1630, Train Accuracy: 92.19%\n", 410 | "Epoch [20/100], Step [17/18], Loss: 0.1100, Train Accuracy: 96.88%\n", 411 | "Epoch [20/100], Step [18/18], Loss: 0.1205, Train Accuracy: 92.19%\n", 412 | "Epoch [25/100], Step [1/18], Loss: 0.0475, Train Accuracy: 99.22%\n", 413 | "Epoch [25/100], Step [2/18], Loss: 0.0773, Train Accuracy: 96.88%\n", 414 | "Epoch [25/100], Step [3/18], Loss: 0.1187, Train Accuracy: 95.31%\n", 415 | "Epoch [25/100], Step [4/18], Loss: 0.0437, Train Accuracy: 98.44%\n", 416 | "Epoch [25/100], Step [5/18], Loss: 0.0379, Train Accuracy: 100.00%\n", 417 | "Epoch [25/100], Step [6/18], Loss: 0.1490, Train Accuracy: 92.19%\n", 418 | "Epoch [25/100], Step [7/18], Loss: 0.0765, Train Accuracy: 96.88%\n", 419 | "Epoch [25/100], Step [8/18], Loss: 0.0581, Train Accuracy: 98.44%\n", 420 | "Epoch [25/100], Step [9/18], Loss: 0.0425, Train Accuracy: 98.44%\n", 421 | "Epoch [25/100], Step [10/18], Loss: 0.0843, Train Accuracy: 96.88%\n", 422 | "Epoch [25/100], Step [11/18], Loss: 0.0653, Train Accuracy: 96.09%\n", 423 | "Epoch [25/100], Step [12/18], Loss: 0.0563, Train Accuracy: 98.44%\n", 424 | "Epoch [25/100], Step [13/18], Loss: 0.0572, Train Accuracy: 97.66%\n", 425 | "Epoch [25/100], Step [14/18], Loss: 0.0758, Train Accuracy: 96.09%\n", 426 | "Epoch [25/100], Step [15/18], Loss: 0.0483, Train Accuracy: 98.44%\n", 427 | "Epoch [25/100], Step [16/18], Loss: 0.0449, Train Accuracy: 99.22%\n", 428 | "Epoch [25/100], Step [17/18], Loss: 0.0729, Train Accuracy: 96.88%\n", 429 | "Epoch [25/100], Step [18/18], Loss: 0.0633, Train Accuracy: 98.44%\n", 430 | "Epoch [30/100], Step [1/18], Loss: 0.0185, Train Accuracy: 100.00%\n", 431 | "Epoch [30/100], Step [2/18], Loss: 0.0213, Train Accuracy: 99.22%\n", 432 | "Epoch [30/100], Step [3/18], Loss: 0.0351, Train Accuracy: 100.00%\n", 433 | "Epoch [30/100], Step [4/18], Loss: 0.0331, Train Accuracy: 99.22%\n", 434 | "Epoch [30/100], Step [5/18], Loss: 0.0575, Train Accuracy: 98.44%\n", 435 | "Epoch [30/100], Step [6/18], Loss: 0.0545, Train Accuracy: 96.88%\n", 436 | "Epoch [30/100], Step [7/18], Loss: 0.0247, Train Accuracy: 99.22%\n", 437 | "Epoch [30/100], Step [8/18], Loss: 0.0260, Train Accuracy: 100.00%\n", 438 | "Epoch [30/100], Step [9/18], Loss: 0.0308, Train Accuracy: 100.00%\n", 439 | "Epoch [30/100], Step [10/18], Loss: 0.0528, Train Accuracy: 97.66%\n", 440 | "Epoch [30/100], Step [11/18], Loss: 0.0587, Train Accuracy: 97.66%\n", 441 | "Epoch [30/100], Step [12/18], Loss: 0.0709, Train Accuracy: 97.66%\n", 442 | "Epoch [30/100], Step [13/18], Loss: 0.0630, Train Accuracy: 98.44%\n", 443 | "Epoch [30/100], Step [14/18], Loss: 0.0454, Train Accuracy: 98.44%\n", 444 | "Epoch [30/100], Step [15/18], Loss: 0.1319, Train Accuracy: 96.88%\n", 445 | "Epoch [30/100], Step [16/18], Loss: 0.0621, Train Accuracy: 97.66%\n" 446 | ] 447 | }, 448 | { 449 | "name": "stdout", 450 | "output_type": "stream", 451 | "text": [ 452 | "Epoch [30/100], Step [17/18], Loss: 0.1192, Train Accuracy: 95.31%\n", 453 | "Epoch [30/100], Step [18/18], Loss: 0.0472, Train Accuracy: 96.88%\n", 454 | "Epoch [35/100], Step [1/18], Loss: 0.0985, Train Accuracy: 96.88%\n", 455 | "Epoch [35/100], Step [2/18], Loss: 0.0181, Train Accuracy: 100.00%\n", 456 | "Epoch [35/100], Step [3/18], Loss: 0.0432, Train Accuracy: 99.22%\n", 457 | "Epoch [35/100], Step [4/18], Loss: 0.0185, Train Accuracy: 100.00%\n", 458 | "Epoch [35/100], Step [5/18], Loss: 0.0700, Train Accuracy: 95.31%\n", 459 | "Epoch [35/100], Step [6/18], Loss: 0.0484, Train Accuracy: 98.44%\n", 460 | "Epoch [35/100], Step [7/18], Loss: 0.0607, Train Accuracy: 97.66%\n", 461 | "Epoch [35/100], Step [8/18], Loss: 0.0152, Train Accuracy: 99.22%\n", 462 | "Epoch [35/100], Step [9/18], Loss: 0.1009, Train Accuracy: 96.09%\n", 463 | "Epoch [35/100], Step [10/18], Loss: 0.0694, Train Accuracy: 96.09%\n", 464 | "Epoch [35/100], Step [11/18], Loss: 0.0466, Train Accuracy: 97.66%\n", 465 | "Epoch [35/100], Step [12/18], Loss: 0.1283, Train Accuracy: 93.75%\n", 466 | "Epoch [35/100], Step [13/18], Loss: 0.0505, Train Accuracy: 97.66%\n", 467 | "Epoch [35/100], Step [14/18], Loss: 0.0109, Train Accuracy: 100.00%\n", 468 | "Epoch [35/100], Step [15/18], Loss: 0.0368, Train Accuracy: 99.22%\n", 469 | "Epoch [35/100], Step [16/18], Loss: 0.0836, Train Accuracy: 96.88%\n", 470 | "Epoch [35/100], Step [17/18], Loss: 0.0705, Train Accuracy: 96.09%\n", 471 | "Epoch [35/100], Step [18/18], Loss: 0.0136, Train Accuracy: 100.00%\n", 472 | "Epoch [40/100], Step [1/18], Loss: 0.0742, Train Accuracy: 96.09%\n", 473 | "Epoch [40/100], Step [2/18], Loss: 0.0362, Train Accuracy: 98.44%\n", 474 | "Epoch [40/100], Step [3/18], Loss: 0.0120, Train Accuracy: 100.00%\n", 475 | "Epoch [40/100], Step [4/18], Loss: 0.0208, Train Accuracy: 100.00%\n", 476 | "Epoch [40/100], Step [5/18], Loss: 0.0212, Train Accuracy: 100.00%\n", 477 | "Epoch [40/100], Step [6/18], Loss: 0.0306, Train Accuracy: 99.22%\n", 478 | "Epoch [40/100], Step [7/18], Loss: 0.0081, Train Accuracy: 100.00%\n", 479 | "Epoch [40/100], Step [8/18], Loss: 0.0209, Train Accuracy: 99.22%\n", 480 | "Epoch [40/100], Step [9/18], Loss: 0.0079, Train Accuracy: 100.00%\n", 481 | "Epoch [40/100], Step [10/18], Loss: 0.0098, Train Accuracy: 100.00%\n", 482 | "Epoch [40/100], Step [11/18], Loss: 0.0120, Train Accuracy: 100.00%\n", 483 | "Epoch [40/100], Step [12/18], Loss: 0.0173, Train Accuracy: 100.00%\n", 484 | "Epoch [40/100], Step [13/18], Loss: 0.0220, Train Accuracy: 99.22%\n", 485 | "Epoch [40/100], Step [14/18], Loss: 0.0216, Train Accuracy: 99.22%\n", 486 | "Epoch [40/100], Step [15/18], Loss: 0.0180, Train Accuracy: 99.22%\n", 487 | "Epoch [40/100], Step [16/18], Loss: 0.0152, Train Accuracy: 100.00%\n", 488 | "Epoch [40/100], Step [17/18], Loss: 0.0203, Train Accuracy: 98.44%\n", 489 | "Epoch [40/100], Step [18/18], Loss: 0.0037, Train Accuracy: 100.00%\n", 490 | "Epoch [45/100], Step [1/18], Loss: 0.0086, Train Accuracy: 100.00%\n", 491 | "Epoch [45/100], Step [2/18], Loss: 0.0296, Train Accuracy: 99.22%\n", 492 | "Epoch [45/100], Step [3/18], Loss: 0.0143, Train Accuracy: 99.22%\n", 493 | "Epoch [45/100], Step [4/18], Loss: 0.0062, Train Accuracy: 100.00%\n", 494 | "Epoch [45/100], Step [5/18], Loss: 0.0067, Train Accuracy: 100.00%\n", 495 | "Epoch [45/100], Step [6/18], Loss: 0.0147, Train Accuracy: 100.00%\n", 496 | "Epoch [45/100], Step [7/18], Loss: 0.0179, Train Accuracy: 99.22%\n", 497 | "Epoch [45/100], Step [8/18], Loss: 0.0120, Train Accuracy: 99.22%\n", 498 | "Epoch [45/100], Step [9/18], Loss: 0.0112, Train Accuracy: 100.00%\n", 499 | "Epoch [45/100], Step [10/18], Loss: 0.0098, Train Accuracy: 100.00%\n", 500 | "Epoch [45/100], Step [11/18], Loss: 0.0121, Train Accuracy: 99.22%\n", 501 | "Epoch [45/100], Step [12/18], Loss: 0.0060, Train Accuracy: 100.00%\n", 502 | "Epoch [45/100], Step [13/18], Loss: 0.0061, Train Accuracy: 100.00%\n", 503 | "Epoch [45/100], Step [14/18], Loss: 0.0060, Train Accuracy: 100.00%\n", 504 | "Epoch [45/100], Step [15/18], Loss: 0.0073, Train Accuracy: 100.00%\n", 505 | "Epoch [45/100], Step [16/18], Loss: 0.0082, Train Accuracy: 100.00%\n", 506 | "Epoch [45/100], Step [17/18], Loss: 0.0066, Train Accuracy: 100.00%\n", 507 | "Epoch [45/100], Step [18/18], Loss: 0.0057, Train Accuracy: 100.00%\n", 508 | "Epoch [50/100], Step [1/18], Loss: 0.0076, Train Accuracy: 100.00%\n", 509 | "Epoch [50/100], Step [2/18], Loss: 0.0056, Train Accuracy: 100.00%\n", 510 | "Epoch [50/100], Step [3/18], Loss: 0.0022, Train Accuracy: 100.00%\n", 511 | "Epoch [50/100], Step [4/18], Loss: 0.0039, Train Accuracy: 100.00%\n", 512 | "Epoch [50/100], Step [5/18], Loss: 0.0058, Train Accuracy: 100.00%\n", 513 | "Epoch [50/100], Step [6/18], Loss: 0.0096, Train Accuracy: 100.00%\n", 514 | "Epoch [50/100], Step [7/18], Loss: 0.0042, Train Accuracy: 100.00%\n", 515 | "Epoch [50/100], Step [8/18], Loss: 0.0053, Train Accuracy: 100.00%\n", 516 | "Epoch [50/100], Step [9/18], Loss: 0.0105, Train Accuracy: 100.00%\n", 517 | "Epoch [50/100], Step [10/18], Loss: 0.0052, Train Accuracy: 100.00%\n", 518 | "Epoch [50/100], Step [11/18], Loss: 0.0055, Train Accuracy: 100.00%\n", 519 | "Epoch [50/100], Step [12/18], Loss: 0.0083, Train Accuracy: 100.00%\n", 520 | "Epoch [50/100], Step [13/18], Loss: 0.0141, Train Accuracy: 99.22%\n", 521 | "Epoch [50/100], Step [14/18], Loss: 0.0042, Train Accuracy: 100.00%\n", 522 | "Epoch [50/100], Step [15/18], Loss: 0.0056, Train Accuracy: 100.00%\n", 523 | "Epoch [50/100], Step [16/18], Loss: 0.0118, Train Accuracy: 99.22%\n", 524 | "Epoch [50/100], Step [17/18], Loss: 0.0090, Train Accuracy: 100.00%\n", 525 | "Epoch [50/100], Step [18/18], Loss: 0.0212, Train Accuracy: 98.44%\n", 526 | "Epoch [55/100], Step [1/18], Loss: 0.0050, Train Accuracy: 100.00%\n", 527 | "Epoch [55/100], Step [2/18], Loss: 0.0022, Train Accuracy: 100.00%\n", 528 | "Epoch [55/100], Step [3/18], Loss: 0.0027, Train Accuracy: 100.00%\n", 529 | "Epoch [55/100], Step [4/18], Loss: 0.0152, Train Accuracy: 100.00%\n", 530 | "Epoch [55/100], Step [5/18], Loss: 0.0028, Train Accuracy: 100.00%\n", 531 | "Epoch [55/100], Step [6/18], Loss: 0.0088, Train Accuracy: 100.00%\n", 532 | "Epoch [55/100], Step [7/18], Loss: 0.0025, Train Accuracy: 100.00%\n", 533 | "Epoch [55/100], Step [8/18], Loss: 0.0035, Train Accuracy: 100.00%\n", 534 | "Epoch [55/100], Step [9/18], Loss: 0.0288, Train Accuracy: 99.22%\n", 535 | "Epoch [55/100], Step [10/18], Loss: 0.0070, Train Accuracy: 100.00%\n", 536 | "Epoch [55/100], Step [11/18], Loss: 0.0032, Train Accuracy: 100.00%\n", 537 | "Epoch [55/100], Step [12/18], Loss: 0.0043, Train Accuracy: 100.00%\n", 538 | "Epoch [55/100], Step [13/18], Loss: 0.0038, Train Accuracy: 100.00%\n", 539 | "Epoch [55/100], Step [14/18], Loss: 0.0181, Train Accuracy: 99.22%\n", 540 | "Epoch [55/100], Step [15/18], Loss: 0.0055, Train Accuracy: 100.00%\n", 541 | "Epoch [55/100], Step [16/18], Loss: 0.0047, Train Accuracy: 100.00%\n", 542 | "Epoch [55/100], Step [17/18], Loss: 0.0023, Train Accuracy: 100.00%\n", 543 | "Epoch [55/100], Step [18/18], Loss: 0.0013, Train Accuracy: 100.00%\n", 544 | "Epoch [60/100], Step [1/18], Loss: 0.0196, Train Accuracy: 100.00%\n", 545 | "Epoch [60/100], Step [2/18], Loss: 0.0168, Train Accuracy: 99.22%\n", 546 | "Epoch [60/100], Step [3/18], Loss: 0.0471, Train Accuracy: 98.44%\n", 547 | "Epoch [60/100], Step [4/18], Loss: 0.0355, Train Accuracy: 99.22%\n", 548 | "Epoch [60/100], Step [5/18], Loss: 0.0434, Train Accuracy: 98.44%\n", 549 | "Epoch [60/100], Step [6/18], Loss: 0.0395, Train Accuracy: 97.66%\n", 550 | "Epoch [60/100], Step [7/18], Loss: 0.0158, Train Accuracy: 100.00%\n", 551 | "Epoch [60/100], Step [8/18], Loss: 0.0114, Train Accuracy: 100.00%\n", 552 | "Epoch [60/100], Step [9/18], Loss: 0.0170, Train Accuracy: 99.22%\n", 553 | "Epoch [60/100], Step [10/18], Loss: 0.0291, Train Accuracy: 98.44%\n", 554 | "Epoch [60/100], Step [11/18], Loss: 0.1362, Train Accuracy: 96.09%\n", 555 | "Epoch [60/100], Step [12/18], Loss: 0.0189, Train Accuracy: 99.22%\n", 556 | "Epoch [60/100], Step [13/18], Loss: 0.0373, Train Accuracy: 98.44%\n", 557 | "Epoch [60/100], Step [14/18], Loss: 0.0500, Train Accuracy: 98.44%\n", 558 | "Epoch [60/100], Step [15/18], Loss: 0.0743, Train Accuracy: 96.88%\n", 559 | "Epoch [60/100], Step [16/18], Loss: 0.0224, Train Accuracy: 100.00%\n", 560 | "Epoch [60/100], Step [17/18], Loss: 0.0326, Train Accuracy: 99.22%\n", 561 | "Epoch [60/100], Step [18/18], Loss: 0.0738, Train Accuracy: 96.88%\n", 562 | "Epoch [65/100], Step [1/18], Loss: 0.0106, Train Accuracy: 100.00%\n", 563 | "Epoch [65/100], Step [2/18], Loss: 0.0027, Train Accuracy: 100.00%\n", 564 | "Epoch [65/100], Step [3/18], Loss: 0.0058, Train Accuracy: 100.00%\n", 565 | "Epoch [65/100], Step [4/18], Loss: 0.0064, Train Accuracy: 100.00%\n", 566 | "Epoch [65/100], Step [5/18], Loss: 0.0070, Train Accuracy: 100.00%\n", 567 | "Epoch [65/100], Step [6/18], Loss: 0.0046, Train Accuracy: 100.00%\n", 568 | "Epoch [65/100], Step [7/18], Loss: 0.0034, Train Accuracy: 100.00%\n", 569 | "Epoch [65/100], Step [8/18], Loss: 0.0050, Train Accuracy: 100.00%\n", 570 | "Epoch [65/100], Step [9/18], Loss: 0.0091, Train Accuracy: 100.00%\n", 571 | "Epoch [65/100], Step [10/18], Loss: 0.0033, Train Accuracy: 100.00%\n", 572 | "Epoch [65/100], Step [11/18], Loss: 0.0075, Train Accuracy: 100.00%\n", 573 | "Epoch [65/100], Step [12/18], Loss: 0.0019, Train Accuracy: 100.00%\n", 574 | "Epoch [65/100], Step [13/18], Loss: 0.0098, Train Accuracy: 99.22%\n", 575 | "Epoch [65/100], Step [14/18], Loss: 0.0027, Train Accuracy: 100.00%\n" 576 | ] 577 | }, 578 | { 579 | "name": "stdout", 580 | "output_type": "stream", 581 | "text": [ 582 | "Epoch [65/100], Step [15/18], Loss: 0.0054, Train Accuracy: 100.00%\n", 583 | "Epoch [65/100], Step [16/18], Loss: 0.0012, Train Accuracy: 100.00%\n", 584 | "Epoch [65/100], Step [17/18], Loss: 0.0084, Train Accuracy: 100.00%\n", 585 | "Epoch [65/100], Step [18/18], Loss: 0.0035, Train Accuracy: 100.00%\n", 586 | "Epoch [70/100], Step [1/18], Loss: 0.0019, Train Accuracy: 100.00%\n", 587 | "Epoch [70/100], Step [2/18], Loss: 0.0048, Train Accuracy: 100.00%\n", 588 | "Epoch [70/100], Step [3/18], Loss: 0.0028, Train Accuracy: 100.00%\n", 589 | "Epoch [70/100], Step [4/18], Loss: 0.0013, Train Accuracy: 100.00%\n", 590 | "Epoch [70/100], Step [5/18], Loss: 0.0029, Train Accuracy: 100.00%\n", 591 | "Epoch [70/100], Step [6/18], Loss: 0.0018, Train Accuracy: 100.00%\n", 592 | "Epoch [70/100], Step [7/18], Loss: 0.0028, Train Accuracy: 100.00%\n", 593 | "Epoch [70/100], Step [8/18], Loss: 0.0037, Train Accuracy: 100.00%\n", 594 | "Epoch [70/100], Step [9/18], Loss: 0.0015, Train Accuracy: 100.00%\n", 595 | "Epoch [70/100], Step [10/18], Loss: 0.0057, Train Accuracy: 100.00%\n", 596 | "Epoch [70/100], Step [11/18], Loss: 0.0015, Train Accuracy: 100.00%\n", 597 | "Epoch [70/100], Step [12/18], Loss: 0.0031, Train Accuracy: 100.00%\n", 598 | "Epoch [70/100], Step [13/18], Loss: 0.0018, Train Accuracy: 100.00%\n", 599 | "Epoch [70/100], Step [14/18], Loss: 0.0030, Train Accuracy: 100.00%\n", 600 | "Epoch [70/100], Step [15/18], Loss: 0.0023, Train Accuracy: 100.00%\n", 601 | "Epoch [70/100], Step [16/18], Loss: 0.0012, Train Accuracy: 100.00%\n", 602 | "Epoch [70/100], Step [17/18], Loss: 0.0032, Train Accuracy: 100.00%\n", 603 | "Epoch [70/100], Step [18/18], Loss: 0.0077, Train Accuracy: 100.00%\n", 604 | "Epoch [75/100], Step [1/18], Loss: 0.0025, Train Accuracy: 100.00%\n", 605 | "Epoch [75/100], Step [2/18], Loss: 0.0021, Train Accuracy: 100.00%\n", 606 | "Epoch [75/100], Step [3/18], Loss: 0.0010, Train Accuracy: 100.00%\n", 607 | "Epoch [75/100], Step [4/18], Loss: 0.0027, Train Accuracy: 100.00%\n", 608 | "Epoch [75/100], Step [5/18], Loss: 0.0013, Train Accuracy: 100.00%\n", 609 | "Epoch [75/100], Step [6/18], Loss: 0.0012, Train Accuracy: 100.00%\n", 610 | "Epoch [75/100], Step [7/18], Loss: 0.0019, Train Accuracy: 100.00%\n", 611 | "Epoch [75/100], Step [8/18], Loss: 0.0012, Train Accuracy: 100.00%\n", 612 | "Epoch [75/100], Step [9/18], Loss: 0.0092, Train Accuracy: 99.22%\n", 613 | "Epoch [75/100], Step [10/18], Loss: 0.0019, Train Accuracy: 100.00%\n", 614 | "Epoch [75/100], Step [11/18], Loss: 0.0012, Train Accuracy: 100.00%\n", 615 | "Epoch [75/100], Step [12/18], Loss: 0.0025, Train Accuracy: 100.00%\n", 616 | "Epoch [75/100], Step [13/18], Loss: 0.0020, Train Accuracy: 100.00%\n", 617 | "Epoch [75/100], Step [14/18], Loss: 0.0014, Train Accuracy: 100.00%\n", 618 | "Epoch [75/100], Step [15/18], Loss: 0.0060, Train Accuracy: 100.00%\n", 619 | "Epoch [75/100], Step [16/18], Loss: 0.0031, Train Accuracy: 100.00%\n", 620 | "Epoch [75/100], Step [17/18], Loss: 0.0026, Train Accuracy: 100.00%\n", 621 | "Epoch [75/100], Step [18/18], Loss: 0.0009, Train Accuracy: 100.00%\n", 622 | "Epoch [80/100], Step [1/18], Loss: 0.0023, Train Accuracy: 100.00%\n", 623 | "Epoch [80/100], Step [2/18], Loss: 0.0022, Train Accuracy: 100.00%\n", 624 | "Epoch [80/100], Step [3/18], Loss: 0.0021, Train Accuracy: 100.00%\n", 625 | "Epoch [80/100], Step [4/18], Loss: 0.0012, Train Accuracy: 100.00%\n", 626 | "Epoch [80/100], Step [5/18], Loss: 0.0057, Train Accuracy: 100.00%\n", 627 | "Epoch [80/100], Step [6/18], Loss: 0.0022, Train Accuracy: 100.00%\n", 628 | "Epoch [80/100], Step [7/18], Loss: 0.0018, Train Accuracy: 100.00%\n", 629 | "Epoch [80/100], Step [8/18], Loss: 0.0033, Train Accuracy: 100.00%\n", 630 | "Epoch [80/100], Step [9/18], Loss: 0.0011, Train Accuracy: 100.00%\n", 631 | "Epoch [80/100], Step [10/18], Loss: 0.0008, Train Accuracy: 100.00%\n", 632 | "Epoch [80/100], Step [11/18], Loss: 0.0022, Train Accuracy: 100.00%\n", 633 | "Epoch [80/100], Step [12/18], Loss: 0.0065, Train Accuracy: 100.00%\n", 634 | "Epoch [80/100], Step [13/18], Loss: 0.0012, Train Accuracy: 100.00%\n", 635 | "Epoch [80/100], Step [14/18], Loss: 0.0025, Train Accuracy: 100.00%\n", 636 | "Epoch [80/100], Step [15/18], Loss: 0.0011, Train Accuracy: 100.00%\n", 637 | "Epoch [80/100], Step [16/18], Loss: 0.0009, Train Accuracy: 100.00%\n", 638 | "Epoch [80/100], Step [17/18], Loss: 0.0057, Train Accuracy: 100.00%\n", 639 | "Epoch [80/100], Step [18/18], Loss: 0.0019, Train Accuracy: 100.00%\n", 640 | "Epoch [85/100], Step [1/18], Loss: 0.0005, Train Accuracy: 100.00%\n", 641 | "Epoch [85/100], Step [2/18], Loss: 0.0026, Train Accuracy: 100.00%\n", 642 | "Epoch [85/100], Step [3/18], Loss: 0.0010, Train Accuracy: 100.00%\n", 643 | "Epoch [85/100], Step [4/18], Loss: 0.0219, Train Accuracy: 99.22%\n", 644 | "Epoch [85/100], Step [5/18], Loss: 0.0019, Train Accuracy: 100.00%\n", 645 | "Epoch [85/100], Step [6/18], Loss: 0.0326, Train Accuracy: 98.44%\n", 646 | "Epoch [85/100], Step [7/18], Loss: 0.0007, Train Accuracy: 100.00%\n", 647 | "Epoch [85/100], Step [8/18], Loss: 0.0195, Train Accuracy: 98.44%\n", 648 | "Epoch [85/100], Step [9/18], Loss: 0.0007, Train Accuracy: 100.00%\n", 649 | "Epoch [85/100], Step [10/18], Loss: 0.0054, Train Accuracy: 100.00%\n", 650 | "Epoch [85/100], Step [11/18], Loss: 0.0047, Train Accuracy: 100.00%\n", 651 | "Epoch [85/100], Step [12/18], Loss: 0.0150, Train Accuracy: 99.22%\n", 652 | "Epoch [85/100], Step [13/18], Loss: 0.0174, Train Accuracy: 99.22%\n", 653 | "Epoch [85/100], Step [14/18], Loss: 0.0017, Train Accuracy: 100.00%\n", 654 | "Epoch [85/100], Step [15/18], Loss: 0.0035, Train Accuracy: 100.00%\n", 655 | "Epoch [85/100], Step [16/18], Loss: 0.0003, Train Accuracy: 100.00%\n", 656 | "Epoch [85/100], Step [17/18], Loss: 0.0024, Train Accuracy: 100.00%\n", 657 | "Epoch [85/100], Step [18/18], Loss: 0.0015, Train Accuracy: 100.00%\n", 658 | "Epoch [90/100], Step [1/18], Loss: 0.0016, Train Accuracy: 100.00%\n", 659 | "Epoch [90/100], Step [2/18], Loss: 0.0004, Train Accuracy: 100.00%\n", 660 | "Epoch [90/100], Step [3/18], Loss: 0.0016, Train Accuracy: 100.00%\n", 661 | "Epoch [90/100], Step [4/18], Loss: 0.0010, Train Accuracy: 100.00%\n", 662 | "Epoch [90/100], Step [5/18], Loss: 0.0013, Train Accuracy: 100.00%\n", 663 | "Epoch [90/100], Step [6/18], Loss: 0.0018, Train Accuracy: 100.00%\n", 664 | "Epoch [90/100], Step [7/18], Loss: 0.0005, Train Accuracy: 100.00%\n", 665 | "Epoch [90/100], Step [8/18], Loss: 0.0020, Train Accuracy: 100.00%\n", 666 | "Epoch [90/100], Step [9/18], Loss: 0.0008, Train Accuracy: 100.00%\n", 667 | "Epoch [90/100], Step [10/18], Loss: 0.0015, Train Accuracy: 100.00%\n", 668 | "Epoch [90/100], Step [11/18], Loss: 0.0012, Train Accuracy: 100.00%\n", 669 | "Epoch [90/100], Step [12/18], Loss: 0.0023, Train Accuracy: 100.00%\n", 670 | "Epoch [90/100], Step [13/18], Loss: 0.0005, Train Accuracy: 100.00%\n", 671 | "Epoch [90/100], Step [14/18], Loss: 0.0024, Train Accuracy: 100.00%\n", 672 | "Epoch [90/100], Step [15/18], Loss: 0.0015, Train Accuracy: 100.00%\n", 673 | "Epoch [90/100], Step [16/18], Loss: 0.0006, Train Accuracy: 100.00%\n", 674 | "Epoch [90/100], Step [17/18], Loss: 0.0416, Train Accuracy: 99.22%\n", 675 | "Epoch [90/100], Step [18/18], Loss: 0.0014, Train Accuracy: 100.00%\n", 676 | "Epoch [95/100], Step [1/18], Loss: 0.0042, Train Accuracy: 100.00%\n", 677 | "Epoch [95/100], Step [2/18], Loss: 0.0043, Train Accuracy: 100.00%\n", 678 | "Epoch [95/100], Step [3/18], Loss: 0.0010, Train Accuracy: 100.00%\n", 679 | "Epoch [95/100], Step [4/18], Loss: 0.0007, Train Accuracy: 100.00%\n", 680 | "Epoch [95/100], Step [5/18], Loss: 0.0032, Train Accuracy: 100.00%\n", 681 | "Epoch [95/100], Step [6/18], Loss: 0.0023, Train Accuracy: 100.00%\n", 682 | "Epoch [95/100], Step [7/18], Loss: 0.0052, Train Accuracy: 100.00%\n", 683 | "Epoch [95/100], Step [8/18], Loss: 0.0036, Train Accuracy: 100.00%\n", 684 | "Epoch [95/100], Step [9/18], Loss: 0.0008, Train Accuracy: 100.00%\n", 685 | "Epoch [95/100], Step [10/18], Loss: 0.0109, Train Accuracy: 99.22%\n", 686 | "Epoch [95/100], Step [11/18], Loss: 0.0010, Train Accuracy: 100.00%\n", 687 | "Epoch [95/100], Step [12/18], Loss: 0.0009, Train Accuracy: 100.00%\n", 688 | "Epoch [95/100], Step [13/18], Loss: 0.0028, Train Accuracy: 100.00%\n", 689 | "Epoch [95/100], Step [14/18], Loss: 0.0013, Train Accuracy: 100.00%\n", 690 | "Epoch [95/100], Step [15/18], Loss: 0.0024, Train Accuracy: 100.00%\n", 691 | "Epoch [95/100], Step [16/18], Loss: 0.0056, Train Accuracy: 100.00%\n", 692 | "Epoch [95/100], Step [17/18], Loss: 0.0097, Train Accuracy: 99.22%\n", 693 | "Epoch [95/100], Step [18/18], Loss: 0.0033, Train Accuracy: 100.00%\n", 694 | "Epoch [100/100], Step [1/18], Loss: 0.0015, Train Accuracy: 100.00%\n", 695 | "Epoch [100/100], Step [2/18], Loss: 0.0004, Train Accuracy: 100.00%\n", 696 | "Epoch [100/100], Step [3/18], Loss: 0.0007, Train Accuracy: 100.00%\n", 697 | "Epoch [100/100], Step [4/18], Loss: 0.0004, Train Accuracy: 100.00%\n", 698 | "Epoch [100/100], Step [5/18], Loss: 0.0011, Train Accuracy: 100.00%\n", 699 | "Epoch [100/100], Step [6/18], Loss: 0.0004, Train Accuracy: 100.00%\n", 700 | "Epoch [100/100], Step [7/18], Loss: 0.0002, Train Accuracy: 100.00%\n", 701 | "Epoch [100/100], Step [8/18], Loss: 0.0006, Train Accuracy: 100.00%\n", 702 | "Epoch [100/100], Step [9/18], Loss: 0.0007, Train Accuracy: 100.00%\n", 703 | "Epoch [100/100], Step [10/18], Loss: 0.0005, Train Accuracy: 100.00%\n" 704 | ] 705 | }, 706 | { 707 | "name": "stdout", 708 | "output_type": "stream", 709 | "text": [ 710 | "Epoch [100/100], Step [11/18], Loss: 0.0012, Train Accuracy: 100.00%\n", 711 | "Epoch [100/100], Step [12/18], Loss: 0.0010, Train Accuracy: 100.00%\n", 712 | "Epoch [100/100], Step [13/18], Loss: 0.0007, Train Accuracy: 100.00%\n", 713 | "Epoch [100/100], Step [14/18], Loss: 0.0012, Train Accuracy: 100.00%\n", 714 | "Epoch [100/100], Step [15/18], Loss: 0.0019, Train Accuracy: 100.00%\n", 715 | "Epoch [100/100], Step [16/18], Loss: 0.0029, Train Accuracy: 100.00%\n", 716 | "Epoch [100/100], Step [17/18], Loss: 0.0005, Train Accuracy: 100.00%\n", 717 | "Epoch [100/100], Step [18/18], Loss: 0.0006, Train Accuracy: 100.00%\n" 718 | ] 719 | } 720 | ], 721 | "source": [ 722 | "total_step = len(train_loader)\n", 723 | "loss_list = []\n", 724 | "acc_list = []\n", 725 | "for epoch in range(num_epochs):\n", 726 | " for i, (signals, labels) in enumerate(train_loader):\n", 727 | " optimizer.zero_grad()\n", 728 | " # Run the forward pass\n", 729 | " signals=signals\n", 730 | " labels=labels\n", 731 | " outputs = cnn(signals.double())\n", 732 | " loss = criterion(outputs, labels.long())\n", 733 | " \n", 734 | " loss_list.append(loss.item())\n", 735 | "\n", 736 | " # Backprop and perform Adam optimisation\n", 737 | " \n", 738 | " loss.backward()\n", 739 | " optimizer.step()\n", 740 | " # Track the accuracy\n", 741 | " total = labels.size(0)\n", 742 | " _, predicted = torch.max(outputs.data, 1)\n", 743 | " correct = (predicted == labels.long()).sum().item()\n", 744 | " acc_list.append(correct / total)\n", 745 | "\n", 746 | " if (epoch+1) % 5 == 0 or epoch==0:\n", 747 | " print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}, Train Accuracy: {:.2f}%'\n", 748 | " .format(epoch + 1, num_epochs, i + 1, total_step, loss.item(),\n", 749 | " (correct / total) * 100))\n", 750 | " " 751 | ] 752 | }, 753 | { 754 | "cell_type": "code", 755 | "execution_count": 25, 756 | "metadata": {}, 757 | "outputs": [ 758 | { 759 | "name": "stdout", 760 | "output_type": "stream", 761 | "text": [ 762 | "1\n", 763 | "Epoch [100/100], Step [1/1], Loss: 0.0686, Accuracy: 98.57%\n" 764 | ] 765 | } 766 | ], 767 | "source": [ 768 | "total_step = len(test_loader)\n", 769 | "print(total_step)\n", 770 | "loss_list_test = []\n", 771 | "acc_list_test = []\n", 772 | "with torch.no_grad():\n", 773 | " for i, (signals, labels) in enumerate(test_loader):\n", 774 | " # Run the forward pass\n", 775 | " signals=signals\n", 776 | " labels=labels\n", 777 | " outputs = cnn(signals.double())\n", 778 | " loss = criterion(outputs, labels.long())\n", 779 | " loss_list_test.append(loss.item())\n", 780 | " if epoch%10 ==0:\n", 781 | " print(loss)\n", 782 | " total = labels.size(0)\n", 783 | " _, predicted = torch.max(outputs.data, 1)\n", 784 | " correct = (predicted == labels.long()).sum().item()\n", 785 | " acc_list_test.append(correct / total)\n", 786 | " if (epoch) % 1 == 0:\n", 787 | " print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}, Accuracy: {:.2f}%'\n", 788 | " .format(epoch + 1, num_epochs, i + 1, total_step, loss.item(),\n", 789 | " (correct / total) * 100))" 790 | ] 791 | }, 792 | { 793 | "cell_type": "code", 794 | "execution_count": 26, 795 | "metadata": {}, 796 | "outputs": [ 797 | { 798 | "name": "stderr", 799 | "output_type": "stream", 800 | "text": [ 801 | "/home/cmu/anaconda3/lib/python3.7/site-packages/torch/serialization.py:402: UserWarning: Couldn't retrieve source code for container of type CNN. It won't be checked for correctness upon loading.\n", 802 | " \"type \" + obj.__name__ + \". It won't be checked \"\n" 803 | ] 804 | } 805 | ], 806 | "source": [ 807 | "# if you need to save\n", 808 | "torch.save(cnn,'cnnTC3_fold3_45.pth')" 809 | ] 810 | }, 811 | { 812 | "cell_type": "code", 813 | "execution_count": null, 814 | "metadata": {}, 815 | "outputs": [], 816 | "source": [] 817 | } 818 | ], 819 | "metadata": { 820 | "kernelspec": { 821 | "display_name": "Python 3", 822 | "language": "python", 823 | "name": "python3" 824 | }, 825 | "language_info": { 826 | "codemirror_mode": { 827 | "name": "ipython", 828 | "version": 3 829 | }, 830 | "file_extension": ".py", 831 | "mimetype": "text/x-python", 832 | "name": "python", 833 | "nbconvert_exporter": "python", 834 | "pygments_lexer": "ipython3", 835 | "version": "3.7.4" 836 | } 837 | }, 838 | "nbformat": 4, 839 | "nbformat_minor": 2 840 | } 841 | -------------------------------------------------------------------------------- /FaultNet_Machine_Learning_Algorithm.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# FaultNet: Machine Learning Algorithm" 8 | ] 9 | }, 10 | { 11 | "cell_type": "markdown", 12 | "metadata": {}, 13 | "source": [ 14 | "##### Importing all the required directories" 15 | ] 16 | }, 17 | { 18 | "cell_type": "code", 19 | "execution_count": 1, 20 | "metadata": {}, 21 | "outputs": [], 22 | "source": [ 23 | "import numpy as np\n", 24 | "from sklearn.svm import SVC\n", 25 | "from sklearn.ensemble import RandomForestClassifier\n", 26 | "from sklearn.linear_model import LogisticRegression\n", 27 | "from sklearn.neighbors import KNeighborsClassifier\n", 28 | "from sklearn.neural_network import MLPClassifier\n", 29 | "from sklearn.model_selection import cross_val_score\n", 30 | "import warnings" 31 | ] 32 | }, 33 | { 34 | "cell_type": "code", 35 | "execution_count": 2, 36 | "metadata": {}, 37 | "outputs": [], 38 | "source": [ 39 | "warnings.filterwarnings(\"ignore\")" 40 | ] 41 | }, 42 | { 43 | "cell_type": "markdown", 44 | "metadata": {}, 45 | "source": [ 46 | "##### Importing the featurized data" 47 | ] 48 | }, 49 | { 50 | "cell_type": "code", 51 | "execution_count": 3, 52 | "metadata": {}, 53 | "outputs": [ 54 | { 55 | "name": "stdout", 56 | "output_type": "stream", 57 | "text": [ 58 | "Train data shape: (2800, 14)\n" 59 | ] 60 | } 61 | ], 62 | "source": [ 63 | "x = np.load('cwru_feature.npy', allow_pickle = True)\n", 64 | "y = np.load('cwru_lables.npy', allow_pickle = True)\n", 65 | "print(\"Train data shape: \", x.shape)" 66 | ] 67 | }, 68 | { 69 | "cell_type": "markdown", 70 | "metadata": {}, 71 | "source": [ 72 | "##### Features" 73 | ] 74 | }, 75 | { 76 | "cell_type": "code", 77 | "execution_count": 4, 78 | "metadata": {}, 79 | "outputs": [ 80 | { 81 | "name": "stdout", 82 | "output_type": "stream", 83 | "text": [ 84 | "Features: ['Kurtosis', 'Skewness', 'Mean', 'Max', 'Min', 'Peak to peak', 'Variance', 'RMS', 'Absolute mean', 'Shape factor', 'Impulse factor', 'Crest factor', 'Absolute max', 'Clearance factor']\n" 85 | ] 86 | } 87 | ], 88 | "source": [ 89 | "features = ['Kurtosis', 'Skewness', 'Mean', 'Max', 'Min', 'Peak to peak', 'Variance', 'RMS', 'Absolute mean', 'Shape factor', 'Impulse factor', 'Crest factor', 'Absolute max', 'Clearance factor']\n", 90 | "print('Features: ', features)" 91 | ] 92 | }, 93 | { 94 | "cell_type": "markdown", 95 | "metadata": {}, 96 | "source": [ 97 | "##### Random Forest" 98 | ] 99 | }, 100 | { 101 | "cell_type": "code", 102 | "execution_count": 6, 103 | "metadata": {}, 104 | "outputs": [ 105 | { 106 | "name": "stdout", 107 | "output_type": "stream", 108 | "text": [ 109 | "Fold-wise accuracies: [0.90178571 0.92678571 0.86607143 0.87142857 0.90892857]\n", 110 | "Mean accuracy: 0.8949999999999999\n" 111 | ] 112 | } 113 | ], 114 | "source": [ 115 | "RF=RandomForestClassifier(n_estimators=15,random_state=42)\n", 116 | "score = cross_val_score(RF, x, y, cv=5)\n", 117 | "print('Fold-wise accuracies: ', score)\n", 118 | "print('Mean accuracy: ', np.mean(np.array(score)))" 119 | ] 120 | }, 121 | { 122 | "cell_type": "markdown", 123 | "metadata": {}, 124 | "source": [ 125 | "##### Support Vector Classifier" 126 | ] 127 | }, 128 | { 129 | "cell_type": "code", 130 | "execution_count": 7, 131 | "metadata": {}, 132 | "outputs": [ 133 | { 134 | "name": "stdout", 135 | "output_type": "stream", 136 | "text": [ 137 | "Fold-wise accuracies: [0.69285714 0.69107143 0.67857143 0.70178571 0.69464286]\n", 138 | "Mean accuracy: 0.6917857142857142\n" 139 | ] 140 | } 141 | ], 142 | "source": [ 143 | "SVC=SVC(random_state=100, tol=1e-1)\n", 144 | "score = cross_val_score(SVC, x, y, cv=5)\n", 145 | "print('Fold-wise accuracies: ', score)\n", 146 | "print('Mean accuracy: ', np.mean(np.array(score)))" 147 | ] 148 | }, 149 | { 150 | "cell_type": "markdown", 151 | "metadata": {}, 152 | "source": [ 153 | "### Logistic Regression" 154 | ] 155 | }, 156 | { 157 | "cell_type": "code", 158 | "execution_count": 8, 159 | "metadata": {}, 160 | "outputs": [ 161 | { 162 | "name": "stdout", 163 | "output_type": "stream", 164 | "text": [ 165 | "Fold-wise accuracies: [0.75 0.78035714 0.74107143 0.73035714 0.76785714]\n", 166 | "Mean accuracy: 0.7539285714285714\n" 167 | ] 168 | } 169 | ], 170 | "source": [ 171 | "LR=LogisticRegression(random_state=6, solver='lbfgs',multi_class='multinomial')\n", 172 | "score = cross_val_score(LR, x, y, cv=5)\n", 173 | "print('Fold-wise accuracies: ', score)\n", 174 | "print('Mean accuracy: ', np.mean(np.array(score)))" 175 | ] 176 | }, 177 | { 178 | "cell_type": "markdown", 179 | "metadata": {}, 180 | "source": [ 181 | "##### K Nearest Neighbors Classifier" 182 | ] 183 | }, 184 | { 185 | "cell_type": "code", 186 | "execution_count": 9, 187 | "metadata": {}, 188 | "outputs": [ 189 | { 190 | "name": "stdout", 191 | "output_type": "stream", 192 | "text": [ 193 | "Fold-wise accuracies: [0.7125 0.73035714 0.69107143 0.70357143 0.7125 ]\n", 194 | "Mean accuracy: 0.71\n" 195 | ] 196 | } 197 | ], 198 | "source": [ 199 | "knn=KNeighborsClassifier(n_neighbors=75)\n", 200 | "score = cross_val_score(knn, x, y, cv=5)\n", 201 | "print('Fold-wise accuracies: ', score)\n", 202 | "print('Mean accuracy: ', np.mean(np.array(score)))" 203 | ] 204 | }, 205 | { 206 | "cell_type": "markdown", 207 | "metadata": {}, 208 | "source": [ 209 | "##### Multi-Layer Perceptron Classifier" 210 | ] 211 | }, 212 | { 213 | "cell_type": "code", 214 | "execution_count": 10, 215 | "metadata": {}, 216 | "outputs": [ 217 | { 218 | "name": "stdout", 219 | "output_type": "stream", 220 | "text": [ 221 | "Fold-wise accuracies: [0.78392857 0.80892857 0.8 0.76428571 0.82678571]\n", 222 | "Mean accuracy: 0.7967857142857143\n" 223 | ] 224 | } 225 | ], 226 | "source": [ 227 | "mlpc=MLPClassifier(solver='lbfgs', alpha=1e-5,hidden_layer_sizes=(512), random_state=10)\n", 228 | "score = cross_val_score(mlpc, x, y, cv=5)\n", 229 | "print('Fold-wise accuracies: ', score)\n", 230 | "print('Mean accuracy: ', np.mean(np.array(score)))" 231 | ] 232 | }, 233 | { 234 | "cell_type": "markdown", 235 | "metadata": {}, 236 | "source": [ 237 | "##### Similarly, Paderborn University data can be imported and tested" 238 | ] 239 | } 240 | ], 241 | "metadata": { 242 | "kernelspec": { 243 | "display_name": "Python 3", 244 | "language": "python", 245 | "name": "python3" 246 | }, 247 | "language_info": { 248 | "codemirror_mode": { 249 | "name": "ipython", 250 | "version": 3 251 | }, 252 | "file_extension": ".py", 253 | "mimetype": "text/x-python", 254 | "name": "python", 255 | "nbconvert_exporter": "python", 256 | "pygments_lexer": "ipython3", 257 | "version": "3.8.3" 258 | } 259 | }, 260 | "nbformat": 4, 261 | "nbformat_minor": 4 262 | } 263 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # About the project 2 | This is the repository of FaultNet: A CNN for bearing fault detection and classification. 3 | The increased presence of advanced sensors on the production floors has led to the collection of datasets that can provide significant insights into machine health. An important and reliable indicator of machine health, vibration signal data can provide us a greater understanding of different faults occurring in mechanical systems. In this work, we analyze vibration signal data of mechanical systems with bearings by combining different signal processing methods and coupling them with machine learning techniques to classify different types of bearing faults. We also highlight the importance of using different signal processing methods and analyze their effect on accuracy for bearing fault detection. Apart from the traditional machine learning algorithms we also propose a convolutional neural network FaultNet which can effectively determine the type of bearing fault with a high degree of accuracy. The distinguishing factor of this work is the idea of channels proposed to extract more information from the signal, we have stacked the ‘Mean’ and ‘Median’ channels to raw signal to extract more useful features to classify the signals with greater accuracy. 4 | 5 | # Datasets 6 | There are two datasets that have been used. 7 | 1. Case Westerm Reserve University Bearing Dataset (CWRU) 8 | 2. Paderborn University Dataset 9 | 10 | # CNN Architecture 11 | ![](images/cnn.png) 12 | 13 | # Results 14 | ![](images/results.png) 15 | ![](images/results_paderborn.png) 16 | 17 | 18 | To download the featurized data directly and for more information, visit our website, [ManufacturingNet.io.](http://manufacturingnet.io/) 19 | Please cite CWRU and Paderborn University if you use the raw data. 20 | 21 | 22 | 23 | 24 | 25 | The preprint is available here: https://arxiv.org/abs/2010.02146. 26 | -------------------------------------------------------------------------------- /Testing-CWRU-SNR.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "# Importing all the required libraries\n", 10 | "import pandas as pd\n", 11 | "import numpy as np\n", 12 | "from sklearn.model_selection import KFold\n", 13 | "from sklearn.model_selection import train_test_split\n", 14 | "from scipy.stats import kurtosis\n", 15 | "from scipy.stats import skew" 16 | ] 17 | }, 18 | { 19 | "cell_type": "code", 20 | "execution_count": 2, 21 | "metadata": {}, 22 | "outputs": [], 23 | "source": [ 24 | "# Importing the data\n", 25 | "data = np.load('CWRU_dataset.npy')\n", 26 | "label = np.load('CWRU_lables.npy').reshape(2800,1)" 27 | ] 28 | }, 29 | { 30 | "cell_type": "code", 31 | "execution_count": 3, 32 | "metadata": {}, 33 | "outputs": [ 34 | { 35 | "name": "stdout", 36 | "output_type": "stream", 37 | "text": [ 38 | "-4.53178939269873 8.912316771674277 -17.347619303686315\n", 39 | "-2.8765261920516005 10.614111083274357 -15.606513853349128\n", 40 | "-0.6501830075600307 12.780734773037395 -13.472719710796694\n", 41 | "2.6121302086704725 16.05304041915961 -10.249454358025131\n", 42 | "4.983190366141585 18.452000618669377 -7.825380660794393\n", 43 | "8.380983795925037 21.814525899787782 -4.45959680283449\n", 44 | "10.820503024412867 24.263957885115612 -2.022623637711112\n" 45 | ] 46 | } 47 | ], 48 | "source": [ 49 | "# Generating white noise\n", 50 | "# Calculating the SNR\n", 51 | "white_noise_1 = np.clip(np.random.normal(0, 1, data.shape), -0.5, 0.5) # -4.5\n", 52 | "signal_energy = np.mean(data**2, axis = 1)\n", 53 | "noise_energy = np.mean(white_noise_1**2, axis = 1)\n", 54 | "SNR = 10*np.log10(signal_energy/noise_energy)\n", 55 | "print(np.mean(SNR), np.max(SNR), np.min(SNR))\n", 56 | "###\n", 57 | "white_noise_2 = np.clip(np.random.normal(0, 1, data.shape), -0.4, 0.4) # -2.9\n", 58 | "signal_energy = np.mean(data**2, axis = 1)\n", 59 | "noise_energy = np.mean(white_noise_2**2, axis = 1)\n", 60 | "SNR = 10*np.log10(signal_energy/noise_energy)\n", 61 | "print(np.mean(SNR), np.max(SNR), np.min(SNR))\n", 62 | "###\n", 63 | "white_noise_3 = np.clip(np.random.normal(0, 1, data.shape), -0.3, 0.3) # -0.6\n", 64 | "signal_energy = np.mean(data**2, axis = 1)\n", 65 | "noise_energy = np.mean(white_noise_3**2, axis = 1)\n", 66 | "SNR = 10*np.log10(signal_energy/noise_energy)\n", 67 | "print(np.mean(SNR), np.max(SNR), np.min(SNR))\n", 68 | "###\n", 69 | "white_noise_4 = np.clip(np.random.normal(0, 1, data.shape), -0.2, 0.2) # 2.6\n", 70 | "signal_energy = np.mean(data**2, axis = 1)\n", 71 | "noise_energy = np.mean(white_noise_4**2, axis = 1)\n", 72 | "SNR = 10*np.log10(signal_energy/noise_energy)\n", 73 | "print(np.mean(SNR), np.max(SNR), np.min(SNR))\n", 74 | "###\n", 75 | "white_noise_5 = np.clip(np.random.normal(0, 1, data.shape), -0.15, 0.15) # 5\n", 76 | "signal_energy = np.mean(data**2, axis = 1)\n", 77 | "noise_energy = np.mean(white_noise_5**2, axis = 1)\n", 78 | "SNR = 10*np.log10(signal_energy/noise_energy)\n", 79 | "print(np.mean(SNR), np.max(SNR), np.min(SNR))\n", 80 | "###\n", 81 | "white_noise_6 = np.clip(np.random.normal(0, 1, data.shape), -0.1, 0.1) # 8.4\n", 82 | "signal_energy = np.mean(data**2, axis = 1)\n", 83 | "noise_energy = np.mean(white_noise_6**2, axis = 1)\n", 84 | "SNR = 10*np.log10(signal_energy/noise_energy)\n", 85 | "print(np.mean(SNR), np.max(SNR), np.min(SNR))\n", 86 | "###\n", 87 | "white_noise_7 = np.clip(np.random.normal(0, 1, data.shape), -0.075, 0.075) # 10.8\n", 88 | "signal_energy = np.mean(data**2, axis = 1)\n", 89 | "noise_energy = np.mean(white_noise_7**2, axis = 1)\n", 90 | "SNR = 10*np.log10(signal_energy/noise_energy)\n", 91 | "print(np.mean(SNR), np.max(SNR), np.min(SNR))" 92 | ] 93 | }, 94 | { 95 | "cell_type": "code", 96 | "execution_count": 4, 97 | "metadata": {}, 98 | "outputs": [], 99 | "source": [ 100 | "# Noisy data\n", 101 | "data1 = data + white_noise_1\n", 102 | "data2 = data + white_noise_2\n", 103 | "data3 = data + white_noise_3\n", 104 | "data4 = data + white_noise_4\n", 105 | "data5 = data + white_noise_5\n", 106 | "data6 = data + white_noise_6\n", 107 | "data7 = data + white_noise_7\n", 108 | "del data" 109 | ] 110 | }, 111 | { 112 | "cell_type": "code", 113 | "execution_count": 5, 114 | "metadata": {}, 115 | "outputs": [], 116 | "source": [ 117 | "# Saving noisy data\n", 118 | "np.save('noisy_data_1.npy',data1)\n", 119 | "np.save('noisy_data_2.npy',data2)\n", 120 | "np.save('noisy_data_3.npy',data3)\n", 121 | "np.save('noisy_data_4.npy',data4)\n", 122 | "np.save('noisy_data_5.npy',data5)\n", 123 | "np.save('noisy_data_6.npy',data6)\n", 124 | "np.save('noisy_data_7.npy',data7)" 125 | ] 126 | }, 127 | { 128 | "cell_type": "code", 129 | "execution_count": 6, 130 | "metadata": {}, 131 | "outputs": [], 132 | "source": [ 133 | "from sklearn.svm import SVC\n", 134 | "from sklearn.ensemble import RandomForestClassifier\n", 135 | "from sklearn.linear_model import LogisticRegression\n", 136 | "from sklearn.neighbors import KNeighborsClassifier\n", 137 | "from sklearn.neural_network import MLPClassifier\n", 138 | "from sklearn.model_selection import cross_val_score" 139 | ] 140 | }, 141 | { 142 | "cell_type": "code", 143 | "execution_count": 7, 144 | "metadata": {}, 145 | "outputs": [], 146 | "source": [ 147 | "import warnings\n", 148 | "warnings.filterwarnings(\"ignore\")" 149 | ] 150 | }, 151 | { 152 | "cell_type": "code", 153 | "execution_count": 8, 154 | "metadata": {}, 155 | "outputs": [], 156 | "source": [ 157 | "def mean(data):\n", 158 | " data = np.asarray(data)\n", 159 | " data.flatten()\n", 160 | " M=data.mean()\n", 161 | " return M\n", 162 | "def Max(data):\n", 163 | " data = np.asarray(data)\n", 164 | " data.flatten()\n", 165 | " Max1=data.max()\n", 166 | " return Max1\n", 167 | "def Min(data):\n", 168 | " data = np.asarray(data)\n", 169 | " data.flatten()\n", 170 | " Min1=data.min()\n", 171 | " return Min1\n", 172 | "def pp(data):\n", 173 | " data = np.asarray(data)\n", 174 | " data.flatten()\n", 175 | " PP=data.max()-data.min()\n", 176 | " return PP\n", 177 | "def Variance(data):\n", 178 | " data = np.asarray(data)\n", 179 | " data.flatten()\n", 180 | " Var=data.var()\n", 181 | " return Var\n", 182 | "def rms(data):\n", 183 | " data = np.asarray(data)\n", 184 | " data.flatten()\n", 185 | " Rms=np.sqrt(np.mean(data**2))\n", 186 | " return Rms\n", 187 | "def Ab_mean(data):\n", 188 | " data = np.asarray(data)\n", 189 | " data.flatten()\n", 190 | " Abm=np.mean(np.absolute(data))\n", 191 | " return Abm\n", 192 | "def Shapef(data):\n", 193 | " data = np.asarray(data)\n", 194 | " data.flatten()\n", 195 | " shapef=rms(data)/Ab_mean(data)\n", 196 | " return shapef\n", 197 | "def Impulsef(data):\n", 198 | " data = np.asarray(data)\n", 199 | " data.flatten()\n", 200 | " impulse=Max(data)/Ab_mean(data)\n", 201 | " return impulse\n", 202 | "def crestf(data):\n", 203 | " data = np.asarray(data)\n", 204 | " data.flatten()\n", 205 | " crest=Max(data)/rms(data)\n", 206 | " return crest\n", 207 | "def SQRT_AMPL(data):\n", 208 | " data = np.asarray(data)\n", 209 | " data.flatten()\n", 210 | " SQRTA=(np.mean(np.sqrt(np.absolute(data))))**2\n", 211 | " return SQRTA\n", 212 | "def clearancef(data):\n", 213 | " data = np.asarray(data)\n", 214 | " data.flatten()\n", 215 | " clrf=Max(data)/SQRT_AMPL(data)\n", 216 | " return clrf" 217 | ] 218 | }, 219 | { 220 | "cell_type": "markdown", 221 | "metadata": {}, 222 | "source": [ 223 | "Noisy Data 1" 224 | ] 225 | }, 226 | { 227 | "cell_type": "code", 228 | "execution_count": 9, 229 | "metadata": {}, 230 | "outputs": [ 231 | { 232 | "name": "stdout", 233 | "output_type": "stream", 234 | "text": [ 235 | "Random Forest\n", 236 | "[0.74642857 0.75714286 0.73928571 0.76428571 0.77142857]\n", 237 | "0.7557142857142857\n", 238 | "**********\n", 239 | "Support Vector Classifier\n", 240 | "[0.62678571 0.60892857 0.60357143 0.61071429 0.62857143]\n", 241 | "0.6157142857142858\n", 242 | "**********\n", 243 | "Logistic Regression\n", 244 | "[0.65714286 0.65178571 0.66785714 0.68928571 0.69821429]\n", 245 | "0.6728571428571428\n", 246 | "**********\n", 247 | "K Nearest Neighbors\n", 248 | "[0.67857143 0.66964286 0.65892857 0.6875 0.68928571]\n", 249 | "0.6767857142857142\n", 250 | "**********\n", 251 | "MLP\n", 252 | "[0.72857143 0.7125 0.71964286 0.72678571 0.7375 ]\n", 253 | "0.725\n", 254 | "**********\n" 255 | ] 256 | } 257 | ], 258 | "source": [ 259 | "data = data1\n", 260 | "feature = []\n", 261 | "for i in range(2800):\n", 262 | " feature.append(mean(data[i]))\n", 263 | " feature.append(Max(data[i]))\n", 264 | " feature.append(Min(data[i]))\n", 265 | " feature.append(pp(data[i]))\n", 266 | " feature.append(Variance(data[i]))\n", 267 | " feature.append(rms(data[i]))\n", 268 | " feature.append(Ab_mean(data[i]))\n", 269 | " feature.append(Shapef(data[i]))\n", 270 | " feature.append(Impulsef(data[i]))\n", 271 | " feature.append(crestf(data[i]))\n", 272 | " feature.append(max(abs(data[i])))\n", 273 | " feature.append(clearancef(data[i]))\n", 274 | " K=kurtosis(data[i])\n", 275 | " feature.append(K)\n", 276 | " S=skew(data[i], axis=0, bias=False)\n", 277 | " feature.append(S)\n", 278 | "data_f = np.array(feature).reshape(2800,14)\n", 279 | "x = data_f\n", 280 | "y = label.ravel()\n", 281 | "print(\"Random Forest\")\n", 282 | "RF=RandomForestClassifier(n_estimators=15,random_state=42)\n", 283 | "RF.fit(x,y)\n", 284 | "score = cross_val_score(RF, x, y, cv=5)\n", 285 | "print(score)\n", 286 | "print(np.mean(np.array(score)))\n", 287 | "print('*'*10)\n", 288 | "print(\"Support Vector Classifier\")\n", 289 | "svc=SVC(random_state=100, tol=1e-1)\n", 290 | "score = cross_val_score(svc, x, y, cv=5)\n", 291 | "print(score)\n", 292 | "print(np.mean(np.array(score)))\n", 293 | "print('*'*10)\n", 294 | "print(\"Logistic Regression\")\n", 295 | "LR=LogisticRegression(random_state=6, solver='lbfgs',multi_class='multinomial')\n", 296 | "score = cross_val_score(LR, x, y, cv=5)\n", 297 | "print(score)\n", 298 | "print(np.mean(np.array(score)))\n", 299 | "print('*'*10)\n", 300 | "print('K Nearest Neighbors')\n", 301 | "knn=KNeighborsClassifier(n_neighbors=75)\n", 302 | "score = cross_val_score(knn, x, y, cv=5)\n", 303 | "print(score)\n", 304 | "print(np.mean(np.array(score)))\n", 305 | "print('*'*10)\n", 306 | "print(\"MLP\")\n", 307 | "mlpc=MLPClassifier(solver='lbfgs', alpha=1e-5,hidden_layer_sizes=(512), random_state=10)\n", 308 | "score = cross_val_score(mlpc, x, y, cv=5)\n", 309 | "print(score)\n", 310 | "print(np.mean(np.array(score)))\n", 311 | "print('*'*10)" 312 | ] 313 | }, 314 | { 315 | "cell_type": "markdown", 316 | "metadata": {}, 317 | "source": [ 318 | "Noisy Data 2" 319 | ] 320 | }, 321 | { 322 | "cell_type": "code", 323 | "execution_count": 10, 324 | "metadata": {}, 325 | "outputs": [ 326 | { 327 | "name": "stdout", 328 | "output_type": "stream", 329 | "text": [ 330 | "Random Forest\n", 331 | "[0.78928571 0.75892857 0.77321429 0.78214286 0.7625 ]\n", 332 | "0.7732142857142857\n", 333 | "**********\n", 334 | "Support Vector Classifier\n", 335 | "[0.64107143 0.63035714 0.60892857 0.625 0.62857143]\n", 336 | "0.6267857142857143\n", 337 | "**********\n", 338 | "Logistic Regression\n", 339 | "[0.68571429 0.70535714 0.71071429 0.71428571 0.72142857]\n", 340 | "0.7075000000000001\n", 341 | "**********\n", 342 | "K Nearest Neighbors\n", 343 | "[0.7 0.71428571 0.675 0.68392857 0.66964286]\n", 344 | "0.6885714285714286\n", 345 | "**********\n", 346 | "MLP\n", 347 | "[0.75535714 0.76607143 0.75 0.74464286 0.75357143]\n", 348 | "0.7539285714285714\n", 349 | "**********\n" 350 | ] 351 | } 352 | ], 353 | "source": [ 354 | "data = data2\n", 355 | "feature = []\n", 356 | "for i in range(2800):\n", 357 | " feature.append(mean(data[i]))\n", 358 | " feature.append(Max(data[i]))\n", 359 | " feature.append(Min(data[i]))\n", 360 | " feature.append(pp(data[i]))\n", 361 | " feature.append(Variance(data[i]))\n", 362 | " feature.append(rms(data[i]))\n", 363 | " feature.append(Ab_mean(data[i]))\n", 364 | " feature.append(Shapef(data[i]))\n", 365 | " feature.append(Impulsef(data[i]))\n", 366 | " feature.append(crestf(data[i]))\n", 367 | " feature.append(max(abs(data[i])))\n", 368 | " feature.append(clearancef(data[i]))\n", 369 | " K=kurtosis(data[i])\n", 370 | " feature.append(K)\n", 371 | " S=skew(data[i], axis=0, bias=False)\n", 372 | " feature.append(S)\n", 373 | "data_f = np.array(feature).reshape(2800,14)\n", 374 | "x = data_f\n", 375 | "y = label.ravel()\n", 376 | "print(\"Random Forest\")\n", 377 | "RF=RandomForestClassifier(n_estimators=15,random_state=42)\n", 378 | "RF.fit(x,y)\n", 379 | "score = cross_val_score(RF, x, y, cv=5)\n", 380 | "print(score)\n", 381 | "print(np.mean(np.array(score)))\n", 382 | "print('*'*10)\n", 383 | "print(\"Support Vector Classifier\")\n", 384 | "svc=SVC(random_state=100, tol=1e-1)\n", 385 | "score = cross_val_score(svc, x, y, cv=5)\n", 386 | "print(score)\n", 387 | "print(np.mean(np.array(score)))\n", 388 | "print('*'*10)\n", 389 | "print(\"Logistic Regression\")\n", 390 | "LR=LogisticRegression(random_state=6, solver='lbfgs',multi_class='multinomial')\n", 391 | "score = cross_val_score(LR, x, y, cv=5)\n", 392 | "print(score)\n", 393 | "print(np.mean(np.array(score)))\n", 394 | "print('*'*10)\n", 395 | "print('K Nearest Neighbors')\n", 396 | "knn=KNeighborsClassifier(n_neighbors=75)\n", 397 | "score = cross_val_score(knn, x, y, cv=5)\n", 398 | "print(score)\n", 399 | "print(np.mean(np.array(score)))\n", 400 | "print('*'*10)\n", 401 | "print(\"MLP\")\n", 402 | "mlpc=MLPClassifier(solver='lbfgs', alpha=1e-5,hidden_layer_sizes=(512), random_state=10)\n", 403 | "score = cross_val_score(mlpc, x, y, cv=5)\n", 404 | "print(score)\n", 405 | "print(np.mean(np.array(score)))\n", 406 | "print('*'*10)" 407 | ] 408 | }, 409 | { 410 | "cell_type": "markdown", 411 | "metadata": {}, 412 | "source": [ 413 | "Noisy Data 3" 414 | ] 415 | }, 416 | { 417 | "cell_type": "code", 418 | "execution_count": 11, 419 | "metadata": {}, 420 | "outputs": [ 421 | { 422 | "name": "stdout", 423 | "output_type": "stream", 424 | "text": [ 425 | "Random Forest\n", 426 | "[0.79464286 0.79821429 0.78928571 0.77857143 0.80714286]\n", 427 | "0.7935714285714285\n", 428 | "**********\n", 429 | "Support Vector Classifier\n", 430 | "[0.64642857 0.63928571 0.61607143 0.64821429 0.63571429]\n", 431 | "0.6371428571428571\n", 432 | "**********\n", 433 | "Logistic Regression\n", 434 | "[0.70178571 0.71607143 0.72142857 0.71607143 0.73214286]\n", 435 | "0.7175\n", 436 | "**********\n", 437 | "K Nearest Neighbors\n", 438 | "[0.68214286 0.69107143 0.675 0.69285714 0.68392857]\n", 439 | "0.685\n", 440 | "**********\n", 441 | "MLP\n", 442 | "[0.76607143 0.74642857 0.74642857 0.75178571 0.78035714]\n", 443 | "0.7582142857142858\n", 444 | "**********\n" 445 | ] 446 | } 447 | ], 448 | "source": [ 449 | "data = data3\n", 450 | "feature = []\n", 451 | "for i in range(2800):\n", 452 | " feature.append(mean(data[i]))\n", 453 | " feature.append(Max(data[i]))\n", 454 | " feature.append(Min(data[i]))\n", 455 | " feature.append(pp(data[i]))\n", 456 | " feature.append(Variance(data[i]))\n", 457 | " feature.append(rms(data[i]))\n", 458 | " feature.append(Ab_mean(data[i]))\n", 459 | " feature.append(Shapef(data[i]))\n", 460 | " feature.append(Impulsef(data[i]))\n", 461 | " feature.append(crestf(data[i]))\n", 462 | " feature.append(max(abs(data[i])))\n", 463 | " feature.append(clearancef(data[i]))\n", 464 | " K=kurtosis(data[i])\n", 465 | " feature.append(K)\n", 466 | " S=skew(data[i], axis=0, bias=False)\n", 467 | " feature.append(S)\n", 468 | "data_f = np.array(feature).reshape(2800,14)\n", 469 | "x = data_f\n", 470 | "y = label.ravel()\n", 471 | "print(\"Random Forest\")\n", 472 | "RF=RandomForestClassifier(n_estimators=15,random_state=42)\n", 473 | "RF.fit(x,y)\n", 474 | "score = cross_val_score(RF, x, y, cv=5)\n", 475 | "print(score)\n", 476 | "print(np.mean(np.array(score)))\n", 477 | "print('*'*10)\n", 478 | "print(\"Support Vector Classifier\")\n", 479 | "svc=SVC(random_state=100, tol=1e-1)\n", 480 | "score = cross_val_score(svc, x, y, cv=5)\n", 481 | "print(score)\n", 482 | "print(np.mean(np.array(score)))\n", 483 | "print('*'*10)\n", 484 | "print(\"Logistic Regression\")\n", 485 | "LR=LogisticRegression(random_state=6, solver='lbfgs',multi_class='multinomial')\n", 486 | "score = cross_val_score(LR, x, y, cv=5)\n", 487 | "print(score)\n", 488 | "print(np.mean(np.array(score)))\n", 489 | "print('*'*10)\n", 490 | "print('K Nearest Neighbors')\n", 491 | "knn=KNeighborsClassifier(n_neighbors=75)\n", 492 | "score = cross_val_score(knn, x, y, cv=5)\n", 493 | "print(score)\n", 494 | "print(np.mean(np.array(score)))\n", 495 | "print('*'*10)\n", 496 | "print(\"MLP\")\n", 497 | "mlpc=MLPClassifier(solver='lbfgs', alpha=1e-5,hidden_layer_sizes=(512), random_state=10)\n", 498 | "score = cross_val_score(mlpc, x, y, cv=5)\n", 499 | "print(score)\n", 500 | "print(np.mean(np.array(score)))\n", 501 | "print('*'*10)" 502 | ] 503 | }, 504 | { 505 | "cell_type": "markdown", 506 | "metadata": {}, 507 | "source": [ 508 | "Noisy Data 4" 509 | ] 510 | }, 511 | { 512 | "cell_type": "code", 513 | "execution_count": 12, 514 | "metadata": {}, 515 | "outputs": [ 516 | { 517 | "name": "stdout", 518 | "output_type": "stream", 519 | "text": [ 520 | "Random Forest\n", 521 | "[0.81071429 0.80357143 0.81785714 0.78928571 0.83035714]\n", 522 | "0.8103571428571428\n", 523 | "**********\n", 524 | "Support Vector Classifier\n", 525 | "[0.675 0.66785714 0.64107143 0.65714286 0.65535714]\n", 526 | "0.6592857142857144\n", 527 | "**********\n", 528 | "Logistic Regression\n", 529 | "[0.725 0.71785714 0.72142857 0.73928571 0.73214286]\n", 530 | "0.7271428571428571\n", 531 | "**********\n", 532 | "K Nearest Neighbors\n", 533 | "[0.71071429 0.69464286 0.675 0.70178571 0.69821429]\n", 534 | "0.6960714285714287\n", 535 | "**********\n", 536 | "MLP\n", 537 | "[0.76964286 0.75714286 0.75892857 0.75357143 0.77678571]\n", 538 | "0.7632142857142857\n", 539 | "**********\n" 540 | ] 541 | } 542 | ], 543 | "source": [ 544 | "data = data4\n", 545 | "feature = []\n", 546 | "for i in range(2800):\n", 547 | " feature.append(mean(data[i]))\n", 548 | " feature.append(Max(data[i]))\n", 549 | " feature.append(Min(data[i]))\n", 550 | " feature.append(pp(data[i]))\n", 551 | " feature.append(Variance(data[i]))\n", 552 | " feature.append(rms(data[i]))\n", 553 | " feature.append(Ab_mean(data[i]))\n", 554 | " feature.append(Shapef(data[i]))\n", 555 | " feature.append(Impulsef(data[i]))\n", 556 | " feature.append(crestf(data[i]))\n", 557 | " feature.append(max(abs(data[i])))\n", 558 | " feature.append(clearancef(data[i]))\n", 559 | " K=kurtosis(data[i])\n", 560 | " feature.append(K)\n", 561 | " S=skew(data[i], axis=0, bias=False)\n", 562 | " feature.append(S)\n", 563 | "data_f = np.array(feature).reshape(2800,14)\n", 564 | "x = data_f\n", 565 | "y = label.ravel()\n", 566 | "print(\"Random Forest\")\n", 567 | "RF=RandomForestClassifier(n_estimators=15,random_state=42)\n", 568 | "RF.fit(x,y)\n", 569 | "score = cross_val_score(RF, x, y, cv=5)\n", 570 | "print(score)\n", 571 | "print(np.mean(np.array(score)))\n", 572 | "print('*'*10)\n", 573 | "print(\"Support Vector Classifier\")\n", 574 | "svc=SVC(random_state=100, tol=1e-1)\n", 575 | "score = cross_val_score(svc, x, y, cv=5)\n", 576 | "print(score)\n", 577 | "print(np.mean(np.array(score)))\n", 578 | "print('*'*10)\n", 579 | "print(\"Logistic Regression\")\n", 580 | "LR=LogisticRegression(random_state=6, solver='lbfgs',multi_class='multinomial')\n", 581 | "score = cross_val_score(LR, x, y, cv=5)\n", 582 | "print(score)\n", 583 | "print(np.mean(np.array(score)))\n", 584 | "print('*'*10)\n", 585 | "print('K Nearest Neighbors')\n", 586 | "knn=KNeighborsClassifier(n_neighbors=75)\n", 587 | "score = cross_val_score(knn, x, y, cv=5)\n", 588 | "print(score)\n", 589 | "print(np.mean(np.array(score)))\n", 590 | "print('*'*10)\n", 591 | "print(\"MLP\")\n", 592 | "mlpc=MLPClassifier(solver='lbfgs', alpha=1e-5,hidden_layer_sizes=(512), random_state=10)\n", 593 | "score = cross_val_score(mlpc, x, y, cv=5)\n", 594 | "print(score)\n", 595 | "print(np.mean(np.array(score)))\n", 596 | "print('*'*10)" 597 | ] 598 | }, 599 | { 600 | "cell_type": "markdown", 601 | "metadata": {}, 602 | "source": [ 603 | "Noisy Data 5" 604 | ] 605 | }, 606 | { 607 | "cell_type": "code", 608 | "execution_count": 13, 609 | "metadata": {}, 610 | "outputs": [ 611 | { 612 | "name": "stdout", 613 | "output_type": "stream", 614 | "text": [ 615 | "Random Forest\n", 616 | "[0.82321429 0.82321429 0.82321429 0.81071429 0.84464286]\n", 617 | "0.825\n", 618 | "**********\n", 619 | "Support Vector Classifier\n", 620 | "[0.68392857 0.675 0.67678571 0.66964286 0.675 ]\n", 621 | "0.6760714285714287\n", 622 | "**********\n", 623 | "Logistic Regression\n", 624 | "[0.71607143 0.74821429 0.71607143 0.73392857 0.73035714]\n", 625 | "0.7289285714285714\n", 626 | "**********\n", 627 | "K Nearest Neighbors\n", 628 | "[0.69821429 0.69285714 0.70178571 0.71071429 0.69821429]\n", 629 | "0.7003571428571428\n", 630 | "**********\n", 631 | "MLP\n", 632 | "[0.76785714 0.77321429 0.76428571 0.76607143 0.78928571]\n", 633 | "0.7721428571428571\n", 634 | "**********\n" 635 | ] 636 | } 637 | ], 638 | "source": [ 639 | "data = data5\n", 640 | "feature = []\n", 641 | "for i in range(2800):\n", 642 | " feature.append(mean(data[i]))\n", 643 | " feature.append(Max(data[i]))\n", 644 | " feature.append(Min(data[i]))\n", 645 | " feature.append(pp(data[i]))\n", 646 | " feature.append(Variance(data[i]))\n", 647 | " feature.append(rms(data[i]))\n", 648 | " feature.append(Ab_mean(data[i]))\n", 649 | " feature.append(Shapef(data[i]))\n", 650 | " feature.append(Impulsef(data[i]))\n", 651 | " feature.append(crestf(data[i]))\n", 652 | " feature.append(max(abs(data[i])))\n", 653 | " feature.append(clearancef(data[i]))\n", 654 | " K=kurtosis(data[i])\n", 655 | " feature.append(K)\n", 656 | " S=skew(data[i], axis=0, bias=False)\n", 657 | " feature.append(S)\n", 658 | "data_f = np.array(feature).reshape(2800,14)\n", 659 | "x = data_f\n", 660 | "y = label.ravel()\n", 661 | "print(\"Random Forest\")\n", 662 | "RF=RandomForestClassifier(n_estimators=15,random_state=42)\n", 663 | "RF.fit(x,y)\n", 664 | "score = cross_val_score(RF, x, y, cv=5)\n", 665 | "print(score)\n", 666 | "print(np.mean(np.array(score)))\n", 667 | "print('*'*10)\n", 668 | "print(\"Support Vector Classifier\")\n", 669 | "svc=SVC(random_state=100, tol=1e-1)\n", 670 | "score = cross_val_score(svc, x, y, cv=5)\n", 671 | "print(score)\n", 672 | "print(np.mean(np.array(score)))\n", 673 | "print('*'*10)\n", 674 | "print(\"Logistic Regression\")\n", 675 | "LR=LogisticRegression(random_state=6, solver='lbfgs',multi_class='multinomial')\n", 676 | "score = cross_val_score(LR, x, y, cv=5)\n", 677 | "print(score)\n", 678 | "print(np.mean(np.array(score)))\n", 679 | "print('*'*10)\n", 680 | "print('K Nearest Neighbors')\n", 681 | "knn=KNeighborsClassifier(n_neighbors=75)\n", 682 | "score = cross_val_score(knn, x, y, cv=5)\n", 683 | "print(score)\n", 684 | "print(np.mean(np.array(score)))\n", 685 | "print('*'*10)\n", 686 | "print(\"MLP\")\n", 687 | "mlpc=MLPClassifier(solver='lbfgs', alpha=1e-5,hidden_layer_sizes=(512), random_state=10)\n", 688 | "score = cross_val_score(mlpc, x, y, cv=5)\n", 689 | "print(score)\n", 690 | "print(np.mean(np.array(score)))\n", 691 | "print('*'*10)" 692 | ] 693 | }, 694 | { 695 | "cell_type": "markdown", 696 | "metadata": {}, 697 | "source": [ 698 | "Noisy Data 6" 699 | ] 700 | }, 701 | { 702 | "cell_type": "code", 703 | "execution_count": 14, 704 | "metadata": {}, 705 | "outputs": [ 706 | { 707 | "name": "stdout", 708 | "output_type": "stream", 709 | "text": [ 710 | "Random Forest\n", 711 | "[0.85178571 0.85714286 0.82678571 0.81071429 0.86785714]\n", 712 | "0.8428571428571429\n", 713 | "**********\n", 714 | "Support Vector Classifier\n", 715 | "[0.69642857 0.69821429 0.66607143 0.69107143 0.7 ]\n", 716 | "0.6903571428571429\n", 717 | "**********\n", 718 | "Logistic Regression\n", 719 | "[0.7375 0.74464286 0.73035714 0.72857143 0.74642857]\n", 720 | "0.7375\n", 721 | "**********\n", 722 | "K Nearest Neighbors\n", 723 | "[0.70357143 0.7125 0.7 0.71071429 0.69464286]\n", 724 | "0.7042857142857144\n", 725 | "**********\n", 726 | "MLP\n", 727 | "[0.78928571 0.78928571 0.75178571 0.74285714 0.79642857]\n", 728 | "0.7739285714285714\n", 729 | "**********\n" 730 | ] 731 | } 732 | ], 733 | "source": [ 734 | "data = data6\n", 735 | "feature = []\n", 736 | "for i in range(2800):\n", 737 | " feature.append(mean(data[i]))\n", 738 | " feature.append(Max(data[i]))\n", 739 | " feature.append(Min(data[i]))\n", 740 | " feature.append(pp(data[i]))\n", 741 | " feature.append(Variance(data[i]))\n", 742 | " feature.append(rms(data[i]))\n", 743 | " feature.append(Ab_mean(data[i]))\n", 744 | " feature.append(Shapef(data[i]))\n", 745 | " feature.append(Impulsef(data[i]))\n", 746 | " feature.append(crestf(data[i]))\n", 747 | " feature.append(max(abs(data[i])))\n", 748 | " feature.append(clearancef(data[i]))\n", 749 | " K=kurtosis(data[i])\n", 750 | " feature.append(K)\n", 751 | " S=skew(data[i], axis=0, bias=False)\n", 752 | " feature.append(S)\n", 753 | "data_f = np.array(feature).reshape(2800,14)\n", 754 | "x = data_f\n", 755 | "y = label.ravel()\n", 756 | "print(\"Random Forest\")\n", 757 | "RF=RandomForestClassifier(n_estimators=15,random_state=42)\n", 758 | "RF.fit(x,y)\n", 759 | "score = cross_val_score(RF, x, y, cv=5)\n", 760 | "print(score)\n", 761 | "print(np.mean(np.array(score)))\n", 762 | "print('*'*10)\n", 763 | "print(\"Support Vector Classifier\")\n", 764 | "svc=SVC(random_state=100, tol=1e-1)\n", 765 | "score = cross_val_score(svc, x, y, cv=5)\n", 766 | "print(score)\n", 767 | "print(np.mean(np.array(score)))\n", 768 | "print('*'*10)\n", 769 | "print(\"Logistic Regression\")\n", 770 | "LR=LogisticRegression(random_state=6, solver='lbfgs',multi_class='multinomial')\n", 771 | "score = cross_val_score(LR, x, y, cv=5)\n", 772 | "print(score)\n", 773 | "print(np.mean(np.array(score)))\n", 774 | "print('*'*10)\n", 775 | "print('K Nearest Neighbors')\n", 776 | "knn=KNeighborsClassifier(n_neighbors=75)\n", 777 | "score = cross_val_score(knn, x, y, cv=5)\n", 778 | "print(score)\n", 779 | "print(np.mean(np.array(score)))\n", 780 | "print('*'*10)\n", 781 | "print(\"MLP\")\n", 782 | "mlpc=MLPClassifier(solver='lbfgs', alpha=1e-5,hidden_layer_sizes=(512), random_state=10)\n", 783 | "score = cross_val_score(mlpc, x, y, cv=5)\n", 784 | "print(score)\n", 785 | "print(np.mean(np.array(score)))\n", 786 | "print('*'*10)" 787 | ] 788 | }, 789 | { 790 | "cell_type": "markdown", 791 | "metadata": {}, 792 | "source": [ 793 | "Noisy Data 7" 794 | ] 795 | }, 796 | { 797 | "cell_type": "code", 798 | "execution_count": 15, 799 | "metadata": {}, 800 | "outputs": [ 801 | { 802 | "name": "stdout", 803 | "output_type": "stream", 804 | "text": [ 805 | "Random Forest\n", 806 | "[0.86964286 0.87321429 0.83035714 0.86785714 0.86607143]\n", 807 | "0.8614285714285714\n", 808 | "**********\n", 809 | "Support Vector Classifier\n", 810 | "[0.69285714 0.69285714 0.67321429 0.70892857 0.68928571]\n", 811 | "0.6914285714285715\n", 812 | "**********\n", 813 | "Logistic Regression\n", 814 | "[0.73928571 0.75892857 0.71607143 0.74285714 0.75178571]\n", 815 | "0.7417857142857143\n", 816 | "**********\n", 817 | "K Nearest Neighbors\n", 818 | "[0.7 0.70178571 0.67142857 0.68928571 0.71071429]\n", 819 | "0.6946428571428572\n", 820 | "**********\n", 821 | "MLP\n", 822 | "[0.77321429 0.7875 0.75178571 0.75892857 0.78571429]\n", 823 | "0.7714285714285714\n", 824 | "**********\n" 825 | ] 826 | } 827 | ], 828 | "source": [ 829 | "data = data7\n", 830 | "feature = []\n", 831 | "for i in range(2800):\n", 832 | " feature.append(mean(data[i]))\n", 833 | " feature.append(Max(data[i]))\n", 834 | " feature.append(Min(data[i]))\n", 835 | " feature.append(pp(data[i]))\n", 836 | " feature.append(Variance(data[i]))\n", 837 | " feature.append(rms(data[i]))\n", 838 | " feature.append(Ab_mean(data[i]))\n", 839 | " feature.append(Shapef(data[i]))\n", 840 | " feature.append(Impulsef(data[i]))\n", 841 | " feature.append(crestf(data[i]))\n", 842 | " feature.append(max(abs(data[i])))\n", 843 | " feature.append(clearancef(data[i]))\n", 844 | " K=kurtosis(data[i])\n", 845 | " feature.append(K)\n", 846 | " S=skew(data[i], axis=0, bias=False)\n", 847 | " feature.append(S)\n", 848 | "data_f = np.array(feature).reshape(2800,14)\n", 849 | "x = data_f\n", 850 | "y = label.ravel()\n", 851 | "print(\"Random Forest\")\n", 852 | "RF=RandomForestClassifier(n_estimators=15,random_state=42)\n", 853 | "RF.fit(x,y)\n", 854 | "score = cross_val_score(RF, x, y, cv=5)\n", 855 | "print(score)\n", 856 | "print(np.mean(np.array(score)))\n", 857 | "print('*'*10)\n", 858 | "print(\"Support Vector Classifier\")\n", 859 | "svc=SVC(random_state=100, tol=1e-1)\n", 860 | "score = cross_val_score(svc, x, y, cv=5)\n", 861 | "print(score)\n", 862 | "print(np.mean(np.array(score)))\n", 863 | "print('*'*10)\n", 864 | "print(\"Logistic Regression\")\n", 865 | "LR=LogisticRegression(random_state=6, solver='lbfgs',multi_class='multinomial')\n", 866 | "score = cross_val_score(LR, x, y, cv=5)\n", 867 | "print(score)\n", 868 | "print(np.mean(np.array(score)))\n", 869 | "print('*'*10)\n", 870 | "print('K Nearest Neighbors')\n", 871 | "knn=KNeighborsClassifier(n_neighbors=75)\n", 872 | "score = cross_val_score(knn, x, y, cv=5)\n", 873 | "print(score)\n", 874 | "print(np.mean(np.array(score)))\n", 875 | "print('*'*10)\n", 876 | "print(\"MLP\")\n", 877 | "mlpc=MLPClassifier(solver='lbfgs', alpha=1e-5,hidden_layer_sizes=(512), random_state=10)\n", 878 | "score = cross_val_score(mlpc, x, y, cv=5)\n", 879 | "print(score)\n", 880 | "print(np.mean(np.array(score)))\n", 881 | "print('*'*10)" 882 | ] 883 | } 884 | ], 885 | "metadata": { 886 | "kernelspec": { 887 | "display_name": "Python 3", 888 | "language": "python", 889 | "name": "python3" 890 | }, 891 | "language_info": { 892 | "codemirror_mode": { 893 | "name": "ipython", 894 | "version": 3 895 | }, 896 | "file_extension": ".py", 897 | "mimetype": "text/x-python", 898 | "name": "python", 899 | "nbconvert_exporter": "python", 900 | "pygments_lexer": "ipython3", 901 | "version": "3.7.4" 902 | } 903 | }, 904 | "nbformat": 4, 905 | "nbformat_minor": 2 906 | } 907 | -------------------------------------------------------------------------------- /images/cnn.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BaratiLab/FaultNet/0789dd763a44fb60d10f0b3c0534caddb1032ae2/images/cnn.png -------------------------------------------------------------------------------- /images/results.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BaratiLab/FaultNet/0789dd763a44fb60d10f0b3c0534caddb1032ae2/images/results.png -------------------------------------------------------------------------------- /images/results_paderborn.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BaratiLab/FaultNet/0789dd763a44fb60d10f0b3c0534caddb1032ae2/images/results_paderborn.png --------------------------------------------------------------------------------