├── Code
│   ├── Single_Test.py
│   └── Train.py
├── README.md
└── Readme_Images
    ├── Defects.png
    ├── Dnet.png
    ├── Dnet2.png
    ├── PostIP.png
    ├── Result.png
    └── TempTest.png

/Code/Single_Test.py:
--------------------------------------------------------------------------------
import cv2
import numpy as np
import imutils
import matplotlib.pyplot as plt
from PIL import Image
from keras.models import load_model
########################################################################################


def morph_transform(ref, test):
    # Register the test image onto the reference (template) using ORB features
    # and a RANSAC-estimated homography.
    img1 = test
    img2 = ref
    height, width, depth = img2.shape
    orb_detector = cv2.ORB_create(5000)
    kp1, d1 = orb_detector.detectAndCompute(img1, None)
    kp2, d2 = orb_detector.detectAndCompute(img2, None)
    matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = matcher.match(d1, d2)
    # Keep the best 90% of matches by descriptor distance.
    matches = sorted(matches, key=lambda x: x.distance)
    matches = matches[:int(len(matches) * 0.9)]
    no_of_matches = len(matches)
    p1 = np.zeros((no_of_matches, 2))
    p2 = np.zeros((no_of_matches, 2))

    for i in range(no_of_matches):
        p1[i, :] = kp1[matches[i].queryIdx].pt
        p2[i, :] = kp2[matches[i].trainIdx].pt

    homography, mask = cv2.findHomography(p1, p2, cv2.RANSAC)
    transformed_img = cv2.warpPerspective(test, homography, (width, height))
    return transformed_img


image1 = cv2.imread('TemplatePath')  # defect-free template image
image2 = cv2.imread('TestPath')      # image under test
ref_test = morph_transform(image1, image2)
image2 = ref_test

# Denoise both images, then XOR to highlight the pixels where they differ.
image1 = cv2.medianBlur(image1, 5)
image2 = cv2.medianBlur(image2, 5)
image_res = cv2.bitwise_xor(image1, image2)
cv2.imshow('RES_XOR', image_res)
cv2.waitKey(0)

# Clean up the difference image with median filtering, morphological closing/opening
# and a binary threshold.
image_res = cv2.medianBlur(image_res, 5)
kernel1 = cv2.getStructuringElement(cv2.MORPH_RECT, (15, 15))
image_res = cv2.morphologyEx(image_res, cv2.MORPH_CLOSE, kernel1)
kernel2 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
image_res = cv2.morphologyEx(image_res, cv2.MORPH_OPEN, kernel2)
thresh, image_res = cv2.threshold(image_res, 125, 255, cv2.THRESH_BINARY)
cv2.imshow('RES_XOR_AFTERFILT', image_res)
cv2.waitKey(0)

# Each remaining blob is a defect candidate; take the contour centroids as defect locations.
edges = cv2.Canny(image_res, 30, 200)
cv2.imshow('RES_CONTOURS', edges)
cv2.waitKey(0)
cnts = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
img2 = cv2.imread('TestPath')

CX = []
CY = []
C = []

for c in cnts:
    M = cv2.moments(c)
    if M["m00"] != 0:
        cx = int(M["m10"] / M["m00"])
        cy = int(M["m01"] / M["m00"])
        CX.append(cx)
        CY.append(cy)
        C.append((cx, cy))

print(CX)
print(CY)

implot = plt.imshow(img2)
plt.scatter(CX, CY, c='r', s=40)
plt.show()

# Classify a 64x64 crop around each defect centroid with the trained DenseNet.
im = Image.open("TestPath").convert('L')  # grayscale, to match the model's (64, 64, 1) input
model = load_model('ModelPath')
# print(model.summary())
classes = {
    0: "Open",
    1: "Short",
    2: "Mousebite",
    3: "Spur",
    4: "Copper",
    5: "Pin-Hole"
}

pred = []
confidence = []
for c in C:
    im1 = im.crop((c[0] - 32, c[1] - 32, c[0] + 32, c[1] + 32))
    im1 = np.array(im1)
    im1 = np.expand_dims(im1, axis=-1)  # add the channel dimension
    im1 = np.expand_dims(im1, axis=0)   # add the batch dimension
    print(im1.shape)
    a = model.predict(im1, verbose=1, batch_size=1)
    pred.append(np.argmax(a))
    confidence.append(a)

# Plot the test image with the predicted class and confidence at each defect centroid.
plot_final = plt.imshow(img2)
plt.scatter(CX, CY, c='r', s=4)
for i, txt in enumerate(pred):
    plt.annotate([classes[txt], confidence[i][0][txt]], (CX[i], CY[i]), color='r')
plt.show()

--------------------------------------------------------------------------------
/Code/Train.py:
--------------------------------------------------------------------------------
from __future__ import print_function
import math

from keras.layers import Input, Dense, Dropout, Activation, Concatenate, BatchNormalization, Flatten
from keras.layers import Conv2D, AveragePooling2D, ZeroPadding2D, MaxPooling2D
from keras.models import Model
from keras.regularizers import l2
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import SGD
from keras.callbacks import LearningRateScheduler
#############################################################################################

def step_decay(epoch):
    # Step learning-rate schedule: start at 0.01 and divide by 10 every 7 epochs.
    initial_lrate = 0.01
    drop = 0.1
    epochs_drop = 7.0
    lrate = initial_lrate * math.pow(drop, math.floor((1 + epoch) / epochs_drop))
    return lrate


def DenseNet(input_shape=None, dense_blocks=3, dense_layers=-1, growth_rate=12, nb_classes=None, dropout_rate=None,
             bottleneck=False, compression=1.0, weight_decay=1e-4, depth=40):

    if nb_classes is None:
        raise Exception('Please define the number of classes (e.g. nb_classes=10). This is required for the final softmax.')

    if compression <= 0.0 or compression > 1.0:
        raise Exception('Compression has to be a value between 0.0 and 1.0. Setting compression to 1.0 turns it off.')

    if type(dense_layers) is list:
        if len(dense_layers) != dense_blocks:
            raise AssertionError('Number of dense layer entries has to match the number of dense blocks.')
    elif dense_layers == -1:
        # Derive the number of layers per block from the total depth.
        if bottleneck:
            dense_layers = (depth - (dense_blocks + 1)) / dense_blocks // 2
        else:
            dense_layers = (depth - (dense_blocks + 1)) // dense_blocks
        dense_layers = [int(dense_layers) for _ in range(dense_blocks)]
    else:
        dense_layers = [int(dense_layers) for _ in range(dense_blocks)]

    img_input = Input(shape=input_shape)
    nb_channels = growth_rate * 2

    # Initial convolution layer
    x = ZeroPadding2D(padding=((3, 3), (3, 3)))(img_input)
    x = Conv2D(nb_channels, (7, 7), strides=2, use_bias=False, kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization(gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay))(x)
    x = Activation('relu')(x)
    x = ZeroPadding2D(padding=((1, 1), (1, 1)))(x)
    x = MaxPooling2D(pool_size=(3, 3), strides=2)(x)

    # Building dense blocks
    for block in range(dense_blocks):

        # Add dense block
        x, nb_channels = dense_block(x, dense_layers[block], nb_channels, growth_rate, dropout_rate, bottleneck, weight_decay)

        if block < dense_blocks - 1:  # if it's not the last dense block
            # Add transition block
            x = transition_layer(x, nb_channels, dropout_rate, compression, weight_decay)
            nb_channels = int(nb_channels * compression)

    # Classification head
    x = AveragePooling2D(pool_size=7)(x)  # DECIDING LINE
    x = Flatten(data_format='channels_last')(x)
    x = Dense(nb_classes, activation='softmax', kernel_regularizer=l2(weight_decay), bias_regularizer=l2(weight_decay))(x)

    model_name = None
    if growth_rate >= 36:
        model_name = 'widedense'
    else:
        model_name = 'dense'

    if bottleneck:
        model_name = model_name + 'b'

    if compression < 1.0:
        model_name = model_name + 'c'

    return Model(img_input, x, name=model_name), model_name


def dense_block(x, nb_layers, nb_channels, growth_rate, dropout_rate=None, bottleneck=False, weight_decay=1e-4):
    # Each convolution block's output is concatenated with all preceding feature maps.
    x_list = [x]
    for i in range(nb_layers):
        cb = convolution_block(x, growth_rate, dropout_rate, bottleneck, weight_decay)
        x_list.append(cb)
        x = Concatenate(axis=-1)(x_list)
        nb_channels += growth_rate
    return x, nb_channels


def convolution_block(x, nb_channels, dropout_rate=None, bottleneck=False, weight_decay=1e-4):

    # Bottleneck (1x1 convolution to limit the number of input feature maps)
    if bottleneck:
        bottleneckWidth = 4
        x = BatchNormalization(gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay))(x)
        x = Activation('relu')(x)
        x = Conv2D(nb_channels * bottleneckWidth, (1, 1), use_bias=False, kernel_regularizer=l2(weight_decay))(x)
        # Dropout
        if dropout_rate:
            x = Dropout(dropout_rate)(x)

    # Standard (BN-ReLU-Conv)
    x = BatchNormalization(gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay))(x)
    x = Activation('relu')(x)
    x = Conv2D(nb_channels, (3, 3), padding='same', use_bias=False, kernel_regularizer=l2(weight_decay))(x)

    # Dropout
    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    return x


def transition_layer(x, nb_channels, dropout_rate=None, compression=1.0, weight_decay=1e-4):
    # BN-ReLU-1x1 Conv, then 2x2 average pooling; compression < 1.0 shrinks the channel count.
    x = BatchNormalization(gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay))(x)
    x = Activation('relu')(x)
    x = Conv2D(int(nb_channels * compression), (1, 1), padding='same',
               use_bias=False, kernel_regularizer=l2(weight_decay))(x)

    # Adding dropout
    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    x = AveragePooling2D((2, 2), strides=(2, 2))(x)
    return x


if __name__ == '__main__':

    model = DenseNet(input_shape=(64, 64, 1), dense_blocks=2, dense_layers=6, growth_rate=32,
                     nb_classes=6, bottleneck=True, depth=27, weight_decay=1e-5)
    print(model[0].summary())
    # lr=0.0 is a placeholder: the LearningRateScheduler callback sets the actual rate each epoch.
    opt = SGD(lr=0.0, momentum=0.9)
    model[0].compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
    train_datagen = ImageDataGenerator(data_format="channels_last")
    train_generator = train_datagen.flow_from_directory('TrainPath', target_size=(64, 64), color_mode='grayscale', batch_size=8)
    STEP_SIZE_TRAIN = train_generator.n // train_generator.batch_size
    lrate = LearningRateScheduler(step_decay, verbose=1)
    callbacks_list = [lrate]
    model[0].fit_generator(train_generator, steps_per_epoch=STEP_SIZE_TRAIN, epochs=25, callbacks=callbacks_list, verbose=1)
    model[0].save("SavePath")

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# PCB-Defects-Detection-and-Classification

## This is an implementation of the paper: https://arxiv.org/pdf/1901.08204.pdf

## Overview:
The paper proposes a method that first detects PCB defects using template matching and image processing, and then classifies each defect with a Densely Connected Convolutional Network (DenseNet) into the following categories:

1) Missing Hole
2) Mouse Bite
3) Open Circuit
4) Short
5) Spur
6) Spurious Copper

![alt text](https://github.com/MukundSai7907/PCB-Defects-Detection-and-Classification/blob/main/Readme_Images/Defects.png?raw=true)


## Classifier Model:

The DenseNet is built from "dense" blocks in which every layer receives the feature maps of all preceding layers, as shown below

![alt text](https://github.com/MukundSai7907/PCB-Defects-Detection-and-Classification/blob/main/Readme_Images/Dnet.png?raw=true)

In the proposed model, two of these dense blocks are sandwiched between convolution and pooling layers as shown below


![alt text](https://github.com/MukundSai7907/PCB-Defects-Detection-and-Classification/blob/main/Readme_Images/Dnet2.png?raw=true)
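
For reference, this is roughly how `Code/Train.py` builds that network; a minimal sketch, assuming `Code/` is the working directory or on the import path:

```python
from Train import DenseNet  # assumes Code/Train.py is importable

# Two dense blocks of six bottleneck layers each, growth rate 32,
# 64x64 grayscale input and a 6-way softmax, as in Code/Train.py.
model, name = DenseNet(input_shape=(64, 64, 1), dense_blocks=2, dense_layers=6,
                       growth_rate=32, nb_classes=6, bottleneck=True,
                       depth=27, weight_decay=1e-5)
model.summary()
```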

## Results:

A sample template (left) and defective image (right) are shown below

![alt text](https://github.com/MukundSai7907/PCB-Defects-Detection-and-Classification/blob/main/Readme_Images/TempTest.png?raw=true)

From here, after template matching and some image transformations (detailed in the paper), we localize the defects as shown

![alt text](https://github.com/MukundSai7907/PCB-Defects-Detection-and-Classification/blob/main/Readme_Images/PostIP.png?raw=true)

Feeding an ROI cropped around each of these defects to the DenseNet, the final result labels each defect along with its confidence

![alt text](https://github.com/MukundSai7907/PCB-Defects-Detection-and-Classification/blob/main/Readme_Images/Result.png?raw=true)
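
The localization step is implemented in `Code/Single_Test.py`; a condensed sketch of that pipeline is shown below (the image paths are placeholders, and the test image is assumed to be already registered onto the template, which Single_Test.py does with ORB features and a homography):

```python
import cv2
import imutils

# Placeholder paths; the test image is assumed to be already aligned to the template.
template = cv2.medianBlur(cv2.imread('template.png'), 5)
test = cv2.medianBlur(cv2.imread('test_registered.png'), 5)

# XOR exposes pixels where the boards differ; morphology and a threshold clean the mask.
diff = cv2.bitwise_xor(template, test)
diff = cv2.medianBlur(diff, 5)
diff = cv2.morphologyEx(diff, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_RECT, (15, 15)))
diff = cv2.morphologyEx(diff, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)))
_, mask = cv2.threshold(diff, 125, 255, cv2.THRESH_BINARY)

# Contour centroids give the defect locations to crop ROIs around for classification.
edges = cv2.Canny(mask, 30, 200)
cnts = imutils.grab_contours(cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE))
centroids = []
for c in cnts:
    M = cv2.moments(c)
    if M["m00"] != 0:
        centroids.append((int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"])))
print(centroids)
```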

--------------------------------------------------------------------------------
/Readme_Images/Defects.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MukundSai7907/PCB-Defects-Classification-Using-Deep-Learning/7b136d6c0784046faa9ea6376b3b76ddf4d21381/Readme_Images/Defects.png

--------------------------------------------------------------------------------
/Readme_Images/Dnet.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MukundSai7907/PCB-Defects-Classification-Using-Deep-Learning/7b136d6c0784046faa9ea6376b3b76ddf4d21381/Readme_Images/Dnet.png

--------------------------------------------------------------------------------
/Readme_Images/Dnet2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MukundSai7907/PCB-Defects-Classification-Using-Deep-Learning/7b136d6c0784046faa9ea6376b3b76ddf4d21381/Readme_Images/Dnet2.png

--------------------------------------------------------------------------------
/Readme_Images/PostIP.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MukundSai7907/PCB-Defects-Classification-Using-Deep-Learning/7b136d6c0784046faa9ea6376b3b76ddf4d21381/Readme_Images/PostIP.png

--------------------------------------------------------------------------------
/Readme_Images/Result.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MukundSai7907/PCB-Defects-Classification-Using-Deep-Learning/7b136d6c0784046faa9ea6376b3b76ddf4d21381/Readme_Images/Result.png

--------------------------------------------------------------------------------
/Readme_Images/TempTest.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MukundSai7907/PCB-Defects-Classification-Using-Deep-Learning/7b136d6c0784046faa9ea6376b3b76ddf4d21381/Readme_Images/TempTest.png
--------------------------------------------------------------------------------