└── p.py

/p.py:
--------------------------------------------------------------------------------
import tkinter as tk
from tkinter import filedialog, messagebox
import numpy as np
import os
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib.cm as cm
from scipy import ndimage
from skimage.measure import regionprops
from skimage import io
from skimage.filters import threshold_otsu  # For finding the threshold for grayscale to binary conversion
import pandas as pd
from time import time
import keras
from tensorflow.python.framework import ops
import tensorflow.compat.v1 as tf  # The graph below uses the TF1-style API

tf.compat.v1.disable_v2_behavior()

genuine_image_paths = "C:\\Users\\VIGNESHRAJA S\\OneDrive\\Desktop\\jupyter\\real"
forged_image_paths = "C:\\Users\\VIGNESHRAJA S\\OneDrive\\Desktop\\jupyter\\forged"


def rgbgrey(img):
    # Converts RGB to grayscale by averaging the colour channels of each pixel
    greyimg = np.zeros((img.shape[0], img.shape[1]))
    for row in range(len(img)):
        for col in range(len(img[row])):
            greyimg[row][col] = np.average(img[row][col])
    return greyimg


def greybin(img):
    # Converts grayscale to binary
    blur_radius = 0.8
    img = ndimage.gaussian_filter(img, blur_radius)  # to remove small components or noise
    # img = ndimage.binary_erosion(img).astype(img.dtype)
    thres = threshold_otsu(img)
    binimg = img > thres
    binimg = np.logical_not(binimg)  # invert so that ink pixels become True
    return binimg


def preproc(path, img=None, display=True):
    if img is None:
        img = mpimg.imread(path)
    if display:
        plt.imshow(img)
        plt.show()
    grey = rgbgrey(img)  # rgb to grey
    if display:
        plt.imshow(grey, cmap=matplotlib.cm.Greys_r)
        plt.show()
    binimg = greybin(grey)  # grey to binary
    if display:
        plt.imshow(binimg, cmap=matplotlib.cm.Greys_r)
        plt.show()
    r, c = np.where(binimg == 1)
    # Now we will make a bounding box with the boundary as the position of pixels on extreme.
    # Thus we will get a cropped image with only the signature part.
    signimg = binimg[r.min(): r.max(), c.min(): c.max()]
    if display:
        plt.imshow(signimg, cmap=matplotlib.cm.Greys_r)
        plt.show()
    return signimg
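
# Illustrative sketch (not part of the original pipeline and never called):
# shows how the helpers above compose into the preprocessing chain.
# sample_path is a hypothetical placeholder for any RGB signature scan.
def _demo_preprocessing(sample_path):
    raw = mpimg.imread(sample_path)                 # (H, W, channels) image array
    grey = rgbgrey(raw)                             # per-pixel channel average -> (H, W)
    binary = greybin(grey)                          # blur + Otsu threshold, ink pixels become True
    cropped = preproc(sample_path, display=False)   # same steps plus bounding-box crop
    return raw.shape, grey.shape, binary.shape, cropped.shape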

def Ratio(img):
    # Fraction of the cropped image occupied by ink (True) pixels
    a = 0
    for row in range(len(img)):
        for col in range(len(img[0])):
            if img[row][col] == True:
                a = a + 1
    total = img.shape[0] * img.shape[1]
    return a / total


def Centroid(img):
    # Centroid of the ink pixels, normalised by the image dimensions
    numOfWhites = 0
    a = np.array([0, 0])
    for row in range(len(img)):
        for col in range(len(img[0])):
            if img[row][col] == True:
                b = np.array([row, col])
                a = np.add(a, b)
                numOfWhites += 1
    rowcols = np.array([img.shape[0], img.shape[1]])
    centroid = a / numOfWhites
    centroid = centroid / rowcols
    return centroid[0], centroid[1]


def EccentricitySolidity(img):
    r = regionprops(img.astype("int8"))
    return r[0].eccentricity, r[0].solidity


def SkewKurtosis(img):
    h, w = img.shape
    x = np.arange(w)  # column indices
    y = np.arange(h)  # row indices
    # calculate projections along the x and y axes
    xp = np.sum(img, axis=0)
    yp = np.sum(img, axis=1)
    # centroid
    cx = np.sum(x * xp) / np.sum(xp)
    cy = np.sum(y * yp) / np.sum(yp)
    # standard deviation
    x2 = (x - cx) ** 2
    y2 = (y - cy) ** 2
    sx = np.sqrt(np.sum(x2 * xp) / np.sum(img))
    sy = np.sqrt(np.sum(y2 * yp) / np.sum(img))

    # skewness
    x3 = (x - cx) ** 3
    y3 = (y - cy) ** 3
    skewx = np.sum(xp * x3) / (np.sum(img) * sx ** 3)
    skewy = np.sum(yp * y3) / (np.sum(img) * sy ** 3)

    # kurtosis
    x4 = (x - cx) ** 4
    y4 = (y - cy) ** 4
    # 3 is subtracted to calculate relative to the normal distribution
    kurtx = np.sum(xp * x4) / (np.sum(img) * sx ** 4) - 3
    kurty = np.sum(yp * y4) / (np.sum(img) * sy ** 4) - 3

    return (skewx, skewy), (kurtx, kurty)


def getFeatures(path, img=None, display=False):
    # preproc reads the image from path itself when img is None, so any
    # pre-loaded image can simply be forwarded to it.
    img = preproc(path, img, display=display)
    ratio = Ratio(img)
    centroid = Centroid(img)
    eccentricity, solidity = EccentricitySolidity(img)
    skewness, kurtosis = SkewKurtosis(img)
    retVal = (ratio, centroid, eccentricity, solidity, skewness, kurtosis)
    return retVal


def getCSVFeatures(path, img=None, display=False):
    temp = getFeatures(path, img, display=display)
    features = (temp[0], temp[1][0], temp[1][1], temp[2], temp[3], temp[4][0], temp[4][1], temp[5][0], temp[5][1])
    return features
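
# Illustrative sketch (not part of the original pipeline and never called):
# labels the nine values returned by getCSVFeatures in the same order as the
# CSV header written by makeCSV() below. sample_path is a hypothetical placeholder.
def _demo_feature_row(sample_path):
    names = ['ratio', 'cent_y', 'cent_x', 'eccentricity', 'solidity',
             'skew_x', 'skew_y', 'kurt_x', 'kurt_y']
    return dict(zip(names, getCSVFeatures(sample_path)))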

def makeCSV():
    if not(os.path.exists('C:\\Users\\VIGNESHRAJA S\\OneDrive\\Desktop\\jupyter\\func')):
        os.mkdir('C:\\Users\\VIGNESHRAJA S\\OneDrive\\Desktop\\jupyter\\func')
        print('New folder "func" created')
    if not(os.path.exists('C:\\Users\\VIGNESHRAJA S\\OneDrive\\Desktop\\jupyter\\func/Training')):
        os.mkdir('C:\\Users\\VIGNESHRAJA S\\OneDrive\\Desktop\\jupyter\\func/Training')
        print('New folder "func/Training" created')
    if not(os.path.exists('C:\\Users\\VIGNESHRAJA S\\OneDrive\\Desktop\\jupyter\\func/Testing')):
        os.mkdir('C:\\Users\\VIGNESHRAJA S\\OneDrive\\Desktop\\jupyter\\func/Testing')
        print('New folder "func/Testing" created')
    # genuine signatures path
    gpath = genuine_image_paths
    # forged signatures path
    fpath = forged_image_paths
    for person in range(1, 13):
        per = ('00' + str(person))[-3:]
        print('Saving features for person id-', per)

        with open('C:\\Users\\VIGNESHRAJA S\\OneDrive\\Desktop\\jupyter\\func\\Training/training_' + per + '.csv', 'w') as handle:
            handle.write('ratio,cent_y,cent_x,eccentricity,solidity,skew_x,skew_y,kurt_x,kurt_y,output\n')
            # Training set: first 3 genuine and first 3 forged samples of this person
            for i in range(0, 3):
                source = os.path.join(gpath, per + per + '_00' + str(i) + '.png')
                features = getCSVFeatures(path=source)
                handle.write(','.join(map(str, features)) + ',1\n')
            for i in range(0, 3):
                source = os.path.join(fpath, '021' + per + '_00' + str(i) + '.png')
                features = getCSVFeatures(path=source)
                handle.write(','.join(map(str, features)) + ',0\n')

        with open('C:\\Users\\VIGNESHRAJA S\\OneDrive\\Desktop\\jupyter\\func\\Testing/testing_' + per + '.csv', 'w') as handle:
            handle.write('ratio,cent_y,cent_x,eccentricity,solidity,skew_x,skew_y,kurt_x,kurt_y,output\n')
            # Testing set: remaining 2 genuine and 2 forged samples of this person
            for i in range(3, 5):
                source = os.path.join(gpath, per + per + '_00' + str(i) + '.png')
                features = getCSVFeatures(path=source)
                handle.write(','.join(map(str, features)) + ',1\n')
            for i in range(3, 5):
                source = os.path.join(fpath, '021' + per + '_00' + str(i) + '.png')
                features = getCSVFeatures(path=source)
                handle.write(','.join(map(str, features)) + ',0\n')


def testing(path):
    # Extract features of the questioned signature and write them to a one-row CSV
    feature = getCSVFeatures(path)
    if not(os.path.exists('C:\\Users\\VIGNESHRAJA S\\OneDrive\\Desktop\\jupyter/TestFeatures')):
        os.mkdir('C:\\Users\\VIGNESHRAJA S\\OneDrive\\Desktop\\jupyter/TestFeatures')
    with open('C:\\Users\\VIGNESHRAJA S\\OneDrive\\Desktop\\jupyter\\TestFeatures/testcsv.csv', 'w') as handle:
        handle.write('ratio,cent_y,cent_x,eccentricity,solidity,skew_x,skew_y,kurt_x,kurt_y\n')
        handle.write(','.join(map(str, feature)) + '\n')


makeCSV()


n_input = 9
train_person_id = input("Enter person's id : ")
test_image_path = input("Enter path of signature image : ")
train_path = 'C:\\Users\\VIGNESHRAJA S\\OneDrive\\Desktop\\jupyter\\func\\Training/training_' + train_person_id + '.csv'
testing(test_image_path)
test_path = 'C:\\Users\\VIGNESHRAJA S\\OneDrive\\Desktop\\jupyter\\TestFeatures/testcsv.csv'


def readCSV(train_path, test_path, type2=False):
    # Reading train data
    df = pd.read_csv(train_path, usecols=range(n_input))
    train_input = np.array(df.values)
    train_input = train_input.astype(np.float32, copy=False)  # Converting input to float32
    df = pd.read_csv(train_path, usecols=(n_input,))
    temp = [elem[0] for elem in df.values]
    correct = np.array(temp)
    corr_train = keras.utils.to_categorical(correct, 2)  # Converting to one hot
    # Reading test data
    df = pd.read_csv(test_path, usecols=range(n_input))
    test_input = np.array(df.values)
    test_input = test_input.astype(np.float32, copy=False)
    if not(type2):
        df = pd.read_csv(test_path, usecols=(n_input,))
        temp = [elem[0] for elem in df.values]
        correct = np.array(temp)
        corr_test = keras.utils.to_categorical(correct, 2)  # Converting to one hot
    if not(type2):
        return train_input, corr_train, test_input, corr_test
    else:
        return train_input, corr_train, test_input
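
# Illustrative sketch (not part of the original pipeline and never called):
# readCSV returns float32 feature matrices of shape (n_samples, 9) plus a
# one-hot label matrix of shape (n_samples, 2), where column 1 marks genuine
# and column 0 marks forged. train_csv and test_csv are hypothetical placeholders.
def _demo_readcsv_shapes(train_csv, test_csv):
    tr_x, tr_y, te_x = readCSV(train_csv, test_csv, type2=True)
    print("train features:", tr_x.shape)        # e.g. (6, 9): 3 genuine + 3 forged rows
    print("train one-hot labels:", tr_y.shape)  # e.g. (6, 2)
    print("test features:", te_x.shape)         # (1, 9) for a single questioned signature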

ops.reset_default_graph()
# Parameters
learning_rate = 0.001
training_epochs = 1000
display_step = 1

# Network Parameters
n_hidden_1 = 7   # 1st layer number of neurons
n_hidden_2 = 10  # 2nd layer number of neurons
n_hidden_3 = 30  # 3rd layer number of neurons
n_classes = 2    # no. of classes (genuine or forged)

# tf Graph input
X = tf.compat.v1.placeholder("float", [None, n_input])
Y = tf.compat.v1.placeholder("float", [None, n_classes])

# Store layers weight & bias
weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1], seed=1)),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'h3': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_3])),
    # The output weights are sized from n_hidden_1 because the output layer
    # below is fed from layer_1.
    'out': tf.Variable(tf.random_normal([n_hidden_1, n_classes], seed=2))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1], seed=3)),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'b3': tf.Variable(tf.random_normal([n_hidden_3])),
    'out': tf.Variable(tf.random_normal([n_classes], seed=4))
}


# Create model
def multilayer_perceptron(x):
    layer_1 = tf.tanh(tf.matmul(x, weights['h1']) + biases['b1'])
    # layer_2 and layer_3 are built but not connected to the output:
    # only layer_1 feeds the output layer in this network.
    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
    layer_3 = tf.add(tf.matmul(layer_2, weights['h3']), biases['b3'])
    out_layer = tf.tanh(tf.matmul(layer_1, weights['out']) + biases['out'])
    return out_layer


# Construct model
logits = multilayer_perceptron(X)

# Define loss and optimizer
loss_op = tf.reduce_mean(tf.squared_difference(logits, Y))
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op)
# For accuracies
pred = tf.nn.softmax(logits)  # Apply softmax to logits
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Initializing the variables
init = tf.global_variables_initializer()


def evaluate(train_path, test_path, type2=False):
    if not(type2):
        train_input, corr_train, test_input, corr_test = readCSV(train_path, test_path)
    else:
        train_input, corr_train, test_input = readCSV(train_path, test_path, type2)
    with tf.Session() as sess:
        sess.run(init)
        # Training cycle
        for epoch in range(training_epochs):
            # Run optimization op (backprop) and cost op (to get loss value)
            _, cost = sess.run([train_op, loss_op], feed_dict={X: train_input, Y: corr_train})
            if cost < 0.0001:
                break
            # # Display logs per epoch step
            # if epoch % 999 == 0:
            #     print("Epoch:", '%04d' % (epoch+1), "cost={:.9f}".format(cost))
        # print("Optimization Finished!")

        # Finding accuracies
        accuracy1 = accuracy.eval({X: train_input, Y: corr_train})
        # print("Accuracy for train:", accuracy1)
        # print("Accuracy for test:", accuracy2)
        if type2 is False:
            accuracy2 = accuracy.eval({X: test_input, Y: corr_test})
            return accuracy1, accuracy2
        else:
            prediction = pred.eval({X: test_input})
            if prediction[0][1] > prediction[0][0]:
                return True
            else:
                return False
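
# Illustrative sketch (not part of the original pipeline and never called):
# the same end-to-end verification flow that check_signature() performs from
# the GUI below. person_id and questioned_image are hypothetical placeholders.
def _demo_verify(person_id, questioned_image):
    testing(questioned_image)  # extract features, write TestFeatures/testcsv.csv
    train_csv = ('C:\\Users\\VIGNESHRAJA S\\OneDrive\\Desktop\\jupyter\\func\\Training/training_'
                 + person_id + '.csv')
    test_csv = 'C:\\Users\\VIGNESHRAJA S\\OneDrive\\Desktop\\jupyter\\TestFeatures/testcsv.csv'
    return evaluate(train_csv, test_csv, type2=True)  # True -> genuine, False -> forged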

def trainAndTest(rate=0.001, epochs=1700, neurons=7, display=False):
    start = time()

    # Parameters (training_epochs takes effect in evaluate(); learning_rate and
    # the layer sizes are already baked into the graph built above)
    global learning_rate, training_epochs, n_hidden_1
    learning_rate = rate
    training_epochs = epochs

    # Network Parameters
    n_hidden_1 = neurons  # 1st layer number of neurons
    n_hidden_2 = 7        # 2nd layer number of neurons
    n_hidden_3 = 30       # 3rd layer

    train_avg, test_avg = 0, 0
    n = 10
    for i in range(1, n + 1):
        if display:
            print("Running for Person id", i)
        temp = ('0' + str(i))[-2:]
        train_score, test_score = evaluate(train_path.replace('01', temp), test_path.replace('01', temp))
        train_avg += train_score
        test_avg += test_score
    if display:
        # print("Number of neurons in Hidden layer-", n_hidden_1)
        print("Training average-", train_avg / n)
        print("Testing average-", test_avg / n)
        print("Time taken-", time() - start)
    return train_avg / n, test_avg / n, (time() - start) / n


def check_signature():
    person_id = person_id_entry.get()
    file_path = file_path_entry.get()

    if not person_id or not file_path:
        messagebox.showerror("Input Error", "Please enter both person ID and file path.")
        return

    train_path = f'C:\\Users\\VIGNESHRAJA S\\OneDrive\\Desktop\\jupyter\\func\\Training/training_{person_id}.csv'
    test_image_path = file_path
    test_path = 'C:\\Users\\VIGNESHRAJA S\\OneDrive\\Desktop\\jupyter\\TestFeatures/testcsv.csv'

    testing(test_image_path)
    result = evaluate(train_path, test_path, type2=True)
    if result:
        messagebox.showinfo("Result", "Genuine Image")
    else:
        messagebox.showinfo("Result", "Forged Image")


def browse_file():
    file_path = filedialog.askopenfilename()
    file_path_entry.delete(0, tk.END)
    file_path_entry.insert(0, file_path)


# GUI Setup
root = tk.Tk()
root.title("Signature Verification")

# Person ID
person_id_label = tk.Label(root, text="Person ID")
person_id_label.grid(row=0, column=0, padx=10, pady=10)
person_id_entry = tk.Entry(root)
person_id_entry.grid(row=0, column=1, padx=10, pady=10)

# File Path
file_path_label = tk.Label(root, text="Signature Image Path")
file_path_label.grid(row=1, column=0, padx=10, pady=10)
file_path_entry = tk.Entry(root, width=40)
file_path_entry.grid(row=1, column=1, padx=10, pady=10)
browse_button = tk.Button(root, text="Browse", command=browse_file)
browse_button.grid(row=1, column=2, padx=10, pady=10)

# Verify Button
verify_button = tk.Button(root, text="Verify Signature", command=check_signature)
verify_button.grid(row=2, column=0, columnspan=3, pady=20)

root.mainloop()
--------------------------------------------------------------------------------