├── LICENSE
├── README.md
├── signatue_recognition.py
├── signatureResults.csv
└── test
    ├── 021002_004.png
    ├── 021003_004.png
    └── 021011_004.png

/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2018 Ayush Srivastava

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------

/README.md:
--------------------------------------------------------------------------------
# Signature-recognition

Signature recognition is a behavioural biometric. It can operate in two different ways:

* **Static ("off-line"):** users write their signature on paper, it is digitized through an optical scanner or a camera, and the biometric system recognizes the signature by analyzing its shape.
* **Dynamic ("on-line"):** users write their signature on a digitizing tablet, which acquires the signature in real time. Acquisition is also possible with stylus-operated PDAs, or on smart-phones and tablets with a capacitive screen, where users sign with a finger or an appropriate pen. Dynamic recognition captures not only the shape but also time-based information such as the pen's trajectory, pressure and stroke order.

This project works in the static (off-line) mode: it trains a convolutional neural network on scanned signature images.

Before running the project, install the required packages:

pip install keras
pip install numpy
pip install pandas
pip install scikit-learn
pip install scipy
pip install matplotlib

(`os` is part of the Python standard library and does not need to be installed separately.)
--------------------------------------------------------------------------------
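The training script that follows expects class-labelled subfolders of signature images under a local data directory, one folder per entry in SIGNATURE_CLASSES. A minimal sketch for sanity-checking that layout before training; the drive-letter path is the one hard-coded in signatue_recognition.py and would need to be adapted to your machine:

import os

TRAIN_DIR = 'E:/Python/signatue_recognition/data/train/'  # path hard-coded in the script; adjust as needed
SIGNATURE_CLASSES = ['A', 'B', 'C', 'D', 'E', 'F', 'K', 'L', 'M', 'N', 'O', 'P']

# The script looks for one subfolder per class, each holding that signer's images.
for cls in SIGNATURE_CLASSES:
    class_dir = os.path.join(TRAIN_DIR, cls)
    if not os.path.isdir(class_dir):
        print('missing folder: {}'.format(class_dir))
    else:
        print('{}: {} images'.format(cls, len(os.listdir(class_dir))))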

/signatue_recognition.py:
--------------------------------------------------------------------------------
# Trains a small convolutional network to classify scanned signature images.
# Note: the script targets the older Keras layer API (Convolution2D /
# border_mode / nb_epoch); newer Keras releases use Conv2D / padding / epochs.
import os
import random

import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import log_loss
from sklearn.preprocessing import LabelEncoder

import matplotlib.pyplot as plt
from matplotlib import ticker

from keras.models import Sequential
from keras.layers import Dropout, Flatten, Convolution2D, MaxPooling2D, ZeroPadding2D, Dense, Activation
from keras.optimizers import SGD, Adagrad, RMSprop, Adam
from keras.callbacks import EarlyStopping
from keras.utils import np_utils
from keras import backend as K

# Image size the network expects and the on-disk data locations.
ROWS = 190
COLS = 160
CHANNELS = 3
TRAIN_DIR = "E:/Python/signatue_recognition/data/train/"
TEST_DIR = 'E:/Python/signatue_recognition/data/test/'

# One class per signer; the train folder holds one subfolder per class.
SIGNATURE_CLASSES = ['A', 'B', 'C', 'D', 'E', 'F', 'K', 'L', 'M', 'N', 'O', 'P']


def root_mean_squared_error(y_true, y_pred):
    """RMSE loss function."""
    return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))


def get_images(sig_class):
    """List the image paths (relative to TRAIN_DIR) for one signature class."""
    class_dir = TRAIN_DIR + '{}'.format(sig_class)
    return [sig_class + '/' + im for im in os.listdir(class_dir)]


def read_image(src):
    """Read an image from disk and resize it to ROWS x COLS."""
    # scipy.misc.imread/imresize were removed in newer SciPy releases;
    # an older SciPy (with Pillow) is needed for these calls.
    from scipy import misc
    im = misc.imread(src)
    return misc.imresize(im, (ROWS, COLS))


# --- Collect the training files and their labels ---
files = []
y_all = []

for sig_class in SIGNATURE_CLASSES:
    class_files = get_images(sig_class)
    files.extend(class_files)
    y_all.extend(np.tile(sig_class, len(class_files)))
    print("{0} photos of {1}".format(len(class_files), sig_class))

y_all = np.array(y_all)
print(len(files))
print(len(y_all))

# --- Load the images into a single array ---
X_all = np.ndarray((len(files), ROWS, COLS, CHANNELS), dtype=np.uint8)

for i, im in enumerate(files):
    X_all[i] = read_image(TRAIN_DIR + im)
    if i % 1000 == 0:
        print('Processed {} of {}'.format(i, len(files)))

print(X_all.shape)

# Encode the string labels as one-hot vectors.
y_all = LabelEncoder().fit_transform(y_all)
y_all = np_utils.to_categorical(y_all)

# Hold out 14 stratified samples for validation.
X_train, X_valid, y_train, y_valid = train_test_split(X_all, y_all,
                                                      test_size=14, random_state=23,
                                                      stratify=y_all)

# Not used below; the model is compiled with Adam and the RMSE loss instead.
optimizer = RMSprop(lr=1e-4)
objective = 'categorical_crossentropy'


def center_normalize(x):
    """Zero-center a batch and scale it to unit standard deviation."""
    return (x - K.mean(x)) / K.std(x)


# --- Model definition ---
model = Sequential()

model.add(Activation(activation=center_normalize, input_shape=(ROWS, COLS, CHANNELS)))

model.add(Convolution2D(64, 3, 3, border_mode='same'))
model.add(Activation('relu'))
model.add(Convolution2D(64, 3, 3, border_mode='valid'))
model.add(Activation('relu'))
model.add(ZeroPadding2D(padding=(1, 1)))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(0.25))

model.add(Convolution2D(96, 3, 3, border_mode='same'))
model.add(Activation('relu'))
model.add(Convolution2D(96, 3, 3, border_mode='valid'))
model.add(Activation('relu'))
model.add(ZeroPadding2D(padding=(1, 1)))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(0.25))

model.add(Convolution2D(128, 2, 2, border_mode='same'))
model.add(Activation('relu'))
model.add(Convolution2D(128, 2, 2, border_mode='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(1024))
model.add(Activation('relu'))
model.add(Dropout(0.5))

# One sigmoid output per signature class.
model.add(Dense(len(SIGNATURE_CLASSES)))
model.add(Activation('sigmoid'))

adam = Adam(lr=0.0001)
model.compile(optimizer=adam, loss=root_mean_squared_error)

# --- Training ---
early_stopping = EarlyStopping(monitor='val_loss', patience=4, verbose=1, mode='auto')

model.fit(X_train, y_train, batch_size=64, nb_epoch=3,
          validation_split=0.1, verbose=1, shuffle=True, callbacks=[early_stopping])

preds = model.predict(X_valid, verbose=1)
print("Validation Log Loss: {}".format(log_loss(y_valid, preds)))

# --- Score the test images and write the results CSV ---
test_files = [im for im in os.listdir(TEST_DIR)]
test = np.ndarray((len(test_files), ROWS, COLS, CHANNELS), dtype=np.uint8)

for i, im in enumerate(test_files):
    test[i] = read_image(TEST_DIR + im)

test_preds = model.predict(test, verbose=1)
submission = pd.DataFrame(test_preds, columns=SIGNATURE_CLASSES)
submission.insert(0, 'image', test_files)

submission.to_csv('E:/Python/signatue_recognition/signatureResults.csv', index=False)
--------------------------------------------------------------------------------

/signatureResults.csv:
--------------------------------------------------------------------------------
image,A,B,C,D,E,F,K,L,M,N,O,P
021002_004.png,0.15654709935188293,0.1526409238576889,0.12393844127655029,0.1692200005054474,0.12327947467565536,0.1684596687555313,0.1213107630610466,0.14917510747909546,0.16757722198963165,0.15544912219047546,0.16255134344100952,0.1499464213848114
021003_004.png,0.1833602488040924,0.19250555336475372,0.13957734405994415,0.18059581518173218,0.1416306495666504,0.1773919314146042,0.13607299327850342,0.17255018651485443,0.1839074343442917,0.1870533525943756,0.19179610908031464,0.1664862036705017
021011_004.png,0.1957799196243286,0.20581355690956116,0.16301706433296204,0.19429752230644226,0.1549103558063507,0.19567155838012695,0.15201936662197113,0.1920786201953888,0.20289131999015808,0.1839257925748825,0.1907159835100174,0.17515985667705536
--------------------------------------------------------------------------------
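signatureResults.csv above holds one row per test image with the network's score for each of the twelve classes. A minimal sketch of how a predicted class per image could be read off; the 'predicted' column is a hypothetical addition, not something the repository itself produces:

import pandas as pd

# Load the per-class scores written by signatue_recognition.py.
results = pd.read_csv('signatureResults.csv')

# The predicted class is the column with the highest score in each row.
# Note: the scores come from a sigmoid output layer, so they are not
# normalised probabilities and do not sum to 1.
class_columns = [c for c in results.columns if c != 'image']
results['predicted'] = results[class_columns].idxmax(axis=1)
print(results[['image', 'predicted']])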

/test/021002_004.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ayushreal/Signature-recognition/b6e785874ec83e564bb57427a417438919f19a7f/test/021002_004.png
--------------------------------------------------------------------------------

/test/021003_004.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ayushreal/Signature-recognition/b6e785874ec83e564bb57427a417438919f19a7f/test/021003_004.png
--------------------------------------------------------------------------------

/test/021011_004.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ayushreal/Signature-recognition/b6e785874ec83e564bb57427a417438919f19a7f/test/021011_004.png
--------------------------------------------------------------------------------
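For scoring a single new signature after training, the same preprocessing used for the test set at the end of signatue_recognition.py applies. A minimal sketch, assuming the script has just been run in the same Python session so that model, read_image and SIGNATURE_CLASSES are already defined; the example file path points at one of the repository's test images:

import numpy as np

# Preprocess one image exactly as the training/test images were (resize to 190x160x3).
img = read_image('E:/Python/signatue_recognition/data/test/021002_004.png')
batch = np.expand_dims(img, axis=0)          # model.predict expects a batch dimension

scores = model.predict(batch, verbose=0)[0]  # one sigmoid score per signature class
predicted_class = SIGNATURE_CLASSES[int(np.argmax(scores))]
print('predicted class:', predicted_class)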