├── myface.bat
├── hide.vbs
├── face_3.py
├── face_1.py
├── face_4.py
├── face_2.py
└── README.md

/myface.bat:
--------------------------------------------------------------------------------
call activate tenserflow02
cd /d E:\ziliao\LearningPy\face
python face_4.py
--------------------------------------------------------------------------------
/hide.vbs:
--------------------------------------------------------------------------------
Set ws = CreateObject("Wscript.Shell")
ws.run "cmd /c E:\ziliao\LearningPy\face\myface.bat",vbhide
--------------------------------------------------------------------------------
/face_3.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
import sys
import os
from cv2 import cv2

input_dir = './lfw'
output_dir = './other_faces'
size = 64

if not os.path.exists(output_dir):
    os.makedirs(output_dir)

def close_cv2():
    """Close the OpenCV windows once Esc is pressed."""
    while(1):
        if(cv2.waitKey(100)==27):
            break
    cv2.destroyAllWindows()
# Load the Haar cascade face classifier
haar = cv2.CascadeClassifier(r'E:\ProgramData\Anaconda3\envs\tenserflow02\Lib\site-packages\cv2\data\haarcascade_frontalface_default.xml')

index = 1
for (path, dirnames, filenames) in os.walk(input_dir):
    for filename in filenames:
        if filename.endswith('.jpg'):
            print('Processing picture %s' % index)
            img_path = path+'/'+filename
            # Read the image from file
            print(img_path)
            img = cv2.imread(img_path)
            # cv2.imshow(" ",img)
            # close_cv2()
            # Convert to grayscale

            gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            faces = haar.detectMultiScale(gray_img, 1.3, 5)
            for f_x, f_y, f_w, f_h in faces:
                face = img[f_y:f_y+f_h, f_x:f_x+f_w]
                face = cv2.resize(face, (64,64))
                '''
                if n % 3 == 1:
                    face = relight(face, 1, 50)
                elif n % 3 == 2:
                    face = relight(face, 0.5, 0)
                '''
                # face = relight(face, random.uniform(0.5, 1.5), random.randint(-50, 50))
                cv2.imshow('img', face)
                cv2.imwrite(output_dir+'/'+str(index)+'.jpg', face)
                index+=1
                key = cv2.waitKey(30) & 0xff
                if key == 27:
                    sys.exit(0)
--------------------------------------------------------------------------------
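
A note on the cascade path: every script in this repo loads the Haar cascade from a hard-coded Anaconda path, which breaks as soon as the project moves to another machine. As a hedged aside (my own sketch, assuming the opencv-python wheel that ships the cv2.data helper subpackage, not part of the original code), the bundled cascade directory can be located like this:

```python
# Sketch only: find the bundled Haar cascade without a machine-specific path.
import os
import cv2  # plain import here so that the cv2.data subpackage is visible

cascade_path = os.path.join(cv2.data.haarcascades, 'haarcascade_frontalface_default.xml')
haar = cv2.CascadeClassifier(cascade_path)
print(cascade_path, not haar.empty())  # True means the cascade file was found and loaded
```
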
/face_1.py:
--------------------------------------------------------------------------------

# Capture face images of the machine owner (the my_faces training data)

from cv2 import cv2
import os
import sys
import random

out_dir = './my_faces'
if not os.path.exists(out_dir):
    os.makedirs(out_dir)


# Adjust brightness and contrast
def relight(img, alpha=1, bias=0):
    w = img.shape[1]
    h = img.shape[0]
    #image = []
    for i in range(0,w):
        for j in range(0,h):
            for c in range(3):
                tmp = int(img[j,i,c]*alpha + bias)
                if tmp > 255:
                    tmp = 255
                elif tmp < 0:
                    tmp = 0
                img[j,i,c] = tmp
    return img


# Load the Haar cascade face classifier
haar = cv2.CascadeClassifier(r'E:\ProgramData\Anaconda3\envs\tenserflow02\Lib\site-packages\cv2\data\haarcascade_frontalface_default.xml')

# Open the camera; the argument is the input stream (a camera index or a video file)
camera = cv2.VideoCapture(0)

n = 1
while 1:
    if (n <= 5000):
        print("It's processing %s image." % n)
        # Read a frame
        success, img = camera.read()
        gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = haar.detectMultiScale(gray_img, 1.3, 5)
        for f_x, f_y, f_w, f_h in faces:
            face = img[f_y:f_y+f_h, f_x:f_x+f_w]
            face = cv2.resize(face, (64,64))
            '''
            if n % 3 == 1:
                face = relight(face, 1, 50)
            elif n % 3 == 2:
                face = relight(face, 0.5, 0)
            '''
            face = relight(face, random.uniform(0.5, 1.5), random.randint(-50, 50))
            cv2.imshow('img', face)
            cv2.imwrite(out_dir+'/'+str(n)+'.jpg', face)
            n+=1
            key = cv2.waitKey(30) & 0xff
            if key == 27:
                break
    else:
        break

--------------------------------------------------------------------------------
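
A note on the relight() helper above: it rescales every pixel in pure Python loops, which is slow even for 64x64 crops taken from a live camera. As a hedged aside (my own sketch, not part of the repo), the same alpha*pixel + bias adjustment with clipping can be done on the whole array at once with NumPy:

```python
import numpy as np

def relight_vectorized(img, alpha=1.0, bias=0):
    """Same brightness/contrast change as relight(), computed on the full array."""
    out = img.astype(np.float32) * alpha + bias
    return np.clip(out, 0, 255).astype(np.uint8)  # clamp to the valid pixel range, back to uint8
```
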
/face_4.py:
--------------------------------------------------------------------------------
# Recognize the owner; lock the screen if the check fails
from __future__ import absolute_import, division, print_function
import tensorflow as tf

from cv2 import cv2
import os
import sys
import random
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.metrics import cohen_kappa_score
from ctypes import *
import time
import sys


def getPaddingSize(img):
    h, w, _ = img.shape
    top, bottom, left, right = (0,0,0,0)
    longest = max(h, w)

    if w < longest:
        tmp = longest - w
        # // is integer division
        left = tmp // 2
        right = tmp - left
    elif h < longest:
        tmp = longest - h
        top = tmp // 2
        bottom = tmp - top
    else:
        pass
    return top, bottom, left, right

def readData(path, h,w,imgs,labs):
    for filename in os.listdir(path):
        if filename.endswith('.jpg'):
            filename = path + '/' + filename

            img = cv2.imread(filename)
            # cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            top,bottom,left,right = getPaddingSize(img)
            # Pad the image edges so it becomes square
            img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=[0,0,0])
            img = cv2.resize(img, (h, w))

            imgs.append(img)
            labs.append(path)
    return imgs,labs
# Adjust brightness and contrast
def relight(img, alpha=1, bias=0):
    w = img.shape[1]
    h = img.shape[0]
    #image = []
    for i in range(0,w):
        for j in range(0,h):
            for c in range(3):
                tmp = int(img[j,i,c]*alpha + bias)
                if tmp > 255:
                    tmp = 255
                elif tmp < 0:
                    tmp = 0
                img[j,i,c] = tmp
    return img

out_dir = './temp_faces'
if not os.path.exists(out_dir):
    os.makedirs(out_dir)

# Load the Haar cascade face classifier
haar = cv2.CascadeClassifier(r'E:\ProgramData\Anaconda3\envs\tenserflow02\Lib\site-packages\cv2\data\haarcascade_frontalface_default.xml')

# Open the camera; the argument is the input stream (a camera index or a video file)
camera = cv2.VideoCapture(0)
n = 1

start = time.clock()
while 1:
    if (n <= 20):
        print("It's processing %s image." % n)
        # Read a frame
        success, img = camera.read()
        gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = haar.detectMultiScale(gray_img, 1.3, 5)
        for f_x, f_y, f_w, f_h in faces:
            face = img[f_y:f_y+f_h, f_x:f_x+f_w]
            face = cv2.resize(face, (64,64))
            # face = relight(face, random.uniform(0.5, 1.5), random.randint(-50, 50))
            cv2.imshow('img', face)
            cv2.imwrite(out_dir+'/'+str(n)+'.jpg', face)
            n+=1
            key = cv2.waitKey(30) & 0xff
            if key == 27:
                break
        end = time.clock()
        print(str(end-start))
        if (end-start)>10:
            # No face captured within 10 seconds: lock the workstation and exit
            user32 = windll.LoadLibrary('user32.dll')
            user32.LockWorkStation()
            sys.exit()
    else:
        break


my_faces_path = out_dir
size = 64

imgs = []
labs = []
imgs,labs=readData(my_faces_path,size,size,imgs,labs)
# Convert the images and labels to arrays
imgs = np.array(imgs)
# labs = np.array([[0,1] if lab == my_faces_path else [1,0] for lab in labs])
labs = np.array([[1] if lab == my_faces_path else [0] for lab in labs])
# Randomly split into training and test sets
train_x,test_x,train_y,test_y = train_test_split(imgs, labs, test_size=0.9, random_state=random.randint(0,100))

# Shape: number of images, height, width, channels
train_x = train_x.reshape(train_x.shape[0], size, size, 3)
test_x = test_x.reshape(test_x.shape[0], size, size, 3)

# Scale pixel values into [0, 1]
train_x = train_x.astype('float32')/255.0
test_x = test_x.astype('float32')/255.0

restored_model = tf.keras.models.load_model(r'C:\Users\Administrator\Desktop\my_model.h5')
pre_result=restored_model.predict_classes(test_x)
print(pre_result.shape)
print(pre_result)
acc=sum(pre_result==1)/pre_result.shape[0]
print("Similarity: "+str(acc))


if acc > 0.8:
    print("You are 张睿祥")
else:
    user32 = windll.LoadLibrary('user32.dll')
    user32.LockWorkStation()

--------------------------------------------------------------------------------
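
A portability note on the timing above: face_4.py measures the 10-second timeout with time.clock(), which works on the Python 3.7.6 interpreter listed in the README environment but was removed in Python 3.8. On a newer interpreter the equivalent would be (a hedged sketch, not the original code):

```python
import time

start = time.perf_counter()      # monotonic timer, available since Python 3.3
# ... capture loop as in face_4.py ...
elapsed = time.perf_counter() - start
if elapsed > 10:
    pass  # same lock-and-exit branch as in the script above
```
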
/face_2.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function

import tensorflow as tf
from cv2 import cv2
import numpy as np
import os
import random
import sys
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
# from keras import backend as K

def getPaddingSize(img):
    h, w, _ = img.shape
    top, bottom, left, right = (0,0,0,0)
    longest = max(h, w)

    if w < longest:
        tmp = longest - w
        # // is integer division
        left = tmp // 2
        right = tmp - left
    elif h < longest:
        tmp = longest - h
        top = tmp // 2
        bottom = tmp - top
    else:
        pass
    return top, bottom, left, right

def readData(path, h,w,imgs,labs):
    for filename in os.listdir(path):
        if filename.endswith('.jpg'):
            filename = path + '/' + filename

            img = cv2.imread(filename)
            # cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            top,bottom,left,right = getPaddingSize(img)
            # Pad the image edges so it becomes square
            img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=[0,0,0])
            img = cv2.resize(img, (h, w))

            imgs.append(img)
            labs.append(path)
    return imgs,labs




def get_model():
    model = tf.keras.Sequential()
    # First conv layer: 128 filters, 3x3 kernels, ReLU activation
    model.add(tf.keras.layers.Conv2D(128, kernel_size=3, activation='relu', input_shape=(64, 64, 3)))
    # Second conv layer
    model.add(tf.keras.layers.Conv2D(64, kernel_size=3, activation='relu'))
    # Flatten the feature maps into a 1-D vector (essentially a reshape) for the Dense layer
    model.add(tf.keras.layers.Flatten())
    # Fully connected layer of the CNN: gathers the features above and does the classification
    model.add(tf.keras.layers.Dense(40, activation='softmax'))
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model

def facemain():
    my_faces_path = './my_faces'
    other_faces_path = './other_faces'
    size = 64

    imgs = []
    labs = []
    imgs,labs=readData(my_faces_path,size,size,imgs,labs)
    imgs,labs=readData(other_faces_path,size,size,imgs,labs)


    # Convert the images and labels to arrays
    imgs = np.array(imgs)
    # labs = np.array([[0,1] if lab == my_faces_path else [1,0] for lab in labs])
    labs = np.array([[1] if lab == my_faces_path else [0] for lab in labs])
    print(imgs.shape)
    print(labs.shape)
    # Randomly split into training and test sets
    train_x,test_x,train_y,test_y = train_test_split(imgs, labs, test_size=0.8, random_state=random.randint(0,100))

    # Shape: number of images, height, width, channels
    train_x = train_x.reshape(train_x.shape[0], size, size, 3)
    test_x = test_x.reshape(test_x.shape[0], size, size, 3)

    # Scale pixel values into [0, 1]
    train_x = train_x.astype('float32')/255.0
    test_x = test_x.astype('float32')/255.0

    print('train size:%s, test size:%s' % (len(train_x), len(test_x)))
    # Mini-batches of 100 images (only used by the legacy TF1 code kept below)
    batch_size = 100
    num_batch = len(train_x) // batch_size


    model=get_model()
    model.fit(train_x, train_y, epochs=5)
    model.save(r'C:\Users\Administrator\Desktop\my_model.h5')

    # restored_model = tf.keras.models.load_model(r'C:\Users\Administrator\Desktop\my_model.h5')
    # pre_result=restored_model.predict_classes(test_x)
    # print(classification_report(test_y, pre_result))

facemain()
# predict_y = model.predict(test_x)




# Legacy TensorFlow 1.x implementation, kept commented out for reference:

# x = K.placeholder(tf.float32, [None, size, size, 3])
# y_ = K.placeholder(tf.float32, [None, 2])

# keep_prob_5 = K.placeholder(tf.float32)
# keep_prob_75 = K.placeholder(tf.float32)

# def weightVariable(shape):
#     init = tf.random_normal(shape, stddev=0.01)
#     return tf.Variable(init)

# def biasVariable(shape):
#     init = tf.random_normal(shape)
#     return tf.Variable(init)

# def conv2d(x, W):
#     return tf.nn.conv2d(x, W, strides=[1,1,1,1], padding='SAME')

# def maxPool(x):
#     return tf.nn.max_pool(x, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')

# def dropout(x, keep):
#     return tf.nn.dropout(x, keep)

# def cnnLayer():
#     # First layer
#     W1 = weightVariable([3,3,3,32]) # kernel size (3,3), 3 input channels, 32 output channels
#     b1 = biasVariable([32])
#     # Convolution
#     conv1 = tf.nn.relu(conv2d(x, W1) + b1)
#     # Pooling
#     pool1 = maxPool(conv1)
#     # Reduce overfitting: randomly drop some activations (dropout)
#     drop1 = dropout(pool1, keep_prob_5)

#     # Second layer
#     W2 = weightVariable([3,3,32,64])
#     b2 = biasVariable([64])
#     conv2 = tf.nn.relu(conv2d(drop1, W2) + b2)
#     pool2 = maxPool(conv2)
#     drop2 = dropout(pool2, keep_prob_5)

#     # Third layer
#     W3 = weightVariable([3,3,64,64])
#     b3 = biasVariable([64])
#     conv3 = tf.nn.relu(conv2d(drop2, W3) + b3)
#     pool3 = maxPool(conv3)
#     drop3 = dropout(pool3, keep_prob_5)

#     # Fully connected layer
#     Wf = weightVariable([8*16*32, 512])
#     bf = biasVariable([512])
#     drop3_flat = tf.reshape(drop3, [-1, 8*16*32])
#     dense = tf.nn.relu(tf.matmul(drop3_flat, Wf) + bf)
#     dropf = dropout(dense, keep_prob_75)

#     # Output layer
#     Wout = weightVariable([512,2])
#     bout = weightVariable([2])
#     #out = tf.matmul(dropf, Wout) + bout
#     out = tf.add(tf.matmul(dropf, Wout), bout)
#     return out

# def cnnTrain():
#     out = cnnLayer()

#     cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=out, labels=y_))

#     train_step = tf.train.AdamOptimizer(0.01).minimize(cross_entropy)
#     # Compare predicted and true labels, then take the mean; tf.cast converts the type
#     accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(out, 1), tf.argmax(y_, 1)), tf.float32))
#     # Save loss and accuracy for TensorBoard
#     tf.summary.scalar('loss', cross_entropy)
#     tf.summary.scalar('accuracy', accuracy)
#     merged_summary_op = tf.summary.merge_all()
#     # Initialize the saver
#     saver = tf.train.Saver()

#     with tf.Session() as sess:

#         sess.run(tf.global_variables_initializer())

#         summary_writer = tf.summary.FileWriter('./tmp', graph=tf.get_default_graph())

#         for n in range(10):
#             # Take batch_size (128) images at a time
#             for i in range(num_batch):
#                 batch_x = train_x[i*batch_size : (i+1)*batch_size]
#                 batch_y = train_y[i*batch_size : (i+1)*batch_size]
#                 # Run the training step; three ops are evaluated and three values returned
#                 _,loss,summary = sess.run([train_step, cross_entropy, merged_summary_op],
#                                           feed_dict={x:batch_x,y_:batch_y, keep_prob_5:0.5,keep_prob_75:0.75})
#                 summary_writer.add_summary(summary, n*num_batch+i)
#                 # Print the loss
#                 print(n*num_batch+i, loss)

#                 if (n*num_batch+i) % 100 == 0:
#                     # Accuracy on the test data
#                     acc = accuracy.eval({x:test_x, y_:test_y, keep_prob_5:1.0, keep_prob_75:1.0})
#                     print(n*num_batch+i, acc)
#                     # Save and exit once accuracy exceeds 0.98
#                     if acc > 0.98 and n > 2:
#                         saver.save(sess, './train_faces.model', global_step=n*num_batch+i)
#                         sys.exit(0)
#     print('accuracy less 0.98, exited!')

# cnnTrain()
--------------------------------------------------------------------------------
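
A side note on get_model() in face_2.py above: the final Dense layer has 40 softmax outputs, while the labels passed to sparse_categorical_crossentropy are only ever 0 (other_faces) or 1 (my_faces), so 38 of those outputs never correspond to a real class. The script trains and runs as written; a two-output head is the minimal alternative for this binary setup (my sketch, not the author's configuration):

```python
import tensorflow as tf

def get_binary_model():
    # Same two conv layers as face_2.py, but with a 2-way softmax head:
    # class 0 = other_faces, class 1 = my_faces (integer labels, sparse loss).
    model = tf.keras.Sequential([
        tf.keras.layers.Conv2D(128, kernel_size=3, activation='relu', input_shape=(64, 64, 3)),
        tf.keras.layers.Conv2D(64, kernel_size=3, activation='relu'),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(2, activation='softmax'),
    ])
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model
```
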
/README.md:
--------------------------------------------------------------------------------
# CNN face recognition to lock and unlock the Windows 10 screen, built on OpenCV and TensorFlow 2.0
# Foreword
A low-tech take on Windows Hello: there is no 3D depth camera here, so this is a program that identifies the machine owner from ordinary camera images.
At unlock time it checks whether the person in front of the camera is the owner. If it is not the owner, or nobody shows up within 10 s, the screen is locked; if it is the owner, the machine is used normally. (The stock password-free login screen is kept.)

Face detection uses OpenCV's cv2.CascadeClassifier.

The model is the following:
```
_________________________________________________________________
Layer (type)                 Output Shape              Param #
=================================================================
conv2d (Conv2D)              (None, 62, 62, 128)       3584
_________________________________________________________________
conv2d_1 (Conv2D)            (None, 60, 60, 64)        73792
_________________________________________________________________
flatten (Flatten)            (None, 230400)            0
_________________________________________________________________
dense (Dense)                (None, 40)                9216040
=================================================================
Total params: 9,293,416
Trainable params: 9,293,416
Non-trainable params: 0
_________________________________________________________________
None
```
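
The parameter counts in the summary above follow directly from the layer shapes. As a quick sanity check (plain arithmetic added here, not code from the repo):

```python
# Conv2D params = kernel_h * kernel_w * in_channels * filters + filters (biases)
conv2d_params   = 3 * 3 * 3 * 128 + 128    # 3584
conv2d_1_params = 3 * 3 * 128 * 64 + 64    # 73792
# Dense params = flattened inputs * units + units, with 60*60*64 = 230400 inputs
dense_params    = 230400 * 40 + 40         # 9216040
print(conv2d_params + conv2d_1_params + dense_params)  # 9293416
```
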

The basic system consists of four parts:

face_1.py|face_2.py|face_3.py|face_4.py
-|-|-|-
Capture training data of your own face|Run CNN training on the data produced by face_1.py and face_3.py, and save the model|Build face data from a known set of other people's faces|The final detection program

## Python environment
Everything runs under tensorflow 2.0 (GPU build).
A small gripe: parts of the tensorflow 2.0 keras module come with no code-completion hints, which is not very friendly to newcomers.
conda list:

Name|Version|Build Channel
-|-|-
_tflow_select|2.1.0|gpu
absl-py|0.8.1|py37_0
altgraph|0.17|pypi_0 pypi
astor|0.8.0|py37_0
astroid|2.3.3|py37_0
backcall|0.1.0|py37_0
blas|1.0|mkl
ca-certificates|2019.11.27|0
certifi|2019.11.28|py37_0
colorama|0.4.3|py_0
cudatoolkit|10.0.130|0
cudnn|7.6.5|cuda10.0_0
cycler|0.10.0|pypi_0 pypi
decorator|4.4.1|py_0
future|0.18.2|pypi_0 pypi
gast|0.2.2|py37_0
google-pasta|0.1.8|py_0
grpcio|1.16.1|py37h351948d_1
h5py|2.9.0|py37h5e291fa_0
hdf5|1.10.4|h7ebc959_0
icc_rt|2019.0.0|h0cc432a_1
intel-openmp|2019.4|245
ipykernel|5.1.3|py37h39e3cac_1
ipython|7.11.1|py37h39e3cac_0
ipython_genutils|0.2.0|py37_0
isort|4.3.21|py37_0
jedi|0.15.2|py37_0
joblib|0.14.1|py_0
jupyter_client|5.3.4|py37_0
jupyter_core|4.6.1|py37_0
keras|2.3.1|pypi_0 pypi
keras-applications|1.0.8|py_0
keras-preprocessing|1.1.0|py_1
kiwisolver|1.1.0|pypi_0 pypi
lazy-object-proxy|1.4.3|py37he774522_0
libprotobuf|3.11.2|h7bd577a_0
libsodium|1.0.16|h9d3ae62_0
markdown|3.1.1|py37_0
matplotlib|3.1.2|pypi_0 pypi
mccabe|0.6.1|py37_1
mkl|2019.4|245
mkl-service|2.3.0|py37hb782905_0
mkl_fft|1.0.15|py37h14836fe_0
mkl_random|1.1.0|py37h675688f_0
mouseinfo|0.1.2|pypi_0 pypi
numpy|1.17.4|py37h4320e6b_0
numpy-base|1.17.4|py37hc3f5095_0
opencv-python|4.1.2.30|pypi_0 pypi
openssl|1.1.1d|he774522_3
opt_einsum|3.1.0|py_0
pandas|0.25.3|pypi_0 pypi
parso|0.5.2|py_0
pefile|2019.4.18|pypi_0 pypi
pickleshare|0.7.5|py37_0
pillow|7.0.0|pypi_0 pypi
pip|19.3.1|py37_0
prompt_toolkit|3.0.2|py_0
protobuf|3.11.2|py37h33f27b4_0
pyautogui|0.9.48|pypi_0 pypi
pygetwindow|0.0.8|pypi_0 pypi
pygments|2.5.2|py_0
pyinstaller|3.6|pypi_0 pypi
pylint|2.4.4|py37_0
pymsgbox|1.0.7|pypi_0 pypi
pyparsing|2.4.6|pypi_0 pypi
pyperclip|1.7.0|pypi_0 pypi
pyreadline|2.1|py37_1
pyrect|0.1.4|pypi_0 pypi
pyscreeze|0.1.26|pypi_0 pypi
python|3.7.6|h60c2a47_2
python-dateutil|2.8.1|py_0
pytweening|1.0.3|pypi_0 pypi
pytz|2019.3|pypi_0 pypi
pywin32|227|py37he774522_1
pywin32-ctypes|0.2.0|pypi_0 pypi
pyyaml|5.3|pypi_0 pypi
pyzmq|18.1.0|py37ha925a31_0
scikit-learn|0.22.1|py37h6288b17_0
scipy|1.3.2|py37h29ff71c_0
setuptools|44.0.0|py37_0
six|1.13.0|py37_0
sqlite|3.30.1|he774522_0
tensorboard|2.0.0|pyhb38c66f_1
tensorflow|2.0.0|gpu_py37h57d29ca_0
tensorflow-base|2.0.0|gpu_py37h390e234_0
tensorflow-estimator|2.0.0|pyh2649769_0
tensorflow-gpu|2.0.0|h0d30ee6_0
termcolor|1.1.0|py37_1
tornado|6.0.3|py37he774522_0
traitlets|4.3.3|py37_0
vc|14.1|h0510ff6_4
vs2015_runtime|14.16.27012|hf0eaf9b_1
wcwidth|0.1.7|py37_0
werkzeug|0.16.0|py_0
wheel|0.33.6|py37_0
wincertstore|0.2|py37_0
wrapt|1.11.2|py37he774522_0
zeromq|4.3.1|h33f27b4_3
zlib|1.2.11|h62dcd97_3
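
With this environment activated, it is worth confirming that the GPU build actually sees the CUDA device before training. A minimal check for TensorFlow 2.0 (my addition, not part of the original repo):

```python
import tensorflow as tf

print(tf.__version__)  # expected: 2.0.0 with the conda environment above
# In TF 2.0 the device listing still lives under tf.config.experimental
print(tf.config.experimental.list_physical_devices('GPU'))  # expect one GPU entry
```
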
140 | 141 | ## 首先制作自己训练数据: 142 | 人脸数据存储至my_faces 可自己命名 143 | 144 | 145 | face_1.py 146 | ```python 147 | 148 | # 制作自己人脸数据 149 | 150 | from cv2 import cv2 151 | import os 152 | import sys 153 | import random 154 | 155 | out_dir = './my_faces' 156 | if not os.path.exists(out_dir): 157 | os.makedirs(out_dir) 158 | 159 | 160 | # 改变亮度与对比度 161 | def relight(img, alpha=1, bias=0): 162 | w = img.shape[1] 163 | h = img.shape[0] 164 | #image = [] 165 | for i in range(0,w): 166 | for j in range(0,h): 167 | for c in range(3): 168 | tmp = int(img[j,i,c]*alpha + bias) 169 | if tmp > 255: 170 | tmp = 255 171 | elif tmp < 0: 172 | tmp = 0 173 | img[j,i,c] = tmp 174 | return img 175 | 176 | 177 | # 获取分类器 178 | haar = cv2.CascadeClassifier(r'E:\ProgramData\Anaconda3\envs\tenserflow02\Lib\site-packages\cv2\data\haarcascade_frontalface_default.xml') 179 | 180 | # 打开摄像头 参数为输入流,可以为摄像头或视频文件 181 | camera = cv2.VideoCapture(0) 182 | 183 | n = 1 184 | while 1: 185 | if (n <= 5000): 186 | print('It`s processing %s image.' % n) 187 | # 读帧 188 | success, img = camera.read() 189 | gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) 190 | faces = haar.detectMultiScale(gray_img, 1.3, 5) 191 | for f_x, f_y, f_w, f_h in faces: 192 | face = img[f_y:f_y+f_h, f_x:f_x+f_w] 193 | face = cv2.resize(face, (64,64)) 194 | 195 | face = relight(face, random.uniform(0.5, 1.5), random.randint(-50, 50)) 196 | cv2.imshow('img', face) 197 | cv2.imwrite(out_dir+'/'+str(n)+'.jpg', face) 198 | n+=1 199 | key = cv2.waitKey(30) & 0xff 200 | if key == 27: 201 | break 202 | else: 203 | break 204 | 205 | 206 | ``` 207 | ## 制作他人训练数据: 208 | 需要收集一个其他人脸的图片集,只要不是自己的人脸都可以,可以在网上找到,这里我给出一个我用到的图片集: 209 | 网站地址:http://vis-www.cs.umass.edu/lfw/ 210 | 图片集下载:http://vis-www.cs.umass.edu/lfw/lfw.tgz 211 | 先将下载的图片集,解压到项目目录下的lfw目录下,也可以自己指定目录(修改代码中的input_dir变量) 212 | 213 | face_3.py 214 | ```python 215 | # -*- codeing: utf-8 -*- 216 | import sys 217 | import os 218 | from cv2 import cv2 219 | 220 | input_dir = './lfw' 221 | output_dir = './other_faces' 222 | size = 64 223 | 224 | if not os.path.exists(output_dir): 225 | os.makedirs(output_dir) 226 | 227 | def close_cv2(): 228 | """删除cv窗口""" 229 | while(1): 230 | if(cv2.waitKey(100)==27): 231 | break 232 | cv2.destroyAllWindows() 233 | # 获取分类器 234 | haar = cv2.CascadeClassifier(r'E:\ProgramData\Anaconda3\envs\tenserflow02\Lib\site-packages\cv2\data\haarcascade_frontalface_default.xml') 235 | 236 | index = 1 237 | for (path, dirnames, filenames) in os.walk(input_dir): 238 | for filename in filenames: 239 | if filename.endswith('.jpg'): 240 | print('Being processed picture %s' % index) 241 | img_path = path+'/'+filename 242 | # # 从文件读取图片 243 | print(img_path) 244 | img = cv2.imread(img_path) 245 | # cv2.imshow(" ",img) 246 | # close_cv2() 247 | # 转为灰度图片 248 | 249 | gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) 250 | faces = haar.detectMultiScale(gray_img, 1.3, 5) 251 | for f_x, f_y, f_w, f_h in faces: 252 | face = img[f_y:f_y+f_h, f_x:f_x+f_w] 253 | face = cv2.resize(face, (64,64)) 254 | 255 | # face = relight(face, random.uniform(0.5, 1.5), random.randint(-50, 50)) 256 | cv2.imshow('img', face) 257 | cv2.imwrite(output_dir+'/'+str(index)+'.jpg', face) 258 | index+=1 259 | key = cv2.waitKey(30) & 0xff 260 | if key == 27: 261 | sys.exit(0) 262 | ``` 263 | # 接下来进行数据训练 264 | 读取上文的 my_faces和other_faces文件夹下的训练数据进行训练 265 | 266 | face_2.py 267 | ```python 268 | # -*- codeing: utf-8 -*- 269 | from __future__ import absolute_import, division, print_function 270 | 271 | import tensorflow as tf 272 | from cv2 import cv2 273 | 
# Next, train on the data
Read the training data from the my_faces and other_faces folders produced above and train the CNN on it.

face_2.py
```python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function

import tensorflow as tf
from cv2 import cv2
import numpy as np
import os
import random
import sys
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
# from keras import backend as K

def getPaddingSize(img):
    h, w, _ = img.shape
    top, bottom, left, right = (0,0,0,0)
    longest = max(h, w)

    if w < longest:
        tmp = longest - w
        # // is integer division
        left = tmp // 2
        right = tmp - left
    elif h < longest:
        tmp = longest - h
        top = tmp // 2
        bottom = tmp - top
    else:
        pass
    return top, bottom, left, right

def readData(path, h,w,imgs,labs):
    for filename in os.listdir(path):
        if filename.endswith('.jpg'):
            filename = path + '/' + filename

            img = cv2.imread(filename)
            # cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            top,bottom,left,right = getPaddingSize(img)
            # Pad the image edges so it becomes square
            img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=[0,0,0])
            img = cv2.resize(img, (h, w))

            imgs.append(img)
            labs.append(path)
    return imgs,labs




def get_model():
    model = tf.keras.Sequential()
    # First conv layer: 128 filters, 3x3 kernels, ReLU activation
    model.add(tf.keras.layers.Conv2D(128, kernel_size=3, activation='relu', input_shape=(64, 64, 3)))
    # Second conv layer
    model.add(tf.keras.layers.Conv2D(64, kernel_size=3, activation='relu'))
    # Flatten the feature maps into a 1-D vector (essentially a reshape) for the Dense layer
    model.add(tf.keras.layers.Flatten())
    # Fully connected layer of the CNN: gathers the features above and does the classification
    model.add(tf.keras.layers.Dense(40, activation='softmax'))
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model

def facemain():
    my_faces_path = './my_faces'
    other_faces_path = './other_faces'
    size = 64

    imgs = []
    labs = []
    imgs,labs=readData(my_faces_path,size,size,imgs,labs)
    imgs,labs=readData(other_faces_path,size,size,imgs,labs)


    # Convert the images and labels to arrays
    imgs = np.array(imgs)
    # labs = np.array([[0,1] if lab == my_faces_path else [1,0] for lab in labs])
    labs = np.array([[1] if lab == my_faces_path else [0] for lab in labs])
    print(imgs.shape)
    print(labs.shape)
    # Randomly split into training and test sets
    train_x,test_x,train_y,test_y = train_test_split(imgs, labs, test_size=0.8, random_state=random.randint(0,100))

    # Shape: number of images, height, width, channels
    train_x = train_x.reshape(train_x.shape[0], size, size, 3)
    test_x = test_x.reshape(test_x.shape[0], size, size, 3)

    # Scale pixel values into [0, 1]
    train_x = train_x.astype('float32')/255.0
    test_x = test_x.astype('float32')/255.0

    print('train size:%s, test size:%s' % (len(train_x), len(test_x)))
    # Mini-batches of 100 images
    batch_size = 100
    num_batch = len(train_x) // batch_size


    model=get_model()
    model.fit(train_x, train_y, epochs=5)
    model.save(r'C:\Users\Administrator\Desktop\my_model.h5')


facemain()

```
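
face_2.py saves the model without scoring it on the held-out split; the commented-out lines at the end of face_2.py hint at how to do that. A hedged sketch along those lines (it assumes the test_x/test_y arrays from the split inside facemain(), and predict_classes(), which Sequential models still have in TF 2.0 but lost in later releases):

```python
import tensorflow as tf
from sklearn.metrics import classification_report

def evaluate_saved_model(test_x, test_y,
                         model_path=r'C:\Users\Administrator\Desktop\my_model.h5'):
    # Reload the trained model and report per-class precision/recall on the held-out data
    restored_model = tf.keras.models.load_model(model_path)
    pre_result = restored_model.predict_classes(test_x)
    print(classification_report(test_y, pre_result))
```
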

# Finally, predict whether the person is the owner and decide whether to lock the screen
face_4.py
```python
# Recognize the owner; lock the screen if the check fails
from __future__ import absolute_import, division, print_function
import tensorflow as tf

from cv2 import cv2
import os
import sys
import random
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.metrics import cohen_kappa_score
from ctypes import *
import time
import sys


def getPaddingSize(img):
    h, w, _ = img.shape
    top, bottom, left, right = (0,0,0,0)
    longest = max(h, w)

    if w < longest:
        tmp = longest - w
        # // is integer division
        left = tmp // 2
        right = tmp - left
    elif h < longest:
        tmp = longest - h
        top = tmp // 2
        bottom = tmp - top
    else:
        pass
    return top, bottom, left, right

def readData(path, h,w,imgs,labs):
    for filename in os.listdir(path):
        if filename.endswith('.jpg'):
            filename = path + '/' + filename

            img = cv2.imread(filename)
            # cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            top,bottom,left,right = getPaddingSize(img)
            # Pad the image edges so it becomes square
            img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=[0,0,0])
            img = cv2.resize(img, (h, w))

            imgs.append(img)
            labs.append(path)
    return imgs,labs
# Adjust brightness and contrast
def relight(img, alpha=1, bias=0):
    w = img.shape[1]
    h = img.shape[0]
    #image = []
    for i in range(0,w):
        for j in range(0,h):
            for c in range(3):
                tmp = int(img[j,i,c]*alpha + bias)
                if tmp > 255:
                    tmp = 255
                elif tmp < 0:
                    tmp = 0
                img[j,i,c] = tmp
    return img

out_dir = './temp_faces'
if not os.path.exists(out_dir):
    os.makedirs(out_dir)

# Load the Haar cascade face classifier
haar = cv2.CascadeClassifier(r'E:\ProgramData\Anaconda3\envs\tenserflow02\Lib\site-packages\cv2\data\haarcascade_frontalface_default.xml')

# Open the camera; the argument is the input stream (a camera index or a video file)
camera = cv2.VideoCapture(0)
n = 1

start = time.clock()
while 1:
    if (n <= 20):
        print("It's processing %s image." % n)
        # Read a frame
        success, img = camera.read()
        gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = haar.detectMultiScale(gray_img, 1.3, 5)
        for f_x, f_y, f_w, f_h in faces:
            face = img[f_y:f_y+f_h, f_x:f_x+f_w]
            face = cv2.resize(face, (64,64))
            # face = relight(face, random.uniform(0.5, 1.5), random.randint(-50, 50))
            cv2.imshow('img', face)
            cv2.imwrite(out_dir+'/'+str(n)+'.jpg', face)
            n+=1
            key = cv2.waitKey(30) & 0xff
            if key == 27:
                break
        end = time.clock()
        print(str(end-start))
        if (end-start)>10:
            # No face captured within 10 seconds: lock the workstation and exit
            user32 = windll.LoadLibrary('user32.dll')
            user32.LockWorkStation()
            sys.exit()
    else:
        break


my_faces_path = out_dir
size = 64

imgs = []
labs = []
imgs,labs=readData(my_faces_path,size,size,imgs,labs)
# Convert the images and labels to arrays
imgs = np.array(imgs)
# labs = np.array([[0,1] if lab == my_faces_path else [1,0] for lab in labs])
labs = np.array([[1] if lab == my_faces_path else [0] for lab in labs])
# Randomly split into training and test sets
train_x,test_x,train_y,test_y = train_test_split(imgs, labs, test_size=0.9, random_state=random.randint(0,100))

# Shape: number of images, height, width, channels
train_x = train_x.reshape(train_x.shape[0], size, size, 3)
test_x = test_x.reshape(test_x.shape[0], size, size, 3)

# Scale pixel values into [0, 1]
train_x = train_x.astype('float32')/255.0
test_x = test_x.astype('float32')/255.0

restored_model = tf.keras.models.load_model(r'C:\Users\Administrator\Desktop\my_model.h5')
pre_result=restored_model.predict_classes(test_x)
print(pre_result.shape)
print(pre_result)
acc=sum(pre_result==1)/pre_result.shape[0]
print("Similarity: "+str(acc))

if acc > 0.8:
    print("You are ***")
else:
    user32 = windll.LoadLibrary('user32.dll')
    user32.LockWorkStation()
```
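
What face_4.py prints as "similarity" is simply the fraction of the captured face crops that the model assigns to class 1, i.e. the owner; if that fraction does not exceed 0.8, the workstation is locked through the Win32 API. Boiled down to a small Windows-only helper (a sketch of the logic above, not a replacement for the script):

```python
from ctypes import windll
import numpy as np

def lock_if_not_owner(pre_result, threshold=0.8):
    """pre_result: integer class predictions for the captured crops (1 = owner)."""
    similarity = np.mean(np.asarray(pre_result) == 1)
    print("Similarity:", similarity)
    if similarity <= threshold:
        windll.user32.LockWorkStation()  # same user32 call face_4.py makes
```
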

## Last step: add face_4.py to the Task Scheduler library so it runs on Windows unlock
### The myface.bat file
It activates the Anaconda environment and changes directory to where face_4.py lives:
```
call activate tenserflow02
cd /d E:\ziliao\LearningPy\face
python face_4.py
```
### The hide.vbs file hides the cmd window while the program runs
```
Set ws = CreateObject("Wscript.Shell")
ws.run "cmd /c E:\ziliao\LearningPy\face\myface.bat",vbhide
```
### Add hide.vbs to the Task Scheduler library
Create a task:

General tab|Triggers|Actions
-|-|-
Run with highest privileges; configure for Windows 10|Add a trigger: "On workstation unlock"|Add an action that runs hide.vbs

# References:
* https://www.cnblogs.com/mu---mu/p/FaceRecognition-tensorflow.html
* https://github.com/saksham-jain/Lock-Unlock-Laptop-PC-Screen-Using-Face-Recognition

# CSDN: https://blog.csdn.net/weixin_42348202/article/details/104071199
--------------------------------------------------------------------------------