# ── Repository layout ─────────────────────────────────────────────────────
# ├── README.md ├── face.py ├── facetrace.py ├── faceui.py ├── ico.ico
# ├── ico.png ├── main.py ├── pface.py └── trygrabcut.py
#
# ── /README.md ────────────────────────────────────────────────────────────
# # PFace
# face-p-easy:Python大作业实现视频人脸p图或替换
#
# 运行Pface.py文件,打开UI界面。
# pface.py继承于faceui.py实现基于PyQt的界面
# 默认替换模式,包括face.py(替换模块)、main.py(视频截取与合成并调用face.py)
# 勾选P图模式,包括facetrace.py(启动视频并人脸追踪)、trygrabcut.py(边缘检测)
#
# 模型文件shape_predictor_68_face_landmarks.dat(dlib库提取68个人脸特征点的模型文件,需要下载到项目路径下)
#
# 需要的库文件:dlib、cv2、PyQt、numpy、shutil

# ── /face.py ──────────────────────────────────────────────────────────────
"""Face-swap module: align a source face onto a target frame and blend it.

Based on the classic dlib 68-landmark face-swap pipeline: detect landmarks,
solve an orthogonal-Procrustes alignment, warp, colour-correct, and blend
with a feathered mask.
"""
import cv2
import dlib
import numpy

import sys

SCALE_FACTOR = 1        # resize factor applied on load (1 = no scaling)
FEATHER_AMOUNT = 11     # Gaussian kernel size used to feather mask edges

# Index groups into the 68-point dlib landmark model.
FACE_POINTS = list(range(17, 68))
MOUTH_POINTS = list(range(48, 61))
RIGHT_BROW_POINTS = list(range(17, 22))
LEFT_BROW_POINTS = list(range(22, 27))
RIGHT_EYE_POINTS = list(range(36, 42))
LEFT_EYE_POINTS = list(range(42, 48))
NOSE_POINTS = list(range(27, 35))
JAW_POINTS = list(range(0, 17))

# Points used to line up the images.
ALIGN_POINTS = (LEFT_BROW_POINTS + RIGHT_EYE_POINTS + LEFT_EYE_POINTS +
                RIGHT_BROW_POINTS + NOSE_POINTS + MOUTH_POINTS)

# Points from the second image to overlay on the first. The convex hull of
# each element will be overlaid.
OVERLAY_POINTS = [
    LEFT_EYE_POINTS + RIGHT_EYE_POINTS + LEFT_BROW_POINTS + RIGHT_BROW_POINTS,
    NOSE_POINTS + MOUTH_POINTS,
]

# Amount of blur to use during colour correction, as a fraction of the
# pupillary distance.
COLOUR_CORRECT_BLUR_FRAC = 0.6

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")


class TooManyFaces(Exception):
    pass


class NoFaces(Exception):
    pass


def get_landmarks(im):
    """Return a 68x2 matrix of landmarks for the single face in *im*.

    Returns an empty list when zero or more than one face is detected;
    callers treat that as a soft failure (see main()).
    """
    rects = detector(im, 1)

    if len(rects) > 1:
        return []
    if len(rects) == 0:
        return []

    return numpy.matrix([[p.x, p.y] for p in predictor(im, rects[0]).parts()])


def annotate_landmarks(im, landmarks):
    """Return a copy of *im* with each landmark drawn and numbered (debug aid)."""
    im = im.copy()
    for idx, point in enumerate(landmarks):
        pos = (point[0, 0], point[0, 1])
        cv2.putText(im, str(idx), pos,
                    fontFace=cv2.FONT_HERSHEY_SCRIPT_SIMPLEX,
                    fontScale=0.4,
                    color=(0, 0, 255))
        cv2.circle(im, pos, 3, color=(0, 255, 255))
    return im


def draw_convex_hull(im, points, color):
    """Fill the convex hull of *points* into *im* with *color* (in place)."""
    points = cv2.convexHull(points)
    cv2.fillConvexPoly(im, points, color=color)


def get_face_mask(im, landmarks):
    """Build a feathered 3-channel float mask covering eyes/brows/nose/mouth."""
    im = numpy.zeros(im.shape[:2], dtype=numpy.float64)

    for group in OVERLAY_POINTS:
        draw_convex_hull(im,
                         landmarks[group],
                         color=1)

    im = numpy.array([im, im, im]).transpose((1, 2, 0))

    # Binarise after a first blur (slightly grows the hull), then blur again
    # to feather the edge.
    im = (cv2.GaussianBlur(im, (FEATHER_AMOUNT, FEATHER_AMOUNT), 0) > 0) * 1.0
    im = cv2.GaussianBlur(im, (FEATHER_AMOUNT, FEATHER_AMOUNT), 0)

    return im


def transformation_from_points(points1, points2):
    """
    Return an affine transformation [s * R | T] such that:
        sum ||s*R*p1,i + T - p2,i||^2
    is minimized.
    """
    # Solve the procrustes problem by subtracting centroids, scaling by the
    # standard deviation, and then using the SVD to calculate the rotation. See
    # the following for more details:
    #   https://en.wikipedia.org/wiki/Orthogonal_Procrustes_problem

    points1 = points1.astype(numpy.float64)
    points2 = points2.astype(numpy.float64)

    c1 = numpy.mean(points1, axis=0)
    c2 = numpy.mean(points2, axis=0)
    points1 -= c1
    points2 -= c2

    s1 = numpy.std(points1)
    s2 = numpy.std(points2)
    points1 /= s1
    points2 /= s2

    U, S, Vt = numpy.linalg.svd(points1.T * points2)

    # The R we seek is in fact the transpose of the one given by U * Vt. This
    # is because the above formulation assumes the matrix goes on the right
    # (with row vectors) where as our solution requires the matrix to be on
    # the left (with column vectors).
    R = (U * Vt).T

    return numpy.vstack([numpy.hstack(((s2 / s1) * R,
                                       c2.T - (s2 / s1) * R * c1.T)),
                         numpy.matrix([0., 0., 1.])])


def read_im_and_landmarks(fname):
    """Load the image at *fname*, rescale, and return (image, landmarks)."""
    im = cv2.imread(fname, cv2.IMREAD_COLOR)
    im = cv2.resize(im, (im.shape[1] * SCALE_FACTOR,
                         im.shape[0] * SCALE_FACTOR))
    s = get_landmarks(im)

    return im, s


def warp_im(im, M, dshape):
    """Warp *im* by the 2x3 top of affine matrix *M* into an image of *dshape*."""
    output_im = numpy.zeros(dshape, dtype=im.dtype)
    cv2.warpAffine(im,
                   M[:2],
                   (dshape[1], dshape[0]),
                   dst=output_im,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)
    return output_im


def correct_colours(im1, im2, landmarks1):
    """Match im2's colours to im1 via a blur-ratio, scaled by pupil distance."""
    blur_amount = COLOUR_CORRECT_BLUR_FRAC * numpy.linalg.norm(
        numpy.mean(landmarks1[LEFT_EYE_POINTS], axis=0) -
        numpy.mean(landmarks1[RIGHT_EYE_POINTS], axis=0))
    blur_amount = int(blur_amount)
    if blur_amount % 2 == 0:
        # GaussianBlur requires an odd kernel size.
        blur_amount += 1
    im1_blur = cv2.GaussianBlur(im1, (blur_amount, blur_amount), 0)
    im2_blur = cv2.GaussianBlur(im2, (blur_amount, blur_amount), 0)

    # Avoid divide-by-zero errors.
    im2_blur += (128 * (im2_blur <= 1.0)).astype(numpy.uint8)

    return (im2.astype(numpy.float64) * im1_blur.astype(numpy.float64) /
            im2_blur.astype(numpy.float64))


def main(pra1, im2, landmarks2):
    """Swap the pre-loaded face (*im2*, *landmarks2*) onto the frame at *pra1*.

    Returns (image, error_flag); error_flag is 1 when no usable face is
    available, in which case the unmodified frame is returned.
    """
    im1, landmarks1 = read_im_and_landmarks(pra1)
    if len(landmarks1) == 0:
        print("no face or many faces")
        return im1, 1
    # Bug fix: also guard the target-face landmarks. Indexing an empty list
    # with ALIGN_POINTS would raise TypeError instead of failing gracefully.
    if len(landmarks2) == 0:
        print("no face or many faces")
        return im1, 1
    M = transformation_from_points(landmarks1[ALIGN_POINTS],
                                   landmarks2[ALIGN_POINTS])

    mask = get_face_mask(im2, landmarks2)
    warped_mask = warp_im(mask, M, im1.shape)
    combined_mask = numpy.max([get_face_mask(im1, landmarks1), warped_mask],
                              axis=0)

    warped_im2 = warp_im(im2, M, im1.shape)
    warped_corrected_im2 = correct_colours(im1, warped_im2, landmarks1)

    output_im = im1 * (1.0 - combined_mask) + warped_corrected_im2 * combined_mask
    return output_im, 0
    # cv2.imwrite('output2.jpg', output_im)

# ── /facetrace.py ─────────────────────────────────────────────────────────
# -*- coding: utf-8 -*-
# "Retouch" (P图) mode: play a video, track the face with a dlib correlation
# tracker, and paste a grab-cut target image over it frame by frame.
import sys
import dlib
import cv2
import numpy as np
import os
import time
import trygrabcut

video_path = './test.mp4'
IMG = './5.jpg'
OUT_FOLDER = './out_folder'
OUT_VIDEO = ''
OUT_FREQUENCY = 24  # output frame rate


class myCorrelationTracker(object):
    def __init__(self, windowName='default window', cameraNum=0):
        # State flags for the playback/tracking state machine.
        self.STATUS_RUN_WITHOUT_TRACKER = 0  # play without tracking
        self.STATUS_RUN_WITH_TRACKER = 1     # play and track the target
        self.STATUS_PAUSE = 2                # paused on the current frame
        self.STATUS_BREAK = 3                # quit
        self.status = self.STATUS_RUN_WITHOUT_TRACKER
        self.track_flag = False  # True while the correlation tracker is in use

        self.track_window = None  # currently selected tracking rectangle
        self.drag_start = None    # mouse-drag origin for region selection
        self.start_flag = True    # True when the tracker must be (re)initialised
        self.selection = None     # in-progress drag rectangle (None = no drag)
        # Create the display window and hook up mouse events.
        cv2.namedWindow(windowName, cv2.WINDOW_AUTOSIZE)
        cv2.setMouseCallback(windowName, self.onMouseClicked)
        self.windowName = windowName

        # Open the video and prepare the grab-cut target face.
        self.cap = cv2.VideoCapture(video_path)
        self.target = cv2.imread(IMG)
        self.target = trygrabcut.call(self.target)

        # dlib correlation tracker + frontal face detector.
        self.tracker = dlib.correlation_tracker()
        self.detector = dlib.get_frontal_face_detector()
        # Current frame and its output index.
        self.frame = None
        self.index = 0

    def keyEventHandler(self):
        """Poll the keyboard: ESC quits, space pauses/resumes, Enter confirms."""
        keyValue = cv2.waitKey(5)  # poll every 5 ms
        if keyValue == 27:  # ESC
            self.status = self.STATUS_BREAK
        if keyValue == 32:  # space
            if self.status != self.STATUS_PAUSE:
                # Pause so the user can select a tracking region.
                self.status = self.STATUS_PAUSE
            else:
                # Second space resumes playback (tracking only if a region
                # was selected).
                if self.track_window:
                    self.status = self.STATUS_RUN_WITH_TRACKER
                    self.start_flag = True
                else:
                    self.status = self.STATUS_RUN_WITHOUT_TRACKER
        if keyValue == 13:  # Enter
            if self.status == self.STATUS_PAUSE:
                if self.track_window:
                    # Confirm the selected region as the tracking target.
                    self.status = self.STATUS_RUN_WITH_TRACKER
                    self.start_flag = True

    def processHandler(self):
        """Advance one step of the state machine (read/track/draw/save)."""
        if self.status == self.STATUS_RUN_WITHOUT_TRACKER:
            # Read frames until a face is detected.
            rects = []
            while len(rects) == 0:
                ret, self.frame = self.cap.read()
                if self.frame is None:
                    # Bug fix: the video ended with no face found. The
                    # original kept looping forever here (and would then
                    # IndexError on rects[0]); bail out so the BREAK state
                    # can clean up.
                    self.status = self.STATUS_BREAK
                    return
                else:
                    if self.start_flag:
                        self.status = self.STATUS_PAUSE
                    rects = self.detector(self.frame, 1)
            self.get_section(rects[0])
            self.status = self.STATUS_RUN_WITH_TRACKER
            cv2.imshow(self.windowName, self.frame)
        elif self.status == self.STATUS_PAUSE:
            # While paused, draw the (in-progress or confirmed) selection on
            # a copy so the underlying frame stays untouched.
            img_first = self.frame.copy()
            if self.track_window:
                cv2.rectangle(img_first, (self.track_window[0], self.track_window[1]), (self.track_window[2], self.track_window[3]), (0, 0, 255), 1)
            elif self.selection:
                cv2.rectangle(img_first, (self.selection[0], self.selection[1]), (self.selection[2], self.selection[3]), (0, 0, 255), 1)
            cv2.imshow(self.windowName, img_first)
        elif self.status == self.STATUS_BREAK:
            self.cap.release()
            if self.frame is None:
                # The whole video was processed: assemble the output video.
                self.save_video(OUT_FOLDER)
            cv2.destroyAllWindows()
            os._exit(1)  # hard exit (this module runs inside a worker thread)
        elif self.status == self.STATUS_RUN_WITH_TRACKER:
            ret, self.frame = self.cap.read()
            if self.frame is None:
                self.status = self.STATUS_BREAK
                return
            rects = self.detector(self.frame, 1)

            if len(rects) == 0:
                # No detection this frame: fall back to the correlation
                # tracker (initialising it on the first such frame).
                if self.track_flag == False:
                    self.start_flag = True
                    self.track_flag = True
                else:
                    self.start_flag = False
            else:
                # Detection succeeded: use it directly and refresh the window.
                self.track_flag = False
                self.start_flag = True
                self.get_section(rects[0])

            if self.track_flag:
                if self.start_flag:
                    # First tracked frame: initialise the tracker.
                    self.tracker.start_track(self.frame, dlib.rectangle(self.track_window[0], self.track_window[1], self.track_window[2], self.track_window[3]))
                    self.start_flag = False
                else:
                    if self.frame is None:
                        self.status = self.STATUS_BREAK
                        return
                    self.tracker.update(self.frame)

                box_predict = self.tracker.get_position()
            else:
                box_predict = rects[0]
            # cv2.rectangle(self.frame,(int(box_predict.left()),int(box_predict.top())),(int(box_predict.right()),int(box_predict.bottom())),(0,255,255),1)
            self.face_change(box_predict, self.frame)
            cv2.imshow(self.windowName, self.frame)
            save_path = "{}/{}.jpg".format(OUT_FOLDER, self.index)
            cv2.imwrite(save_path, self.frame)
            self.index += 1

    def onMouseClicked(self, event, x, y, flags, param):
        """Mouse callback: drag with the left button to select a region."""
        if event == cv2.EVENT_LBUTTONDOWN:
            self.drag_start = (x, y)
            self.track_window = None
        if self.drag_start:  # dragging: update the live selection rectangle
            xMin = min(x, self.drag_start[0])
            yMin = min(y, self.drag_start[1])
            xMax = max(x, self.drag_start[0])
            yMax = max(y, self.drag_start[1])
            self.selection = (xMin, yMin, xMax, yMax)
        if event == cv2.EVENT_LBUTTONUP:
            self.drag_start = None
            self.track_window = self.selection
            self.selection = None

    def get_section(self, img):
        """Store a dlib rectangle as the current (xMin,yMin,xMax,yMax) window."""
        xMin = img.left()
        yMin = img.top()
        xMax = img.right()
        yMax = img.bottom()
        self.selection = (xMin, yMin, xMax, yMax)
        self.track_window = self.selection

    def face_change(self, img, sample_image):
        """Paste the resized target face into *sample_image* over box *img*."""
        x = int(img.left())
        y = int(img.top())
        w = int(img.right()) - x
        h = int(img.bottom()) - y

        temp = (0, 0, 0)
        # rect=cv2.rectangle(sample_image, (x, y), (x+w, y+h), (0, 255, 0), 2)
        temp = cv2.resize(self.target, (w, h), cv2.INTER_CUBIC)
        dilate = self.cut(self.target)
        dilate = cv2.resize(dilate, (w, h), cv2.INTER_CUBIC)
        # Copy only pixels the mask marks as foreground (mask value 0).
        for i in range(h):
            for j in range(w):
                if dilate[i, j] == 0:
                    sample_image[y + i, x + j] = temp[i, j]

    def cut(self, tar):
        """Return a white-background mask for *tar* via HSV threshold + morphology."""
        img = tar
        rows, cols, channels = img.shape  # dimensions of the foreground image

        # Convert to HSV and threshold the (white) background.
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        lower_blue = np.array([0, 0, 221])
        upper_blue = np.array([180, 30, 255])  # white range in HSV
        mask = cv2.inRange(hsv, lower_blue, upper_blue)
        # cv2.imshow('Mask', mask)

        # Erode then dilate to clean up speckle noise.
        erode = cv2.erode(mask, None, iterations=1)
        dilate = cv2.dilate(erode, None, iterations=1)
        # cv2.imshow('res',dilate)
        return dilate

    def save_video(self, path):
        """Assemble the numbered JPEGs in *path* into an AVI at OUT_VIDEO."""
        global OUT_VIDEO
        if OUT_VIDEO != '':
            OUT_VIDEO += '/'
        filelist = os.listdir(path)  # all frame files in the folder
        if len(filelist) == 0:
            return

        height, width, layers = cv2.imread(path + "/" + filelist[0]).shape
        size = (width, height)
        print(size)
        # fps: number of frames written per second of output video; with N
        # frames and fps f the clip lasts N/f seconds.

        file_path = OUT_VIDEO + str(int(time.time())) + ".avi"  # export path
        fourcc = cv2.VideoWriter_fourcc('I', '4', '2', '0')  # I420 -> .avi
        video = cv2.VideoWriter(file_path, fourcc, OUT_FREQUENCY, size)

        for item in range(1, len(filelist) + 1):
            item = path + '/' + str(item) + '.jpg'
            # print(item)
            img = cv2.imread(item)  # BGR ndarray, values 0-255
            video.write(img)
        video.release()

    def run(self):
        """Main loop: alternate key handling and state-machine processing."""
        while (1):
            self.keyEventHandler()
            self.processHandler()


def ex_run(v=video_path, t=IMG, o=OUT_VIDEO):
    """Entry point used by the UI: set paths, reset the output dir, and run."""
    global video_path, IMG, OUT_VIDEO
    video_path = v
    IMG = t
    OUT_VIDEO = o
    import shutil
    try:
        shutil.rmtree(OUT_FOLDER)
    except OSError:
        pass

    os.mkdir(OUT_FOLDER)
    testTracker = myCorrelationTracker(windowName='image', cameraNum=1)
    testTracker.run()

##if __name__ == '__main__':
##    ex_run()
##

# ── /faceui.py (header) ───────────────────────────────────────────────────
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'face_change.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets


class Ui_MainWindow(object):
    """PyQt Designer–generated UI (do not hand-edit widget geometry)."""

    def setupUi(self, MainWindow):
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(551, 417)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
        MainWindow.setSizePolicy(sizePolicy)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.gridLayoutWidget = QtWidgets.QWidget(self.centralwidget)
        self.gridLayoutWidget.setGeometry(QtCore.QRect(30, 20, 501, 211))
        self.gridLayoutWidget.setObjectName("gridLayoutWidget")
        self.gridLayout = QtWidgets.QGridLayout(self.gridLayoutWidget)
        self.gridLayout.setContentsMargins(0, 0, 0, 0)
        self.gridLayout.setObjectName("gridLayout")
        self.formLayout = QtWidgets.QFormLayout()
        self.formLayout.setObjectName("formLayout")
        self.label_2 = QtWidgets.QLabel(self.gridLayoutWidget)
        self.label_2.setObjectName("label_2")
        self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_2)
        self.progressBar = QtWidgets.QProgressBar(self.gridLayoutWidget)
        self.progressBar.setProperty("value", 24)
        self.progressBar.setObjectName("progressBar")
        self.formLayout.setWidget(1, QtWidgets.QFormLayout.SpanningRole, self.progressBar)
        self.pushButton_4 = QtWidgets.QPushButton(self.gridLayoutWidget)
        self.pushButton_4.setObjectName("pushButton_4")
        self.formLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.pushButton_4)
        self.gridLayout.addLayout(self.formLayout, 3, 0, 1, 1)
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.verticalLayout = QtWidgets.QVBoxLayout()
        self.verticalLayout.setObjectName("verticalLayout")
        self.pushButton_2 = QtWidgets.QPushButton(self.gridLayoutWidget)
        self.pushButton_2.setEnabled(True)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.pushButton_2.sizePolicy().hasHeightForWidth())
        self.pushButton_2.setSizePolicy(sizePolicy)
        self.pushButton_2.setObjectName("pushButton_2")
        self.verticalLayout.addWidget(self.pushButton_2)
        self.pushButton = QtWidgets.QPushButton(self.gridLayoutWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.pushButton.sizePolicy().hasHeightForWidth())
        self.pushButton.setSizePolicy(sizePolicy)
        self.pushButton.setMinimumSize(QtCore.QSize(0, 23))
        self.pushButton.setMaximumSize(QtCore.QSize(16777215, 23))
        self.pushButton.setObjectName("pushButton")
        self.verticalLayout.addWidget(self.pushButton)
        self.pushButton_3 = QtWidgets.QPushButton(self.gridLayoutWidget)
        self.pushButton_3.setObjectName("pushButton_3")
        self.verticalLayout.addWidget(self.pushButton_3)
        self.horizontalLayout.addLayout(self.verticalLayout)
        self.verticalLayout_3 = QtWidgets.QVBoxLayout()
        self.verticalLayout_3.setObjectName("verticalLayout_3")
        self.lineEdit = QtWidgets.QLineEdit(self.gridLayoutWidget)
        self.lineEdit.setEnabled(True)
        self.lineEdit.setObjectName("lineEdit")
        self.verticalLayout_3.addWidget(self.lineEdit)
        self.lineEdit_2 = QtWidgets.QLineEdit(self.gridLayoutWidget)
        self.lineEdit_2.setEnabled(True)
        self.lineEdit_2.setObjectName("lineEdit_2")
        self.verticalLayout_3.addWidget(self.lineEdit_2)
        self.lineEdit_3 = QtWidgets.QLineEdit(self.gridLayoutWidget)
        self.lineEdit_3.setObjectName("lineEdit_3")
        self.verticalLayout_3.addWidget(self.lineEdit_3)
        self.horizontalLayout.addLayout(self.verticalLayout_3)
        self.gridLayout.addLayout(self.horizontalLayout, 0, 0, 1, 1)
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.checkBox = QtWidgets.QCheckBox(self.gridLayoutWidget)
        self.checkBox.setEnabled(True)
        self.checkBox.setObjectName("checkBox")
        self.horizontalLayout_2.addWidget(self.checkBox)
        self.checkBox_2 = QtWidgets.QCheckBox(self.gridLayoutWidget)
        self.checkBox_2.setObjectName("checkBox_2")
        self.horizontalLayout_2.addWidget(self.checkBox_2)
        self.gridLayout.addLayout(self.horizontalLayout_2, 1, 0, 1, 1)
        MainWindow.setCentralWidget(self.centralwidget)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 551, 23))
        self.menubar.setObjectName("menubar")
        self.menu = QtWidgets.QMenu(self.menubar)
        self.menu.setObjectName("menu")
        MainWindow.setMenuBar(self.menubar)
        self.menubar.addAction(self.menu.menuAction())

        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.label_2.setText(_translate("MainWindow", "等待中"))
        self.pushButton_4.setText(_translate("MainWindow", "开始替换"))
        self.pushButton_2.setText(_translate("MainWindow", "选择视频"))
        self.pushButton.setText(_translate("MainWindow", "选择照片"))
        self.pushButton_3.setText(_translate("MainWindow", "保存路径"))
        self.checkBox.setText(_translate("MainWindow", "保留失败帧"))
        self.checkBox_2.setText(_translate("MainWindow", "P图模式"))
        self.menu.setTitle(_translate("MainWindow", "特征脸替换"))

# ── /ico.ico ──────────────────────────────────────────────────────────────
# https://raw.githubusercontent.com/xxc2016/PFace/e5830854f42f0fa204b9d6a0300e63736e6c6739/ico.ico
# ── /ico.png ──────────────────────────────────────────────────────────────
# https://raw.githubusercontent.com/xxc2016/PFace/e5830854f42f0fa204b9d6a0300e63736e6c6739/ico.png

# ── /main.py ──────────────────────────────────────────────────────────────
# coding=utf-8
# author xxc
# function: video face swap
# date 2018/8/22
# Module-level configuration.
VIDEO_PATH = './test.mp4'            # input video path
EXTRACT_FOLDER = './extract_folder'  # folder for extracted frames
OUT_FOLDER = './out_folder'          # folder for swapped frames
EXTRACT_FREQUENCY = 0.5              # frame extraction frequency (unused)
OUT_FREQUENCY = 24                   # output frame rate
TARGET_IMG = './7.jpg'               # target face image
scaleFactor = 1.2                    # face-detection scale step (>= 1.0)
minNeighbors = 3                     # minimum neighbour rectangles (>= 3)
flag = 0                             # 1 = keep frames where the swap failed

import os
import cv2
import dlib
import time
import numpy as np
import face

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
sum_pic = 0


def get_landmarks(im):
    """Return the dlib face rectangles detected in *im*."""
    rects = detector(im, 1)
    return rects


def extract_frames(video_path, dst_folder, index):
    """Dump every frame of *video_path* into *dst_folder* as NNN.jpg.

    Numbering starts at *index*; returns the total number of frames saved.
    """
    video = cv2.VideoCapture()
    if not video.open(video_path):
        print("can not open the video")
        os._exit(1)
    count = 1
    while True:
        _, frame = video.read()  # one frame per iteration
        if frame is None:
            break
        # if count % EXTRACT_FREQUENCY == 0:
        save_path = "{}/{:>03d}.jpg".format(dst_folder, index)
        cv2.imwrite(save_path, frame)
        index += 1
        count += 1
    # OUT_FREQUENCY=(int)(index-1/video.duration)
    video.release()
    print("Totally save {:d} pics".format(index - 1))
    sum_pic = index - 1
    return sum_pic


def picvideo(path, file):
    """Assemble the numbered JPEGs in *path* into an AVI under *file*."""
    filelist = os.listdir(path)  # all frame files
    if len(filelist) == 0:
        return

    height, width, layers = cv2.imread(path + "/" + filelist[0]).shape
    size = (width, height)
    print(size)
    # fps: frames written per second of output; N frames at fps f last N/f s.

    file_path = file + str(int(time.time())) + ".avi"  # export path
    fourcc = cv2.VideoWriter_fourcc('I', '4', '2', '0')  # I420 -> .avi
    video = cv2.VideoWriter(file_path, fourcc, OUT_FREQUENCY, size)

    for item in range(1, len(filelist) + 1):
        item = path + '/' + str(item) + '.jpg'
        # print(item)
        img = cv2.imread(item)  # BGR ndarray, values 0-255
        video.write(img)
    video.release()


def detect(pathfrom, pathto, img):
    """Run the face swap over every frame in *pathfrom*, writing to *pathto*.

    *img* is the target-face image path. Returns the number of frames on
    which the swap failed.
    """
    global flag
    filelist = os.listdir(pathfrom)

    count = 1
    error_cnt = 0
    im2, landmarks2 = read_im_and_landmarks(img)
    jpgs = [item for item in filelist if item.endswith('.jpg')]
    # Bug fix: if the target photo itself has no (or several) detectable
    # faces, landmarks2 is empty and face.main cannot run — fail every
    # frame up front instead of crashing mid-loop.
    if len(landmarks2) == 0:
        print("no face or many faces")
        return len(jpgs)
    for item in jpgs:
        imagepath = pathfrom + '/' + item
        sample_image, e = face.main(imagepath, im2, landmarks2)
        error_cnt += e
        if e == 0 or flag == 1:  # keep failed frames only when flag==1
            cv2.imwrite(pathto + '/' + str(count) + '.jpg', sample_image)
            count = count + 1
    print(error_cnt)
    return (error_cnt)


def get_target(tar):
    """Crop and return the last detected face region of *tar*."""
    faces = get_landmarks(tar)
    img = tar
    for index, face in enumerate(faces):
        # Bug fix: NumPy images index [row, col] = [y, x]; the original
        # sliced [left:right, top:bottom], swapping the axes.
        img = tar[face.top():face.bottom(), face.left():face.right()]
    return img


def read_im_and_landmarks(fname):
    """Load *fname* and return (image, 68x2 landmark matrix or [])."""
    SCALE_FACTOR = 1
    im = cv2.imread(fname, cv2.IMREAD_COLOR)
    im = cv2.resize(im, (im.shape[1] * SCALE_FACTOR,
                         im.shape[0] * SCALE_FACTOR))
    rects = get_landmarks(im)

    if len(rects) > 1:
        return im, []
    if len(rects) == 0:
        return im, []

    s = np.matrix([[p.x, p.y] for p in predictor(im, rects[0]).parts()])
    return im, s


def main(v, t, file_path, f):
    """Full pipeline: extract frames from *v*, swap in face *t*, export video.

    Returns the fraction of frames on which the swap failed.
    """
    # Recreate the working folders from scratch.
    global flag
    flag = f
    import shutil
    try:
        shutil.rmtree(EXTRACT_FOLDER)
        shutil.rmtree(OUT_FOLDER)
    except OSError:
        pass

    os.mkdir(EXTRACT_FOLDER)
    os.mkdir(OUT_FOLDER)
    # Extract frames, swap faces, then reassemble the video.
    sum_cnt = extract_frames(v, EXTRACT_FOLDER, 1)
    error_times = detect(EXTRACT_FOLDER, OUT_FOLDER, t)
    if file_path != '':
        file_path += '/'
    picvideo(OUT_FOLDER, file_path)
    return error_times * 1.0 / sum_cnt


##if __name__ == '__main__':
##    main()

# ── /pface.py ─────────────────────────────────────────────────────────────
# encoding=utf-8
from PyQt5 import QtCore, QtGui, QtWidgets
import sys, cv2, time

from PyQt5.QtWidgets import QFileDialog, QMainWindow, QMessageBox

from PyQt5.QtCore import QBasicTimer, QThread, pyqtSignal, Qt

from PyQt5.QtGui import QIcon

from faceui import Ui_MainWindow
import main
import facetrace

VIDEO_PATH = '123'
TARGET_IMG = ''
OUT_PATH = ''
FLAG = 0


class mywindow(QMainWindow, Ui_MainWindow):
    """Main window; inherits the Qt-Designer layout from Ui_MainWindow."""

    def __init__(self):
        super(mywindow, self).__init__()
        self.setupUi(self)
        self.setWindowTitle('图标')
        self.setWindowIcon(QIcon('./ico.png'))
        self.pushButton_2.clicked.connect(self.get_video)
        self.pushButton.clicked.connect(self.get_image)
        self.pushButton_3.clicked.connect(self.get_dir)
        self.pushButton_4.clicked.connect(self.start)
        self.progressBar.setValue(0)
        self.checkBox.stateChanged.connect(self.get_flag)
        self.checkBox_2.stateChanged.connect(self.change_func)

        self.timer = QBasicTimer()
        self.step = 0

        self.thread = MyThread()  # worker thread for the heavy processing
        self.thread.sec_changed_signal.connect(self.update)  # thread -> UI slot

    def get_video(self):
        """Pick the input .mp4 (disabled while a run is in progress)."""
        if self.timer.isActive():
            return
        fileName1, ftype = QFileDialog.getOpenFileName(self,
                                                       "选取文件",
                                                       "C:/",
                                                       "MP4 Files (*.mp4)")
        # print(fileName1)
        self.lineEdit.setText(fileName1)
        self.clear_timer()

    def get_image(self):
        """Pick the target face .jpg (disabled while a run is in progress)."""
        if self.timer.isActive():
            return
        fileName1, ftype = QFileDialog.getOpenFileName(self,
                                                       "选取文件",
                                                       "C:/",
                                                       "JPG Files (*.jpg)")
        self.lineEdit_2.setText(fileName1)
        self.clear_timer()

    def get_dir(self):
        """Pick the output directory (disabled while a run is in progress)."""
        if self.timer.isActive():
            return
        fileName1 = QFileDialog.getExistingDirectory(self,
                                                     "选取保存地址",
                                                     "C:/")
        self.lineEdit_3.setText(fileName1)
        self.clear_timer()

    def get_flag(self):
        """checkBox ("keep failed frames") -> FLAG 1/0."""
        global FLAG
        if self.checkBox.isChecked():
            FLAG = 1
        else:
            FLAG = 0

    def change_func(self):
        """checkBox_2 ("retouch mode") -> FLAG 2 and lock the other box."""
        global FLAG
        if self.checkBox_2.isChecked():
            self.checkBox.setCheckable(False)
            FLAG = 2
        else:
            self.checkBox.setCheckable(True)
            FLAG = 0

    def clear_timer(self):
        """Reset progress UI and stop any running worker."""
        self.step = 0
        self.progressBar.setValue(0)
        self.pushButton_4.setText('开始')
        self.timer.stop()
        self.thread.terminate()

    def start(self):
        """Validate the input paths and kick off (or toggle) a run."""
        global VIDEO_PATH
        global TARGET_IMG
        global OUT_PATH
        flag = 1
        video = str(self.lineEdit.text())
        img = str(self.lineEdit_2.text())
        out_path = str(self.lineEdit_3.text())
        if video != '' and img != '':
            VIDEO_PATH = video
            TARGET_IMG = img
            OUT_PATH = out_path
        else:
            QMessageBox.information(self, "错误", "文件地址不能为空")
            flag = 0

        if flag == 1:
            self.doAction()

    def timerEvent(self, e):
        """Fake-progress tick: creep toward 99% until the worker finishes."""
        if self.step >= 100:
            self.timer.stop()
            self.label_2.setText('完成')
            return
        if self.step != 99:
            if self.step > 40:
                self.step = self.step - 0.5  # slow down past 40%
            self.step = self.step + 1
        self.progressBar.setValue(int(self.step))

    def doAction(self):
        """Toggle the run: start timer+thread, or stop both."""
        if self.timer.isActive():
            self.timer.stop()
            self.pushButton_4.setText('开始')
            self.thread.terminate()
        else:
            self.timer.start(100, self)
            self.pushButton_4.setText('停止')
            self.thread.start()

    def update(self, sec):
        """Worker-finished slot: jump the bar to *sec* and mark complete."""
        self.step = sec
        self.progressBar.setValue(self.step)
        self.timer.stop()
        self.label_2.setText('完成')


class MyThread(QThread):
    """Worker thread running either the swap pipeline or retouch mode."""

    sec_changed_signal = pyqtSignal(int)  # emits progress percent (int)
    global VIDEO_PATH
    global TARGET_IMG
    global OUT_PATH
    global FLAG

    def __init__(self, flag=1, parent=None):
        super().__init__(parent)
        self.flag = flag  # 1 = armed to run once; 0 afterwards

    def run(self):
        if self.flag == 1:
            if FLAG == 0 or FLAG == 1:
                # Replacement mode (FLAG==1 additionally keeps failed frames).
                main.main(VIDEO_PATH, TARGET_IMG, OUT_PATH, FLAG)
            else:
                # Retouch ("P图") mode.
                facetrace.ex_run(VIDEO_PATH, TARGET_IMG, OUT_PATH)
            self.flag = 0
            self.sec_changed_signal.emit(100)  # signal completion
        else:
            return


if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    window = mywindow()
    window.show()
    sys.exit(app.exec_())

# ── /trygrabcut.py ────────────────────────────────────────────────────────
import cv2
import numpy as np
import dlib


def call(tar):
    """Crop the face in *tar*, grab-cut the foreground, and return the
    tight bounding box of the foreground with the background whitened."""
    ## img=cv2.imread('7.jpg')
    img = tar

    detector = dlib.get_frontal_face_detector()
    rects = detector(img, 1)
    # rect=(rects[0].left(), 2*rects[0].top()-rects[0].bottom(), rects[0].right(), rects[0].bottom())
    img = img[rects[0].top():rects[0].bottom(), rects[0].left():rects[0].right()]
    # Bug fix: grabCut's rect is (x, y, w, h) — width comes from shape[1]
    # (columns) and height from shape[0] (rows); they were swapped.
    rect = (0, 0, img.shape[1] - 1, img.shape[0] - 1)
    mask = np.zeros((img.shape[:2]), np.uint8)
    bgdModel = np.zeros((1, 65), np.float64)
    fgdModel = np.zeros((1, 65), np.float64)

    # 5 grabCut iterations initialised from the rectangle.
    cv2.grabCut(img, mask, rect, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_RECT)
    # where(cond, 0, 1): background/probable-background pixels -> 0, rest -> 1.
    mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
    (x, y) = mask2.shape  # x = rows, y = cols
    # Track the tight bounding box of foreground pixels; whiten background.
    # Bug fix: iMin bounds rows, so it must start at the row count x
    # (it was initialised to the column count y).
    iMin = x
    jMin = y
    iMax = 0
    jMax = 0
    for i in range(x):
        for j in range(y):
            if mask2[i, j] == 0:
                img[i, j] = 255
            else:
                jMin = min(j, jMin)
                jMax = max(j, jMax)
                iMin = min(i, iMin)
                iMax = max(i, iMax)
    # img=img*mask2[:,:,np.newaxis]
    # cv2.imwrite('1.jpg',img[iMin:iMax,jMin:jMax])
    # Bug fix: rows (i) must index first, columns (j) second — the original
    # returned img[jMin:jMax, iMin:iMax] with the axes swapped.
    return img[iMin:iMax, jMin:jMax]