├── SwapCV
│   ├── steveharvey.jpg
│   └── FaceSwapCV.ipynb
└── README.md
/SwapCV/steveharvey.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Venka97/FaceSwapCV/HEAD/SwapCV/steveharvey.jpg
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Content: Computer Vision
## Project: SwapCV

This project allows you to swap faces in real time.

## Required Libraries

To run the script, you'll need:
* dlib (with its Python bindings)
* a frontal face Haar cascade classifier
* OpenCV

You'll also need to obtain the trained landmark model from sourceforge:
http://sourceforge.net/projects/dclib/files/dlib/v18.10/shape_predictor_68_face_landmarks.dat.bz2

Unzip it with `bunzip2` and change `PREDICTOR_PATH` to refer to the extracted file.

Get the frontal face Haar cascade from: https://github.com/opencv/opencv/tree/master/data/haarcascades

Get dlib from http://dlib.net.
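
If you'd rather script the downloads, here is a minimal sketch (Python 2, matching the notebook's kernel). The predictor URL is the sourceforge link above; the cascade file name `haarcascade_frontalface_default.xml` is an assumption based on the standard frontal face file in OpenCV's haarcascades folder.

```python
import bz2
import urllib

PREDICTOR_URL = ("http://sourceforge.net/projects/dclib/files/dlib/v18.10/"
                 "shape_predictor_68_face_landmarks.dat.bz2")
# Assumed file name; pick whichever frontal face cascade you prefer.
CASCADE_URL = ("https://raw.githubusercontent.com/opencv/opencv/master/"
               "data/haarcascades/haarcascade_frontalface_default.xml")

urllib.urlretrieve(PREDICTOR_URL, "shape_predictor_68_face_landmarks.dat.bz2")
urllib.urlretrieve(CASCADE_URL, "haarcascade_frontalface_default.xml")

# Equivalent to `bunzip2 shape_predictor_68_face_landmarks.dat.bz2`.
with open("shape_predictor_68_face_landmarks.dat", "wb") as out:
    out.write(bz2.BZ2File("shape_predictor_68_face_landmarks.dat.bz2").read())
```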

### Instructions

Before running FaceSwapCV.ipynb, set the `PREDICTOR_PATH` variable to the path of your shape predictor and the `cascade_path` variable to the path of your Haar cascade classifier.
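
For example, if both files sit in the notebook's working directory under the names used in the sketch above:

```python
PREDICTOR_PATH = "shape_predictor_68_face_landmarks.dat"
cascade_path = "haarcascade_frontalface_default.xml"
```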

##### The code for swapping faces using dlib was sourced from https://matthewearl.github.io/2015/07/28/switching-eds-with-python/ and modified to apply face-swap filters in real time.
--------------------------------------------------------------------------------
/SwapCV/FaceSwapCV.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {
7 | "collapsed": false
8 | },
9 | "outputs": [],
10 | "source": [
11 | "\n",
12 | "\n",
13 | "\"\"\"\n",
14 | "This code is inspired by Switching Eds blog post for detection face through Dlib:\n",
15 | " http://matthewearl.github.io/2015/07/28/switching-eds-with-python/\n",
16 | "See the above for an explanation of the code below.\n",
17 | "To run the script you'll need to install dlib (http://dlib.net) including its\n",
18 | "Python bindings, and OpenCV. You'll also need to obtain the trained model from\n",
19 | "sourceforge:\n",
20 | " http://sourceforge.net/projects/dclib/files/dlib/v18.10/shape_predictor_68_face_landmarks.dat.bz2\n",
21 | "Unzip with `bunzip2` and change `PREDICTOR_PATH` to refer to this file. The\n",
22 | "script is run like so:\n",
23 | " ./faceswap.py
\n",
24 | "If successful, a video will be produced with the facial features\n",
25 | "from `` replaced with the facial features from ``.\n",
26 | "\"\"\""
27 | ]
28 | },
29 | {
30 | "cell_type": "code",
31 | "execution_count": null,
32 | "metadata": {
33 | "collapsed": false
34 | },
35 | "outputs": [],
36 | "source": [
37 | "import cv2\n",
38 | "import dlib\n",
39 | "import numpy\n",
40 | "from time import sleep\n",
41 | "import sys\n",
42 | "\n",
43 | "##pretrained model that predicts the rectangles that correspond to the facial features of a face\n",
44 | "PREDICTOR_PATH = \"\" #Add the path of your shape predictor here\n",
45 | "SCALE_FACTOR = 1 \n",
46 | "FEATHER_AMOUNT = 11\n",
47 | "\n",
48 | "FACE_POINTS = list(range(17, 68))\n",
49 | "MOUTH_POINTS = list(range(48, 61))\n",
50 | "RIGHT_BROW_POINTS = list(range(17, 22))\n",
51 | "LEFT_BROW_POINTS = list(range(22, 27))\n",
52 | "RIGHT_EYE_POINTS = list(range(36, 42))\n",
53 | "LEFT_EYE_POINTS = list(range(42, 48))\n",
54 | "NOSE_POINTS = list(range(27, 35))\n",
55 | "JAW_POINTS = list(range(0, 17))\n",
56 | "\n",
57 | "# Points used to line up the images.\n",
58 | "ALIGN_POINTS = (LEFT_BROW_POINTS + RIGHT_EYE_POINTS + LEFT_EYE_POINTS +\n",
59 | " RIGHT_BROW_POINTS + NOSE_POINTS + MOUTH_POINTS)\n",
60 | "\n",
61 | "# Points from the second image to overlay on the first. The convex hull of each\n",
62 | "# element will be overlaid.\n",
63 | "OVERLAY_POINTS = [\n",
64 | " LEFT_EYE_POINTS + RIGHT_EYE_POINTS + LEFT_BROW_POINTS + RIGHT_BROW_POINTS,\n",
65 | " NOSE_POINTS + MOUTH_POINTS,\n",
66 | "]\n",
67 | "\n",
68 | "# Amount of blur to use during colour correction, as a fraction of the\n",
69 | "# pupillary distance.\n",
70 | "COLOUR_CORRECT_BLUR_FRAC = 0.6\n",
71 | "cascade_path= '' #Add the path of your HAAR cascade classifier here\n",
72 | "cascade = cv2.CascadeClassifier(cascade_path)\n",
73 | "detector = dlib.get_frontal_face_detector()\n",
74 | "predictor = dlib.shape_predictor(PREDICTOR_PATH)\n",
75 | "\n",
76 | "\n",
77 | "def get_landmarks(im, dlibOn):\n",
78 | " \n",
79 | " if (dlibOn == True):\n",
80 | " rects = detector(im, 1)\n",
81 | " if len(rects) > 1:\n",
82 | " return \"error\"\n",
83 | " if len(rects) == 0:\n",
84 | " return \"error\"\n",
85 | " return numpy.matrix([[p.x, p.y] for p in predictor(im, rects[0]).parts()])\n",
86 | " \n",
87 | " else:\n",
88 | " rects = cascade.detectMultiScale(im, 1.3,5)\n",
89 | " if len(rects) > 1:\n",
90 | " return \"error\"\n",
91 | " if len(rects) == 0:\n",
92 | " return \"error\"\n",
93 | " x,y,w,h =rects[0]\n",
94 | " rect=dlib.rectangle(x,y,x+w,y+h)\n",
95 | " return numpy.matrix([[p.x, p.y] for p in predictor(im, rect).parts()])\n",
96 | "\n",
97 | " \n",
98 | "def annotate_landmarks(im, landmarks):\n",
99 | " im = im.copy()\n",
100 | " for idx, point in enumerate(landmarks):\n",
101 | " pos = (point[0, 0], point[0, 1])\n",
102 | " cv2.putText(im, str(idx), pos,\n",
103 | " fontFace=cv2.FONT_HERSHEY_SCRIPT_SIMPLEX,\n",
104 | " fontScale=0.4,\n",
105 | " color=(0, 0, 255))\n",
106 | " cv2.circle(im, pos, 3, color=(0, 255, 255))\n",
107 | " return im\n",
108 | "\n",
109 | "\n",
110 | "def draw_convex_hull(im, points, color):\n",
111 | " points = cv2.convexHull(points)\n",
112 | " cv2.fillConvexPoly(im, points, color=color)\n",
113 | "\n",
114 | " \n",
115 | "def get_face_mask(im, landmarks):\n",
116 | " im = numpy.zeros(im.shape[:2], dtype=numpy.float64)\n",
117 | "\n",
118 | " for group in OVERLAY_POINTS:\n",
119 | " draw_convex_hull(im,\n",
120 | " landmarks[group],\n",
121 | " color=1)\n",
122 | "\n",
123 | " im = numpy.array([im, im, im]).transpose((1, 2, 0))\n",
124 | "\n",
125 | " im = (cv2.GaussianBlur(im, (FEATHER_AMOUNT, FEATHER_AMOUNT), 0) > 0) * 1.0\n",
126 | " im = cv2.GaussianBlur(im, (FEATHER_AMOUNT, FEATHER_AMOUNT), 0)\n",
127 | "\n",
128 | " return im\n",
129 | " \n",
130 | " \n",
131 | "def transformation_from_points(points1, points2):\n",
132 | " \"\"\"\n",
133 | " Return an affine transformation [s * R | T] such that:\n",
134 | " sum ||s*R*p1,i + T - p2,i||^2\n",
135 | " is minimized.\n",
136 | " \"\"\"\n",
137 | " # Solve the procrustes problem by subtracting centroids, scaling by the\n",
138 | " # standard deviation, and then using the SVD to calculate the rotation\n",
139 | "\n",
140 | " points1 = points1.astype(numpy.float64)\n",
141 | " points2 = points2.astype(numpy.float64)\n",
142 | "\n",
143 | " c1 = numpy.mean(points1, axis=0)\n",
144 | " c2 = numpy.mean(points2, axis=0)\n",
145 | " points1 -= c1\n",
146 | " points2 -= c2\n",
147 | "\n",
148 | " s1 = numpy.std(points1)\n",
149 | " s2 = numpy.std(points2)\n",
150 | " points1 /= s1\n",
151 | " points2 /= s2\n",
152 | "\n",
153 | " U, S, Vt = numpy.linalg.svd(points1.T * points2)\n",
154 | "\n",
155 | " # The R we seek is in fact the transpose of the one given by U * Vt. This\n",
156 | " # is because the above formulation assumes the matrix goes on the right\n",
157 | " # (with row vectors) where as our solution requires the matrix to be on the\n",
158 | " # left (with column vectors).\n",
159 | " R = (U * Vt).T\n",
160 | "\n",
161 | " return numpy.vstack([numpy.hstack(((s2 / s1) * R,\n",
162 | " c2.T - (s2 / s1) * R * c1.T)),\n",
163 | " numpy.matrix([0., 0., 1.])])\n",
164 | "\n",
165 | "\n",
166 | "def read_im_and_landmarks(fname):\n",
167 | " im = cv2.imread(fname, cv2.IMREAD_COLOR)\n",
168 | " im = cv2.resize(im,None,fx=0.35, fy=0.35, interpolation = cv2.INTER_LINEAR)\n",
169 | " im = cv2.resize(im, (im.shape[1] * SCALE_FACTOR,\n",
170 | " im.shape[0] * SCALE_FACTOR))\n",
171 | " s = get_landmarks(im,dlibOn)\n",
172 | "\n",
173 | " return im, s\n",
174 | "\n",
175 | "\n",
176 | "def warp_im(im, M, dshape):\n",
177 | " output_im = numpy.zeros(dshape, dtype=im.dtype)\n",
178 | " cv2.warpAffine(im,\n",
179 | " M[:2],\n",
180 | " (dshape[1], dshape[0]),\n",
181 | " dst=output_im,\n",
182 | " borderMode=cv2.BORDER_TRANSPARENT,\n",
183 | " flags=cv2.WARP_INVERSE_MAP)\n",
184 | " return output_im\n",
185 | "\n",
186 | "\n",
187 | "def correct_colours(im1, im2, landmarks1):\n",
188 | " blur_amount = COLOUR_CORRECT_BLUR_FRAC * numpy.linalg.norm(\n",
189 | " numpy.mean(landmarks1[LEFT_EYE_POINTS], axis=0) -\n",
190 | " numpy.mean(landmarks1[RIGHT_EYE_POINTS], axis=0))\n",
191 | " blur_amount = int(blur_amount)\n",
192 | " if blur_amount % 2 == 0:\n",
193 | " blur_amount += 1\n",
194 | " im1_blur = cv2.GaussianBlur(im1, (blur_amount, blur_amount), 0)\n",
195 | " im2_blur = cv2.GaussianBlur(im2, (blur_amount, blur_amount), 0)\n",
196 | "\n",
197 | " # Avoid divide-by-zero errors.\n",
198 | " im2_blur += (128 * (im2_blur <= 1.0)).astype(im2_blur.dtype)\n",
199 | "\n",
200 | " return (im2.astype(numpy.float64) * im1_blur.astype(numpy.float64) /\n",
201 | " im2_blur.astype(numpy.float64))\n",
202 | "\n",
203 | "def face_swap(img,name):\n",
204 | "\n",
205 | " s = get_landmarks(img,True)\n",
206 | " \n",
207 | " if (s == \"error\"):\n",
208 | " print \"No or too many faces\"\n",
209 | " return img\n",
210 | " \n",
211 | " im1, landmarks1 = img, s\n",
212 | " im2, landmarks2 = read_im_and_landmarks(name)\n",
213 | "\n",
214 | " M = transformation_from_points(landmarks1[ALIGN_POINTS],\n",
215 | " landmarks2[ALIGN_POINTS])\n",
216 | "\n",
217 | " mask = get_face_mask(im2, landmarks2)\n",
218 | " warped_mask = warp_im(mask, M, im1.shape)\n",
219 | " combined_mask = numpy.max([get_face_mask(im1, landmarks1), warped_mask],\n",
220 | " axis=0)\n",
221 | "\n",
222 | " warped_im2 = warp_im(im2, M, im1.shape)\n",
223 | " \n",
224 | " warped_corrected_im2 = correct_colours(im1, warped_im2, landmarks1)\n",
225 | "\n",
226 | " output_im = im1 * (1.0 - combined_mask) + warped_corrected_im2 * combined_mask\n",
227 | " \n",
228 | " #output_im is no longer in the expected OpenCV format so we use openCV \n",
229 | " #to write the image to the disk and then reload it\n",
230 | " cv2.imwrite('output.jpg', output_im)\n",
231 | " image = cv2.imread('output.jpg')\n",
232 | " \n",
233 | " frame = cv2.resize(image,None,fx=1.5, fy=1.5, interpolation = cv2.INTER_LINEAR)\n",
234 | " \n",
235 | " return image \n",
236 | " \n",
237 | "\n",
238 | "cap = cv2.VideoCapture(0)\n",
239 | "\n",
240 | "\n",
241 | "# Set dlibOn controls for using dlib's facial landmark detector \n",
242 | "# or use HAAR Cascade Classifiers (faster)\n",
243 | "\n",
244 | "filter_image = cv2.imread(\"./steveharvey.jpg\") #Put your image here\n",
245 | "dlibOn = False\n",
246 | "\n",
247 | "while True: \n",
248 | " ret, frame = cap.read() \n",
249 | " \n",
250 | " #Reduce image size by 75% to reduce processing time and improve framerates\n",
251 | " frame = cv2.resize(frame, None, fx=0.75, fy=0.75, interpolation = cv2.INTER_LINEAR)\n",
252 | " \n",
253 | " # flip image so that it's more mirror like\n",
254 | " frame = cv2.flip(frame, 1)\n",
255 | " \n",
256 | " cv2.imshow('Face Swapper', face_swap(frame, filter_image))\n",
257 | " \n",
258 | " if cv2.waitKey(1) == 13: #Enter Key\n",
259 | " break\n",
260 | "\n",
261 | "cap.release()\n",
262 | "cv2.destroyAllWindows() "
263 | ]
264 | }
265 | ],
266 | "metadata": {
267 | "anaconda-cloud": {},
268 | "kernelspec": {
269 | "display_name": "Python [conda root]",
270 | "language": "python",
271 | "name": "conda-root-py"
272 | },
273 | "language_info": {
274 | "codemirror_mode": {
275 | "name": "ipython",
276 | "version": 2
277 | },
278 | "file_extension": ".py",
279 | "mimetype": "text/x-python",
280 | "name": "python",
281 | "nbconvert_exporter": "python",
282 | "pygments_lexer": "ipython2",
283 | "version": "2.7.12"
284 | }
285 | },
286 | "nbformat": 4,
287 | "nbformat_minor": 1
288 | }
289 |
--------------------------------------------------------------------------------