├── images
│   ├── out.gif
│   └── eyeflow.png
├── README.md
└── eye-glitch.py

/images/out.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/burningion/snapchat-lens-effect-in-python/HEAD/images/out.gif
--------------------------------------------------------------------------------

/images/eyeflow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/burningion/snapchat-lens-effect-in-python/HEAD/images/eyeflow.png
--------------------------------------------------------------------------------

/README.md:
--------------------------------------------------------------------------------
# Snapchat Lens Effect in Python

This is the companion repository for the blog post at [makeartwithpython.com](https://www.makeartwithpython.com/blog/building-a-snapchat-lens-effect-in-python/).

## Architecture

![Lens Architecture](https://github.com/burningion/snapchat-lens-effect-in-python/raw/master/images/eyeflow.png)

## Usage

You'll need to download [shape_predictor_68](https://github.com/davisking/dlib-models/blob/master/shape_predictor_68_face_landmarks.dat.bz2) from dlib-models and decompress it (it's a .bz2 archive, so use `bunzip2` or similar) into this directory first.

After that, pass the location of the predictor to the Python 3 program as a command line argument, like so:

```bash
$ python3 eye-glitch.py -predictor shape_predictor_68_face_landmarks.dat
```

Press 's' to enable the eye snake, and you'll end up with something like this:

![Image like this](https://github.com/burningion/snapchat-lens-effect-in-python/raw/master/images/out.gif)
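
Pressing 'r' toggles recording, which writes numbered PNG frames into an `image_seq/` directory (create it first; the script won't make it for you). One way to stitch those frames into a GIF is with ffmpeg; the framerate, scaling, and output name below are just reasonable choices, not anything this repo prescribes:

```bash
$ mkdir image_seq
$ ffmpeg -framerate 30 -i image_seq/%05d.png -vf "fps=15,scale=480:-1" out.gif
```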

See the full post at https://www.makeartwithpython.com/blog/building-a-snapchat-lens-effect-in-python/
--------------------------------------------------------------------------------

/eye-glitch.py:
--------------------------------------------------------------------------------
import argparse

import cv2
from imutils.video import VideoStream
from imutils import face_utils, translate, resize

import time
import dlib

import numpy as np

parser = argparse.ArgumentParser()
parser.add_argument("-predictor", required=True, help="path to predictor")
args = parser.parse_args()

print("starting program.")
print("'s' starts drawing eyes.")
print("'r' to toggle recording images, and 'q' to quit")

vs = VideoStream().start()
time.sleep(1.5)

# this detects our face
detector = dlib.get_frontal_face_detector()
# and this predicts where our 68 facial landmarks are
predictor = dlib.shape_predictor(args.predictor)

recording = False
counter = 0

class EyeList(object):
    def __init__(self, length):
        self.length = length
        self.eyes = []

    def push(self, newcoords):
        if len(self.eyes) < self.length:
            self.eyes.append(newcoords)
        else:
            self.eyes.pop(0)
            self.eyes.append(newcoords)

    def clear(self):
        self.eyes = []

# start with 10 previous eye positions
eyelist = EyeList(10)
eyeSnake = False

# get our first frame outside of the loop, so we can see how our
# webcam resized itself, and its resolution w/ np.shape
frame = vs.read()
frame = resize(frame, width=800)

eyelayer = np.zeros(frame.shape, dtype='uint8')
eyemask = eyelayer.copy()
# the mask needs to be single channel, since OpenCV expects an 8-bit,
# one-channel array for its mask arguments
eyemask = cv2.cvtColor(eyemask, cv2.COLOR_BGR2GRAY)
translated = np.zeros(frame.shape, dtype='uint8')
translated_mask = eyemask.copy()

while True:
    # read a frame from webcam, resize to be smaller
    frame = vs.read()
    frame = resize(frame, width=800)

    # fill our masks and frames with 0 (black) on every draw loop
    eyelayer.fill(0)
    eyemask.fill(0)
    translated.fill(0)
    translated_mask.fill(0)

    # the detector and predictor expect a grayscale image
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    rects = detector(gray, 0)

    # if we're running the eyesnake loop (press 's' while running to enable)
    if eyeSnake:
        for rect in rects:
            # the predictor is our 68 point model we loaded
            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)

            # our dlib model returns 68 points that make up a face.
            # the left eye is points 36 through 41, and the right eye
            # is points 42 through 47.
            leftEye = shape[36:42]
            rightEye = shape[42:48]

            # fill our mask in the shape of our eyes
            cv2.fillPoly(eyemask, [leftEye], 255)
            cv2.fillPoly(eyemask, [rightEye], 255)

            # copy the image from the frame onto the eyelayer using that mask
            eyelayer = cv2.bitwise_and(frame, frame, mask=eyemask)

            # we use this to get an x and y coordinate for the pasting of eyes
            x, y, w, h = cv2.boundingRect(eyemask)

            # push this onto our list
            eyelist.push([x, y])

            # finally, draw our eyes, in reverse order
            for i in reversed(eyelist.eyes):
                # first, translate the eyelayer with just the eyes
                translated1 = translate(eyelayer, i[0] - x, i[1] - y)
                # next, translate its mask
                translated1_mask = translate(eyemask, i[0] - x, i[1] - y)
                # add it to the existing translated eyes mask (not an actual add,
                # because of the risk of overflow)
                translated_mask = np.maximum(translated_mask, translated1_mask)
                # cut out the new translated mask
                translated = cv2.bitwise_and(translated, translated, mask=255 - translated1_mask)
                # paste in the newly translated eye position
                translated += translated1
        # again, cut out the translated mask
        frame = cv2.bitwise_and(frame, frame, mask=255 - translated_mask)
        # and paste in the translated eye image
        frame += translated

    # display the current frame, and check to see if the user pressed a key
    cv2.imshow("eye glitch", frame)
    key = cv2.waitKey(1) & 0xFF

    if recording:
        # frames go into a directory called "image_seq" (create it first);
        # the numbered image sequence can later be turned into a gif with ffmpeg
        cv2.imwrite("image_seq/%05d.png" % counter, frame)
        counter += 1

    if key == ord("q"):
        break

    if key == ord("s"):
        eyeSnake = not eyeSnake
        eyelist.clear()

    if key == ord("r"):
        recording = not recording

cv2.destroyAllWindows()
vs.stop()
--------------------------------------------------------------------------------