├── Readme.md
├── camera.py
├── environment.yml
├── image.py
├── sample
├── 000.png
├── after.png
├── comparison.png
├── face.png
├── facial_landmarks.jpeg
├── mask.png
└── output_video.mp4
└── utils.py
/Readme.md:
--------------------------------------------------------------------------------
1 | # Virtual Makeup Using Mediapipe
2 | ## Clone and Run
3 | ```
4 | git clone https://github.com/Jayanths9/Virtual_Makeup_opencv.git
5 | cd Virtual_Makeup_opencv
6 | conda env create -f environment.yml
7 | conda activate virtual_makeup
8 | ```
9 | ### Run on Camera input
10 | ```
11 | python camera.py
12 | ```
13 | ### Run on sample image
14 | ```
python image.py --img sample/face.png
16 | ```
17 |
18 | # Introduction
19 |
20 | In this project Mediapipe [1] facial landmarks and opencv is used to add makeup on facial features.
- Mediapipe's facial landmark library detects the face in the image and returns 478 landmarks on the human face. The (x, y) coordinates of each point are obtained w.r.t. the image size.
22 |
23 |
24 |
25 |
Mediapipe facial landmarks example [2]
27 |
28 |
29 | - From all the facial landmarks, extract Lips, Eyebrow, Eyeliner & Eyeshadow points and create a colored mask with respect to the input image.
30 |
31 |
32 |
33 |
34 | Colored Mask for Lips, Eyebrow, Eyeliner & Eyeshadow
35 |
36 |
37 | - Blend the Original image and the mask with respect to its weights to add makeup on the original image.
38 |
39 |
40 |
41 |
42 | Original image and Transformed Image with Makeup [3]
43 |
44 |
45 | - Virtual Makeup on video.
46 |
47 |
48 |
49 |
50 |
Virtual makeup on video [4]
52 |
53 |
54 |
55 |
# References
57 | 1. https://ai.google.dev/edge/mediapipe/solutions/vision/face_landmarker
58 | 2. https://medium.com/@hotakoma/mediapipe-landmark-face-hand-pose-sequence-number-list-view-778364d6c414
59 | 3. https://i.pinimg.com/originals/a9/93/7d/a9937d95f962f477c486d701a5152752.jpg
60 | 4. https://www.pexels.com/video/attractive-woman-looking-at-the-camera-7048981/
61 |
62 | ---
63 | Author:
64 | Jayanth S
Universität Bremen, Bremen
66 |
--------------------------------------------------------------------------------
/camera.py:
--------------------------------------------------------------------------------
1 | from utils import *
2 |
# Facial features to apply makeup to.
face_elements = [
    "LIP_LOWER",
    "LIP_UPPER",
    "EYEBROW_LEFT",
    "EYEBROW_RIGHT",
    "EYELINER_LEFT",
    "EYELINER_RIGHT",
    "EYESHADOW_LEFT",
    "EYESHADOW_RIGHT",
]

# Color of each feature, in OpenCV's BGR channel order.
colors_map = {
    # upper lip and lower lips
    "LIP_UPPER": [0, 0, 255],  # Red in BGR
    "LIP_LOWER": [0, 0, 255],  # Red in BGR
    # eyeliner
    "EYELINER_LEFT": [139, 0, 0],  # Dark Blue in BGR
    "EYELINER_RIGHT": [139, 0, 0],  # Dark Blue in BGR
    # eye shadow
    "EYESHADOW_LEFT": [0, 100, 0],  # Dark Green in BGR
    "EYESHADOW_RIGHT": [0, 100, 0],  # Dark Green in BGR
    # eye brow
    "EYEBROW_LEFT": [19, 69, 139],  # Dark Brown in BGR
    "EYEBROW_RIGHT": [19, 69, 139],  # Dark Brown in BGR
}


# Landmark index lists and their colors, aligned with face_elements order.
face_connections = [face_points[idx] for idx in face_elements]
colors = [colors_map[idx] for idx in face_elements]

video_capture = cv2.VideoCapture(0)
try:
    while True:
        # Read one frame from the camera.
        success, image = video_capture.read()
        if not success:
            # Camera unavailable or stream ended: stop instead of spinning.
            # (Original code flipped the frame BEFORE this check, so a failed
            # read crashed on cv2.flip(None).)
            break
        # Mirror horizontally so the preview behaves like a mirror.
        image = cv2.flip(image, 1)
        # Empty canvas the same size as the frame; features are painted on it.
        mask = np.zeros_like(image)
        # Map of landmark index -> (x, y) pixel coordinates.
        face_landmarks = read_landmarks(image=image)
        # Paint the colored feature polygons onto the mask.
        mask = add_mask(
            mask,
            idx_to_coordinates=face_landmarks,
            face_connections=face_connections,
            colors=colors,
        )
        # Blend: output = 1.0*image + 0.2*mask + 1.0 (gamma offset).
        output = cv2.addWeighted(image, 1.0, mask, 0.2, 1.0)
        cv2.imshow("Feature", output)
        # Poll every 100 ms (caps the loop at ~10 FPS); press q to exit.
        if cv2.waitKey(100) & 0xFF == ord("q"):
            break
finally:
    # Release the camera and close windows even if the loop raised.
    video_capture.release()
    cv2.destroyAllWindows()
60 |
--------------------------------------------------------------------------------
/environment.yml:
--------------------------------------------------------------------------------
1 | name: virtual_makeup
2 | channels:
3 | - defaults
4 | dependencies:
5 | - _libgcc_mutex=0.1
6 | - _openmp_mutex=5.1
7 | - bzip2=1.0.8
8 | - ca-certificates=2024.3.11
9 | - ld_impl_linux-64=2.38
10 | - libffi=3.4.4
11 | - libgcc-ng=11.2.0
12 | - libgomp=11.2.0
13 | - libstdcxx-ng=11.2.0
14 | - libuuid=1.41.5
15 | - ncurses=6.4
16 | - openssl=3.0.13
17 | - pip=24.0
18 | - python=3.11.9
19 | - readline=8.2
20 | - setuptools=69.5.1
21 | - sqlite=3.45.3
22 | - tk=8.6.14
23 | - tzdata=2024a
24 | - wheel=0.43.0
25 | - xz=5.4.6
26 | - zlib=1.2.13
27 | - pip:
28 | - absl-py==2.1.0
29 | - attrs==23.2.0
30 | - cffi==1.16.0
31 | - contourpy==1.2.1
32 | - cycler==0.12.1
33 | - flatbuffers==24.3.25
34 | - fonttools==4.52.4
35 | - jax==0.4.28
36 | - jaxlib==0.4.28
37 | - kiwisolver==1.4.5
38 | - matplotlib==3.9.0
39 | - mediapipe==0.10.14
40 | - ml-dtypes==0.4.0
41 | - numpy==1.26.4
42 | - opencv-contrib-python==4.9.0.80
43 | - opt-einsum==3.3.0
44 | - packaging==24.0
45 | - pillow==10.3.0
46 | - protobuf==4.25.3
47 | - pycparser==2.22
48 | - pyparsing==3.1.2
49 | - python-dateutil==2.9.0.post0
50 | - scipy==1.13.1
51 | - six==1.16.0
52 | - sounddevice==0.4.7
53 |
--------------------------------------------------------------------------------
/image.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import argparse
3 | from utils import *
4 |
5 |
# Facial features that will receive makeup.
face_elements = [
    "LIP_LOWER",
    "LIP_UPPER",
    "EYEBROW_LEFT",
    "EYEBROW_RIGHT",
    "EYELINER_LEFT",
    "EYELINER_RIGHT",
    "EYESHADOW_LEFT",
    "EYESHADOW_RIGHT",
]

# Makeup shades, expressed as [B, G, R] triplets (OpenCV channel order).
_RED = [0, 0, 255]
_DARK_BLUE = [139, 0, 0]
_DARK_GREEN = [0, 100, 0]
_DARK_BROWN = [19, 69, 139]

# Feature name -> BGR color used when painting its mask.
colors_map = {
    "LIP_UPPER": _RED,        # upper lip
    "LIP_LOWER": _RED,        # lower lip
    "EYELINER_LEFT": _DARK_BLUE,
    "EYELINER_RIGHT": _DARK_BLUE,
    "EYESHADOW_LEFT": _DARK_GREEN,
    "EYESHADOW_RIGHT": _DARK_GREEN,
    "EYEBROW_LEFT": _DARK_BROWN,
    "EYEBROW_RIGHT": _DARK_BROWN,
}
33 |
34 |
def main(image_path):
    """Apply virtual makeup to the image at `image_path` and display it.

    image_path : path to the input image on disk.
    Raises FileNotFoundError if the file is missing or unreadable.
    """
    # Landmark index lists for the requested features.
    face_connections = [face_points[idx] for idx in face_elements]
    # Corresponding BGR color for each feature, same order.
    colors = [colors_map[idx] for idx in face_elements]
    image = cv2.imread(image_path)
    if image is None:
        # cv2.imread signals failure by returning None (no exception), which
        # previously surfaced as a confusing np.zeros_like error.
        raise FileNotFoundError(f"Could not read image: {image_path}")
    # Empty canvas the same size as the input; features are painted onto it.
    mask = np.zeros_like(image)
    # Map of landmark index -> (x, y) pixel coordinates.
    face_landmarks = read_landmarks(image=image)
    # Paint the colored feature polygons onto the mask.
    mask = add_mask(
        mask,
        idx_to_coordinates=face_landmarks,
        face_connections=face_connections,
        colors=colors,
    )
    # Blend: output = 1.0*image + 0.2*mask + 1.0 (gamma offset).
    output = cv2.addWeighted(image, 1.0, mask, 0.2, 1.0)
    # Display the result in a blocking window.
    show_image(output)
56 |
if __name__ == "__main__":
    # Argument parser for the CLI entry point.
    parser = argparse.ArgumentParser(description="Image to add Facial makeup ")
    # "--image" is accepted as an alias of "--img" (the README documents
    # --image); dest stays "img" so args.img keeps working.
    parser.add_argument(
        "--img",
        "--image",
        dest="img",
        type=str,
        required=True,
        help="Path to the image.",
    )
    args = parser.parse_args()
    main(args.img)
64 |
--------------------------------------------------------------------------------
/sample/000.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Jayanths9/Virtual_Makeup/0bb60d8e6318d5538c7ddca73e1832c7915c5ed4/sample/000.png
--------------------------------------------------------------------------------
/sample/after.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Jayanths9/Virtual_Makeup/0bb60d8e6318d5538c7ddca73e1832c7915c5ed4/sample/after.png
--------------------------------------------------------------------------------
/sample/comparison.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Jayanths9/Virtual_Makeup/0bb60d8e6318d5538c7ddca73e1832c7915c5ed4/sample/comparison.png
--------------------------------------------------------------------------------
/sample/face.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Jayanths9/Virtual_Makeup/0bb60d8e6318d5538c7ddca73e1832c7915c5ed4/sample/face.png
--------------------------------------------------------------------------------
/sample/facial_landmarks.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Jayanths9/Virtual_Makeup/0bb60d8e6318d5538c7ddca73e1832c7915c5ed4/sample/facial_landmarks.jpeg
--------------------------------------------------------------------------------
/sample/mask.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Jayanths9/Virtual_Makeup/0bb60d8e6318d5538c7ddca73e1832c7915c5ed4/sample/mask.png
--------------------------------------------------------------------------------
/sample/output_video.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Jayanths9/Virtual_Makeup/0bb60d8e6318d5538c7ddca73e1832c7915c5ed4/sample/output_video.mp4
--------------------------------------------------------------------------------
/utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import mediapipe as mp
3 | import cv2
4 |
5 |
# Mediapipe FaceMesh landmark indices (0-477) for each facial feature.
# Each multi-point list traces the outline of a feature as a closed polygon
# (the first index is repeated at the end), ready for cv2.fillPoly.
face_points={
    "BLUSH_LEFT": [50],
    "BLUSH_RIGHT": [280],
    "LEFT_EYE": [33, 246, 161, 160, 159, 158, 157, 173, 133, 155, 154, 153, 145, 144, 163, 7, 33],
    # NOTE(review): 298 looks like a typo for 398 (mediapipe's right-eye
    # contour uses 398) — confirm; RIGHT_EYE is unused by camera.py/image.py.
    "RIGHT_EYE": [362, 298, 384, 385, 386, 387, 388, 466, 263, 249, 390, 373, 374, 380, 381, 382, 362],
    "EYELINER_LEFT": [243, 112, 26, 22, 23, 24, 110, 25, 226, 130, 33, 7, 163, 144, 145, 153, 154, 155, 133, 243],
    "EYELINER_RIGHT": [463, 362, 382, 381, 380, 374, 373, 390, 249, 263, 359, 446, 255, 339, 254, 253, 252, 256, 341, 463],
    "EYESHADOW_LEFT": [226, 247, 30, 29, 27, 28, 56, 190, 243, 173, 157, 158, 159, 160, 161, 246, 33, 130, 226],
    "EYESHADOW_RIGHT": [463, 414, 286, 258, 257, 259, 260, 467, 446, 359, 263, 466, 388, 387, 386, 385, 384, 398, 362, 463],
    "FACE": [152, 148, 176, 149, 150, 136, 172, 58, 132, 93, 234, 127, 162, 21, 54, 103, 67, 109, 10, 338, 297, 332, 284, 251, 389, 454, 323, 401, 361, 435, 288, 397, 365, 379, 378, 400, 377, 152],
    "LIP_UPPER": [61, 185, 40, 39, 37, 0, 267, 269, 270, 409, 291, 308, 415, 310, 312, 13, 82, 81, 80, 191, 78],
    "LIP_LOWER": [61, 146, 91, 181, 84, 17, 314, 405, 321, 375, 291, 308, 324, 402, 317, 14, 87, 178, 88, 95, 78, 61],
    "EYEBROW_LEFT": [55, 107, 66, 105, 63, 70, 46, 53, 52, 65, 55],
    "EYEBROW_RIGHT": [285, 336, 296, 334, 293, 300, 276, 283, 295, 285]
}

# Mediapipe handles: the FaceMesh solution used for detection, and the
# drawing utils whose helper converts normalized landmarks to pixels
# (both used by read_landmarks below).
mp_face_mesh = mp.solutions.face_mesh
mp_drawing = mp.solutions.drawing_utils
26 |
27 |
# Display helper: blocking preview window for a single image.
def show_image(image: np.ndarray, msg: str = "Loaded Image"):
    """Open a cv2 window showing `image` and block until a key is pressed.

    image : image as a numpy array (BGR).
    msg : title of the cv2 window.
    """
    # Show a copy so the caller's array stays untouched.
    frame = image.copy()
    cv2.imshow(msg, frame)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
38 |
39 |
def read_landmarks(image: np.ndarray):
    """Detect one face and return {landmark index: (x, y) pixel coords}.

    image : BGR image (as produced by cv2.imread / cv2.VideoCapture).
    Returns an empty dict when no face is detected (the original raised
    TypeError because results.multi_face_landmarks is None in that case).
    """
    landmark_coordinates = {}
    # Mediapipe's FaceMesh expects RGB input, but OpenCV delivers BGR, so
    # convert first (passing BGR degrades detection quality).
    rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # FaceMesh returns normalized (0..1) landmarks for indices 0-477.
    with mp_face_mesh.FaceMesh(refine_landmarks=True) as face_mesh:
        results = face_mesh.process(rgb_image)

    if not results.multi_face_landmarks:
        # No face found in this frame.
        return landmark_coordinates
    face_landmarks = results.multi_face_landmarks[0].landmark

    # Scale the normalized coordinates to pixel coordinates.
    for idx, landmark in enumerate(face_landmarks):
        landmark_px = mp_drawing._normalized_to_pixel_coordinates(
            landmark.x, landmark.y, image.shape[1], image.shape[0]
        )
        # Points outside the image map to None and are skipped.
        if landmark_px:
            landmark_coordinates[idx] = landmark_px
    return landmark_coordinates
60 |
61 |
# Paint each requested facial feature onto the mask in its assigned color.
def add_mask(
    mask: np.ndarray, idx_to_coordinates: dict, face_connections: list, colors: list
):
    """Fill each feature polygon on `mask` and return a blurred copy.

    mask : zero-filled image (same shape as the input frame) to draw on.
    idx_to_coordinates : {landmark index: (x, y)} from read_landmarks().
    face_connections : list of landmark-index lists, one per feature.
    colors : [B, G, R] color per feature, aligned with face_connections.
    """
    for connection, color in zip(face_connections, colors):
        # Keep only landmarks that were actually detected; read_landmarks
        # skips points that fall outside the image, and the original code
        # raised KeyError on any missing index.
        points = np.array(
            [idx_to_coordinates[idx] for idx in connection if idx in idx_to_coordinates]
        )
        # fillPoly on an empty point set would raise; skip such features.
        if len(points) > 0:
            cv2.fillPoly(mask, [points], color)

    # Soften the polygon edges so the blended makeup looks natural.
    mask = cv2.GaussianBlur(mask, (7, 7), 4)
    return mask
--------------------------------------------------------------------------------