├── requirements.txt
├── main.py
├── sample.py
├── landmarks.py
├── README.md
├── .gitignore
├── api.py
└── utils.py

/requirements.txt:
--------------------------------------------------------------------------------
absl-py==0.13.0
asgiref==3.4.1
attrs==21.2.0
certifi==2021.5.30
charset-normalizer==2.0.3
click==8.0.1
cycler==0.10.0
fastapi==0.66.0
h11==0.12.0
httptools==0.2.0
idna==3.2
kiwisolver==1.3.1
matplotlib==3.4.2
mediapipe==0.8.6
numpy==1.21.0
opencv-contrib-python==4.5.3.56
Pillow==8.3.1
protobuf==3.17.3
pydantic==1.8.2
pyparsing==2.4.7
python-dateutil==2.8.2
python-dotenv==0.18.0
python-multipart==0.0.5
PyYAML==5.4.1
requests==2.26.0
six==1.16.0
starlette==0.14.2
typing-extensions==3.10.0.0
urllib3==1.26.6
uvicorn==0.14.0
uvloop==0.15.3
watchgod==0.7
websockets==9.1
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
import cv2

from utils import apply_makeup

# Video input from the webcam
video_capture = cv2.VideoCapture(0)
while True:
    ret_val, frame = video_capture.read()
    if ret_val:
        frame = cv2.flip(frame, 1)  # Flip only after confirming a frame was read
        cv2.imshow("Original", frame)
        feat_applied = apply_makeup(frame, True, 'lips', False)
        cv2.imshow("Feature", feat_applied)

    if cv2.waitKey(1) == 27:  # Esc to quit
        break

video_capture.release()
cv2.destroyAllWindows()

# # Static Images
# image = cv2.imread("model.jpg", cv2.IMREAD_UNCHANGED)
# output = apply_makeup(image, False, 'foundation', False)
#
# cv2.imshow("Original", image)
# cv2.imshow("Feature", output)
#
# cv2.waitKey(0)
# cv2.destroyAllWindows()
--------------------------------------------------------------------------------
/sample.py:
--------------------------------------------------------------------------------
"""
Sample script that makes an API call on every webcam frame.
Note: round-tripping each frame through the HTTP endpoint is too slow to be
truly real-time; this script only demonstrates the endpoint.
"""
5 | """ 6 | 7 | import cv2 8 | import requests 9 | import numpy as np 10 | from io import BytesIO 11 | from PIL import Image 12 | 13 | video_capture = cv2.VideoCapture(0) # Start video capture from the webcam 14 | while True: 15 | ret_val, frame = video_capture.read() # Read each frame 16 | if ret_val: # If frame is found 17 | frame = cv2.flip(frame, 1) # Flip the frame -> Selfie 18 | frame_bytes = cv2.imencode(".png", frame)[1].tobytes() # Convert image data to bytes, to send to the endpoint 19 | files = {"file": frame_bytes} 20 | payload = {"choice": "lips"} 21 | response = requests.post("http://127.0.0.1:8000/apply-makeup/", files=files, params=payload) # API Call 22 | if response.status_code == 200: 23 | image_content = response.content # Response from the API 24 | rec_image = np.array(Image.open(BytesIO(image_content))) # Convert the bytes response to numpy array 25 | rec_image = cv2.cvtColor(rec_image, cv2.COLOR_BGR2RGB) 26 | # Display both the original and the response 27 | cv2.imshow("Original", frame) 28 | cv2.imshow("Feature", rec_image) 29 | 30 | if cv2.waitKey(1) == 27: 31 | break 32 | 33 | cv2.destroyAllWindows() 34 | -------------------------------------------------------------------------------- /landmarks.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | from typing import List, Iterable 4 | from mediapipe.python.solutions.face_mesh import FaceMesh 5 | 6 | 7 | def detect_landmarks(src: np.ndarray, is_stream: bool = False): 8 | """ 9 | Given an image `src` retrieves the facial landmarks associated with it 10 | """ 11 | with FaceMesh(static_image_mode=not is_stream, max_num_faces=1) as face_mesh: 12 | results = face_mesh.process(cv2.cvtColor(src, cv2.COLOR_BGR2RGB)) 13 | if results.multi_face_landmarks: 14 | return results.multi_face_landmarks[0].landmark 15 | return None 16 | 17 | 18 | def normalize_landmarks(landmarks, height: int, width: int, mask: Iterable = None): 19 | """ 20 | The landmarks returned by mediapipe have coordinates between [0, 1]. 21 | This function normalizes them in the range of the image dimensions so they can be played with. 22 | """ 23 | normalized_landmarks = np.array([(int(landmark.x * width), int(landmark.y * height)) for landmark in landmarks]) 24 | if mask: 25 | normalized_landmarks = normalized_landmarks[mask] 26 | return normalized_landmarks 27 | 28 | 29 | def plot_landmarks(src: np.array, landmarks: List, show: bool = False): 30 | """ 31 | Given a source image and a list of landmarks plots them onto the image 32 | """ 33 | dst = src.copy() 34 | for x, y in landmarks: 35 | cv2.circle(dst, (x, y), 2, 0, cv2.FILLED) 36 | if show: 37 | print("Displaying image plotted with landmarks") 38 | cv2.imshow("Plotted Landmarks", dst) 39 | return dst 40 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Virtual-Makeup 2 | Python, OpenCV based virtual tryon for makeup lip-color, blush, foundation ~~and prolly eyewear too~~ 3 | 4 | These python scripts add "make up" on to an input. The input is either a static image of a person's face or live webcam feed. 5 | Currently only lipcolor and face blush is supported and the color of defaults to `rgb(157, 0, 153)` but it can be changed. 6 | 7 | # How to use 8 | 9 | 1. Clone this repository 10 | 2. Create a virtual environment using `python3 -m venv env` or anyother way of creating virtual envs 11 | 3. 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Virtual-Makeup
Python and OpenCV based virtual try-on for makeup: lip color, blush, foundation ~~and probably eyewear too~~

These Python scripts apply "makeup" to an input, which is either a static image of a person's face or a live webcam feed.
Currently lip color, blush and foundation are supported, and the color defaults to `rgb(157, 0, 153)`, but it can be changed.

# How to use

1. Clone this repository
2. Create a virtual environment using `python3 -m venv env` or any other way of creating virtual envs
3. Install the requirements using `pip install -r requirements.txt`
4. To try the makeup process on the included model.jpg, comment out the video capture code, uncomment the static image code and run `python main.py`

# Sample outputs from this implementation

Original Sample            |Blush Applied
:-------------------------:|:-------------------------:
![Original Image](https://user-images.githubusercontent.com/40448838/125641690-4cc137cd-4e20-4e8b-bbc6-0d81f1a50f4a.png) | ![Light Pink blush applied](https://user-images.githubusercontent.com/40448838/125641612-e5075a25-7ab0-41d4-b1f6-e1d7e55ccccf.png)

Original Sample            |Lip Color applied
:-------------------------:|:-------------------------:
![Original Image](https://user-images.githubusercontent.com/40448838/125641792-46761f24-6418-4004-9381-910f9fbe5ef0.png) | ![Image with Lip color applied](https://user-images.githubusercontent.com/40448838/125641817-c0755878-2358-4e51-92bb-87531a2e04da.png)

Original Sample            |Foundation applied
:-------------------------:|:-------------------------:
![Original Image](https://user-images.githubusercontent.com/40448838/125849113-2f9e5147-ec37-4d85-991d-fe3b24240d83.png) | ![image](https://user-images.githubusercontent.com/40448838/125849001-54569ffd-194f-4755-a568-5f7f6a854e04.png)

## How it works
Using mediapipe, I detect 468 facial landmarks, pull out the ones required for the chosen feature (the lip and cheek landmarks), and then use simple image processing techniques to achieve the end result.

## File structure
`main.py` -> Primary file, reads the input image and applies the makeup
`api.py` -> Contains the FastAPI endpoints
`landmarks.py` -> Contains all the functionality for the landmarks (detection, normalization etc.)
`sample.py` -> A sample script that uses `python-requests` to demonstrate the endpoints
`utils.py` -> Contains utility functions

## Fast API
Provides an OpenAPI spec by default. Run the server using `uvicorn api:app`, then navigate to localhost:8000/docs to view the OpenAPI spec or to localhost:8000/redoc to view the ReDoc spec. Currently two API endpoints are supported, `apply-makeup` and `apply-feature` (needs a fix, doesn't work as of now).
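
For example, a minimal client call with `requests` (a sketch: it assumes the server is already running locally via `uvicorn api:app` and that a `model.jpg` exists in the working directory):

```python
import requests

with open("model.jpg", "rb") as f:
    response = requests.post(
        "http://127.0.0.1:8000/apply-makeup/",
        params={"choice": "lips"},  # one of: lips, blush, foundation
        files={"file": f},
    )

with open("output.png", "wb") as out:
    out.write(response.content)  # the endpoint streams back a PNG image
```

The response body is the PNG image itself, so it can be written straight to disk; `sample.py` does the same thing frame-by-frame from a webcam.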
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# Files specific to the implementation, but can be ignored
# (note: landmarks.py is a required source module and must not be ignored)
obama.jpg
.idea
foreground.py
test.py
model.png
billie.jpg
model-4.jpeg
--------------------------------------------------------------------------------
/api.py:
--------------------------------------------------------------------------------
"""
API endpoints using FastAPI for virtual try-on.
TODO: Synchronous API endpoints are sufficient. Remove all async, await
"""

import numpy as np
from fastapi import FastAPI, File, UploadFile
from io import BytesIO
from PIL import Image
from utils import apply_makeup, apply_feature
from starlette.responses import StreamingResponse
import cv2
import enum
from typing import List

app = FastAPI(title="API endpoints for virtual makeup",
              description="These API endpoints can be used to try virtual face makeup - lip_color, blush, foundation")


class FeatureChoice(str, enum.Enum):
    """
    An Enum for the choice of feature.
    """
    lips = 'lips'
    blush = 'blush'
    foundation = 'foundation'


@app.get('/')
def root():
    return {"title": "Well...\nHello there! ",
            "message": "Nothing much to see here but HEY! try out the other endpoints. "
                       "Hope you like them, you can read more about them at http://127.0.0.1:8000/docs"}
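
# Per the TODO in the module docstring, a synchronous version of an endpoint
# might look like the sketch below (an assumption about the intended rewrite,
# not current code; the route name is hypothetical). `UploadFile.file` exposes
# the underlying file object for blocking reads.
#
#   @app.post('/apply-makeup-sync/')
#   def try_makeup_sync(choice: FeatureChoice, file: UploadFile = File(...)):
#       image = np.array(Image.open(BytesIO(file.file.read())))
#       output = cv2.cvtColor(apply_makeup(image, False, choice.value, False), cv2.COLOR_BGR2RGB)
#       ret_val, output = cv2.imencode(".png", output)
#       return StreamingResponse(BytesIO(output), media_type="image/png")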
" 33 | "Hope you like them, you can read more about them at http://127.0.0.1:8000/docs"} 34 | 35 | 36 | @app.get('/apply-makeup/') 37 | def info_try_makeup(): 38 | """ 39 | ### Information about the post request on the same route. 40 | """ 41 | return { 42 | "message": "Perform a post request on the same route", 43 | "info": "A post request on this route with the necessary query parameters (choice, file) " 44 | "returns an image with the feature applied." 45 | } 46 | 47 | 48 | @app.post('/apply-makeup/') 49 | async def try_makeup(choice: FeatureChoice, file: UploadFile = File(...)): 50 | """ 51 | Given a choice (`lips`, `blush`, `foundation`) and an input image returns the output with the applied feature 52 | """ 53 | image = np.array(Image.open(BytesIO(await file.read()))) 54 | output = cv2.cvtColor(apply_makeup(image, False, choice.value, False), cv2.COLOR_BGR2RGB) 55 | ret_val, output = cv2.imencode(".png", output) 56 | return StreamingResponse(BytesIO(output), media_type="image/png") 57 | 58 | 59 | @app.get('/apply-feature/') 60 | def info_try_feature(): 61 | """ 62 | ### Information about the post request on the same route. 63 | """ 64 | return { 65 | "message": "Perform a post request on the same route", 66 | "info": "A post request on this route with the necessary query parameters (choice, file) " 67 | "returns an image with the feature applied.", 68 | "Note": "This method is specifically to reduce the processing load on the server, " 69 | "supply this with normalized landmark coordinates for best performance" 70 | } 71 | 72 | 73 | @app.post('/apply-feature/') 74 | async def try_feature(choice: FeatureChoice, landmarks: List[List[int]], normalize: bool, 75 | file: UploadFile = File(...)): 76 | """ 77 | Given a choice (`lips`, `blush`, `foundation`) and an input image returns the output with the applied feature. 78 | Specifically to **reduce the processing load on the server**, preferably detect and normalize the landmarks 79 | before making a call to this endpoint 80 | """ 81 | image = np.array(Image.open(BytesIO(await file.read()))) 82 | output = await apply_feature(image, choice, landmarks, normalize, False) 83 | return StreamingResponse(BytesIO(output), media_type="image/png") 84 | -------------------------------------------------------------------------------- /utils.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | from landmarks import detect_landmarks, normalize_landmarks, plot_landmarks 4 | from mediapipe.python.solutions.face_detection import FaceDetection 5 | 6 | upper_lip = [61, 185, 40, 39, 37, 0, 267, 269, 270, 408, 415, 272, 271, 268, 12, 38, 41, 42, 191, 78, 76] 7 | lower_lip = [61, 146, 91, 181, 84, 17, 314, 405, 320, 307, 308, 324, 318, 402, 317, 14, 87, 178, 88, 95] 8 | face_conn = [10, 338, 297, 332, 284, 251, 389, 264, 447, 376, 433, 288, 367, 397, 365, 379, 378, 400, 377, 152, 9 | 148, 176, 149, 150, 136, 172, 138, 213, 147, 234, 127, 162, 21, 54, 103, 67, 109] 10 | cheeks = [425, 205] 11 | 12 | 13 | def apply_makeup(src: np.ndarray, is_stream: bool, feature: str, show_landmarks: bool = False): 14 | """ 15 | Takes in a source image and applies effects onto it. 
16 | """ 17 | ret_landmarks = detect_landmarks(src, is_stream) 18 | height, width, _ = src.shape 19 | feature_landmarks = None 20 | if feature == 'lips': 21 | feature_landmarks = normalize_landmarks(ret_landmarks, height, width, upper_lip + lower_lip) 22 | mask = lip_mask(src, feature_landmarks, [153, 0, 157]) 23 | output = cv2.addWeighted(src, 1.0, mask, 0.4, 0.0) 24 | elif feature == 'blush': 25 | feature_landmarks = normalize_landmarks(ret_landmarks, height, width, cheeks) 26 | mask = blush_mask(src, feature_landmarks, [153, 0, 157], 50) 27 | output = cv2.addWeighted(src, 1.0, mask, 0.3, 0.0) 28 | else: # Defaults to blush for any other thing 29 | skin_mask = mask_skin(src) 30 | output = np.where(src * skin_mask >= 1, gamma_correction(src, 1.75), src) 31 | if show_landmarks and feature_landmarks is not None: 32 | plot_landmarks(src, feature_landmarks, True) 33 | return output 34 | 35 | 36 | def apply_feature(src: np.ndarray, feature: str, landmarks: list, normalize: bool = False, 37 | show_landmarks: bool = False): 38 | """ 39 | Performs similar to `apply_makeup` but needs the landmarks explicitly 40 | Specifically implemented to reduce the computation on the server 41 | """ 42 | height, width, _ = src.shape 43 | if normalize: 44 | landmarks = normalize_landmarks(landmarks, height, width) 45 | if feature == 'lips': 46 | mask = lip_mask(src, landmarks, [153, 0, 157]) 47 | output = cv2.addWeighted(src, 1.0, mask, 0.4, 0.0) 48 | elif feature == 'blush': 49 | mask = blush_mask(src, landmarks, [153, 0, 157], 50) 50 | output = cv2.addWeighted(src, 1.0, mask, 0.3, 0.0) 51 | else: # Does not require any landmarks for skin masking -> Foundation 52 | skin_mask = mask_skin(src) 53 | output = np.where(src * skin_mask >= 1, gamma_correction(src, 1.75), src) 54 | if show_landmarks: # Refrain from using this during an API Call 55 | plot_landmarks(src, landmarks, True) 56 | return output 57 | 58 | 59 | def lip_mask(src: np.ndarray, points: np.ndarray, color: list): 60 | """ 61 | Given a src image, points of lips and a desired color 62 | Returns a colored mask that can be added to the src 63 | """ 64 | mask = np.zeros_like(src) # Create a mask 65 | mask = cv2.fillPoly(mask, [points], color) # Mask for the required facial feature 66 | # Blurring the region, so it looks natural 67 | # TODO: Get glossy finishes for lip colors, instead of blending in replace the region 68 | mask = cv2.GaussianBlur(mask, (7, 7), 5) 69 | return mask 70 | 71 | 72 | def blush_mask(src: np.ndarray, points: np.ndarray, color: list, radius: int): 73 | """ 74 | Given a src image, points of the cheeks, desired color and radius 75 | Returns a colored mask that can be added to the src 76 | """ 77 | # TODO: Make the effect more subtle 78 | mask = np.zeros_like(src) # Mask that will be used for the cheeks 79 | for point in points: 80 | mask = cv2.circle(mask, point, radius, color, cv2.FILLED) # Blush => Color filled circle 81 | x, y = point[0] - radius, point[1] - radius # Get the top-left of the mask 82 | mask[y:y + 2 * radius, x:x + 2 * radius] = vignette(mask[y:y + 2 * radius, x:x + 2 * radius], 83 | 10) # Vignette on the mask 84 | 85 | return mask 86 | 87 | 88 | def mask_skin(src: np.ndarray): 89 | """ 90 | Given a source image of a person (face image) 91 | returns a mask that can be identified as the skin 92 | """ 93 | lower = np.array([0, 133, 77], dtype='uint8') # The lower bound of skin color 94 | upper = np.array([255, 173, 127], dtype='uint8') # Upper bound of skin color 95 | dst = cv2.cvtColor(src, cv2.COLOR_BGR2YCR_CB) # 


def lip_mask(src: np.ndarray, points: np.ndarray, color: list):
    """
    Given a src image, the points of the lips and a desired color,
    returns a colored mask that can be added to the src.
    """
    mask = np.zeros_like(src)  # Create a mask
    mask = cv2.fillPoly(mask, [points], color)  # Mask for the required facial feature
    # Blur the region so it looks natural
    # TODO: Get glossy finishes for lip colors; instead of blending in, replace the region
    mask = cv2.GaussianBlur(mask, (7, 7), 5)
    return mask


def blush_mask(src: np.ndarray, points: np.ndarray, color: list, radius: int):
    """
    Given a src image, the points of the cheeks, a desired color and a radius,
    returns a colored mask that can be added to the src.
    """
    # TODO: Make the effect more subtle
    mask = np.zeros_like(src)  # Mask that will be used for the cheeks
    for point in points:
        mask = cv2.circle(mask, tuple(point), radius, color, cv2.FILLED)  # Blush => Color-filled circle
        x, y = point[0] - radius, point[1] - radius  # Get the top-left of the mask
        mask[y:y + 2 * radius, x:x + 2 * radius] = vignette(mask[y:y + 2 * radius, x:x + 2 * radius],
                                                            10)  # Vignette on the mask

    return mask


def mask_skin(src: np.ndarray):
    """
    Given a source image of a person (face image),
    returns a mask of the regions identified as skin.
    """
    lower = np.array([0, 133, 77], dtype='uint8')  # The lower bound of skin color
    upper = np.array([255, 173, 127], dtype='uint8')  # The upper bound of skin color
    dst = cv2.cvtColor(src, cv2.COLOR_BGR2YCR_CB)  # Convert to the YCrCb color space
    skin_mask = cv2.inRange(dst, lower, upper)  # Get the skin
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
    skin_mask = cv2.dilate(skin_mask, kernel, iterations=2)[..., np.newaxis]  # Dilate to fill in blobs

    if skin_mask.ndim != 3:
        skin_mask = np.expand_dims(skin_mask, axis=-1)
    return (skin_mask / 255).astype("uint8")  # A binary mask containing only 1s and 0s


def face_mask(src: np.ndarray, points: np.ndarray):
    """
    Given a list of face landmarks, returns a closed polygon mask for them.
    """
    mask = np.zeros_like(src)
    mask = cv2.fillPoly(mask, [points], (255, 255, 255))
    return mask


def clicked_at(event, x, y, flags, params):
    """
    A useful callback that prints the landmark index when a landmark is clicked.
    Note: Very sensitive to location; the click must land exactly on the pixel.
    """
    # TODO: Add some atol to np.allclose
    if event == cv2.EVENT_LBUTTONDOWN:
        print(f"Clicked at {x, y}")
        point = np.array([x, y])
        landmarks = params.get("landmarks", None)
        image = params.get("image", None)
        if landmarks is not None and image is not None:
            for idx, landmark in enumerate(landmarks):
                if np.allclose(landmark, point):
                    print(f"Landmark: {idx}")
                    break
            else:  # Runs only if the loop finished without finding a match
                print("Found no landmark close to the click")


def vignette(src: np.ndarray, sigma: int):
    """
    Given a src image and a sigma, returns a vignette of the src.
    """
    height, width, _ = src.shape
    kernel_x = cv2.getGaussianKernel(width, sigma)
    kernel_y = cv2.getGaussianKernel(height, sigma)

    kernel = kernel_y * kernel_x.T
    mask = kernel / kernel.max()
    blurred = cv2.convertScaleAbs(src.copy() * np.expand_dims(mask, axis=-1))
    return blurred


def face_bbox(src: np.ndarray, offset_x: int = 0, offset_y: int = 0):
    """
    Performs face detection on a src image; returns the bounding box coordinates with
    an optional offset applied to them.
    """
    height, width, _ = src.shape
    with FaceDetection(model_selection=0) as detector:  # 0 -> distance <= 2 meters from the camera
        results = detector.process(cv2.cvtColor(src, cv2.COLOR_BGR2RGB))
    if not results.detections:
        return None
    results = results.detections[0].location_data
    x_min, y_min = results.relative_bounding_box.xmin, results.relative_bounding_box.ymin
    box_height, box_width = results.relative_bounding_box.height, results.relative_bounding_box.width
    x_min = int(width * x_min) - offset_x
    y_min = int(height * y_min) - offset_y
    box_height, box_width = int(height * box_height) + offset_y, int(width * box_width) + offset_x
    return (x_min, y_min), (box_height, box_width)


def gamma_correction(src: np.ndarray, gamma: float, coefficient: int = 1):
    """
    Performs gamma correction on a source image
    gamma > 1 => Darker image
    gamma < 1 => Brighter image
    """
    dst = src.copy()
    dst = dst / 255.  # Converted to float64
    dst = coefficient * np.power(dst, gamma)
    dst = (dst * 255).astype('uint8')
    return dst
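
# A quick sanity check for `gamma_correction` (a sketch): a mid-gray value of
# 128 maps to 255 * (128 / 255) ** 1.75 ≈ 76, so gamma = 1.75 darkens the
# image - which is what the foundation effect above relies on.
#
#   darker = gamma_correction(image, 1.75)
#   brighter = gamma_correction(image, 0.5)
--------------------------------------------------------------------------------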