├── data
│   ├── exile.jpg
│   ├── joshi.jpg
│   ├── pastak.jpg
│   └── uesaka.jpg
├── app.yaml
├── Dockerfile
├── test.py
├── requirements.txt
├── README.md
├── static
│   └── style.css
├── .gitignore
├── main.py
├── templates
│   └── index.html
└── convert.py

/data/exile.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/uiur/neo-meguro-line/HEAD/data/exile.jpg
--------------------------------------------------------------------------------
/data/joshi.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/uiur/neo-meguro-line/HEAD/data/joshi.jpg
--------------------------------------------------------------------------------
/data/pastak.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/uiur/neo-meguro-line/HEAD/data/pastak.jpg
--------------------------------------------------------------------------------
/data/uesaka.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/uiur/neo-meguro-line/HEAD/data/uesaka.jpg
--------------------------------------------------------------------------------
/app.yaml:
--------------------------------------------------------------------------------
runtime: custom
env: flex

automatic_scaling:
  min_num_instances: 1
  max_num_instances: 10

entrypoint: gunicorn -b :$PORT main:app
service: mesen
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
FROM gcr.io/google_appengine/python

RUN apt-get update && apt-get install -y libopencv-dev python-opencv
# workaround for opencv
RUN ln /dev/null /dev/raw1394

COPY . /app

EXPOSE 8080
RUN pip install -r requirements.txt

CMD gunicorn -b :$PORT main:app
--------------------------------------------------------------------------------
/test.py:
--------------------------------------------------------------------------------
# just a simple example test
# generate results from the images in data/ into result/, and a human will check them

import os
import glob

os.system('rm -rf result && mkdir -p result')
for path in glob.glob('./data/*'):
    os.system('python convert.py %s > %s' % (path, path.replace('/data/', '/result/')))
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
click==6.6
Flask==0.11.1
google-api-python-client==1.5.3
gunicorn==19.6.0
httplib2==0.9.2
itsdangerous==0.24
Jinja2==2.8
logging==0.4.9.6
MarkupSafe==0.23
numpy==1.11.2
oauth2client==3.0.0
Pillow==3.4.1
pyasn1==0.1.9
pyasn1-modules==0.0.8
rsa==3.4.2
simplejson==3.10.0
six==1.10.0
uritemplate==0.6
Werkzeug==0.11.11
wheel==0.24.0
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# mesen
https://mesen-dot-uiu-server.appspot-preview.com/

![image](https://cloud.githubusercontent.com/assets/116057/20457220/ab4bc510-aec9-11e6-9316-709ab6e57d91.png)

mesen uses the Google Cloud Vision API to detect face landmarks.
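
Each face annotation the API returns carries a `landmarks` list, and `convert.draw_black_line` reads the eye and eye-boundary entries from it. One annotation looks roughly like this (a minimal sketch with made-up coordinates, not real API output):

```python
# Hypothetical example of one entry in the 'faceAnnotations' list returned by
# convert.detect_face; the coordinates are invented for illustration.
annotation = {
    'landmarks': [
        {'type': 'LEFT_EYE', 'position': {'x': 210.0, 'y': 180.0, 'z': 0.0}},
        {'type': 'RIGHT_EYE', 'position': {'x': 290.0, 'y': 182.0, 'z': 0.0}},
        {'type': 'LEFT_EYE_TOP_BOUNDARY', 'position': {'x': 210.0, 'y': 172.0, 'z': 0.0}},
        {'type': 'LEFT_EYE_BOTTOM_BOUNDARY', 'position': {'x': 210.0, 'y': 188.0, 'z': 0.0}},
        {'type': 'RIGHT_EYE_TOP_BOUNDARY', 'position': {'x': 290.0, 'y': 174.0, 'z': 0.0}},
        {'type': 'RIGHT_EYE_BOTTOM_BOUNDARY', 'position': {'x': 290.0, 'y': 190.0, 'z': 0.0}},
        # ...the API returns many more landmark types; these are the ones
        # convert.draw_black_line actually uses.
    ]
}
```

main.py passes each annotation's `landmarks` list straight to `convert.draw_black_line`, which fills a black rectangle over the eyes.
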
Run the server locally:

```sh
export GOOGLE_APPLICATION_CREDENTIALS=credentials.json
python main.py
```

Then post an image to it:

```sh
curl -sL -X POST localhost:8080 -F 'image=@data/uesaka.jpg' | imgcat
```

Or use it as a CLI:

```sh
python convert.py data/pastak.jpg > result.png
```

## Run app on Docker
Docker is used to deploy to Google App Engine.

You can launch the Docker environment with the following commands:
```sh
docker build -t mesen .
docker run -p 8080:8080 mesen
```

and open `http://$(docker-machine ip):8080`

## Deploy
The app runs in a container on the Google App Engine flexible environment.

```sh
gcloud app deploy
```
--------------------------------------------------------------------------------
/static/style.css:
--------------------------------------------------------------------------------
html {
  box-sizing: border-box;
}

*, *:before, *:after {
  box-sizing: inherit;
}

body {
  width: 100%;
  margin: 0;
  font-size: 14px;
  font-family: 'Avenir Next', 'Helvetica Neue', Helvetica, Arial, 'Hiragino Kaku Gothic ProN', Meiryo, 'MS PGothic', sans-serif;
}

h2 {
  font-size: 16px;
}

p {
  white-space: pre;
}

header {
  padding: 0 20px;
}

.choose-button {
  display: block;
  margin: 10px auto;
  width: 80%;
  border-radius: 4px;
  height: 60px;
  font-size: 20px;
  color: #fff;
  font-weight: bold;
  background-color: #6AA05A;
  border: 0;
}

.image {
  display: block;
  margin: 10px auto;
  width: 80%;
}

.form {
  display: none;
}

footer {
  text-align: center;
  margin-bottom: 20px;
}

.twitter-share-button-container {
  margin: 20px;
}

.fb_iframe_widget {
  vertical-align: top;
  display: inline-block !important;
}

@media (min-device-width: 800px) {
  body {
    width: 600px;
    margin: 0 auto;
  }

  header {
    width: 400px;
    padding: 0;
    margin: 0 auto;
  }

  .image {
    max-width: 400px;
  }

  .choose-button {
    max-width: 400px;
  }
}
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------

# Created by https://www.gitignore.io/api/python

### Python ###
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*,cover
.hypothesis/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# pyenv
.python-version

# celery beat schedule file
celerybeat-schedule

# dotenv
.env

# virtualenv
.venv/
venv/
ENV/

# Spyder project settings
.spyderproject

# Rope project settings
.ropeproject

result/
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
import io
import logging
from flask import Flask, request, send_file, render_template
import cv2
import convert
import numpy as np
from PIL import Image, JpegImagePlugin

app = Flask(__name__)


def rotate_if_needed(bytes):
    # map EXIF orientation values to the transform that undoes them
    convert_image = {
        1: lambda img: img,
        2: lambda img: img.transpose(Image.FLIP_LEFT_RIGHT),
        3: lambda img: img.transpose(Image.ROTATE_180),
        4: lambda img: img.transpose(Image.FLIP_TOP_BOTTOM),
        5: lambda img: img.transpose(Image.FLIP_LEFT_RIGHT).transpose(Image.ROTATE_90),
        6: lambda img: img.transpose(Image.ROTATE_270),
        7: lambda img: img.transpose(Image.FLIP_LEFT_RIGHT).transpose(Image.ROTATE_270),
        8: lambda img: img.transpose(Image.ROTATE_90),
    }

    img = Image.open(io.BytesIO(bytes))

    # the Cloud Vision API doesn't accept large images
    img.thumbnail((1600, 1600), Image.ANTIALIAS)
    new_img = img

    if img.format == "JPEG":
        exif = img._getexif()
        if exif:
            orientation = exif.get(0x112, 1)

            new_img = convert_image[orientation](img)

    return cv2.cvtColor(np.array(new_img), cv2.COLOR_BGR2RGB)


@app.route('/', methods=['GET', 'POST'])
def hello():
    if request.method == 'POST':
        # limit is 4MB for the Cloud Vision API
        f = request.files['image']

        image = rotate_if_needed(f.read())
        data = convert.detect_face(image, 500)
        for annotation in data:
            convert.draw_black_line(image, annotation['landmarks'])

        return send_file(
            io.BytesIO(convert.image_to_bytes(image)), mimetype='image/png'
        )

    else:
        return render_template('index.html')


@app.route('/_ah/health')
def health():
    return 'ok'


@app.errorhandler(500)
def server_error(e):
    logging.exception('An error occurred during a request.')
    return """
    An internal error occurred: <pre>{}</pre>
    See logs for full stacktrace.
    """.format(e), 500


if __name__ == '__main__':
    app.run(host='0.0.0.0', port=8080, debug=True)
--------------------------------------------------------------------------------
/templates/index.html:
--------------------------------------------------------------------------------
[HTML markup lost in this dump; only the page's visible text is recoverable]

ネオ目黒ライン NEO MEGURO LINE

それはあなたの目に黒い線を引く
It draws a black line on your eyes.
--------------------------------------------------------------------------------
/convert.py:
--------------------------------------------------------------------------------
import cv2
import numpy as np
import argparse
import base64
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials


def get_vision_service():
    credentials = GoogleCredentials.get_application_default()
    return discovery.build('vision', 'v1', credentials=credentials)


def detect_face(image, max_results=4):
    image_content = image_to_bytes(image)
    batch_request = [{
        'image': {
            'content': base64.b64encode(image_content).decode('utf-8')
        },
        'features': [{
            'type': 'FACE_DETECTION',
            'maxResults': max_results,
        }]
    }]

    service = get_vision_service()
    request = service.images().annotate(body={
        'requests': batch_request,
    })
    response = request.execute()
    first_response = response['responses'][0]
    if 'error' in first_response:
        print(first_response['error'])
        raise RuntimeError(first_response['error'])

    if 'faceAnnotations' not in first_response:
        return []

    return first_response['faceAnnotations']


def image_to_bytes(image):
    flag, buf = cv2.imencode('.png', image)
    return buf.tobytes()


def point_to_vector(p):
    return np.array([p['x'], p['y']])


def draw_black_line(image, positions):
    PADDING_VERTICAL_RATIO = 1.25
    PADDING_HORIZONTAL_RATIO = 0.4

    type_to_position = {}
    for position in positions:
        p = position['position']
        for k, v in p.items():
            p[k] = int(v)

        type_to_position[position['type']] = p

    left = point_to_vector(type_to_position['LEFT_EYE'])
    right = point_to_vector(type_to_position['RIGHT_EYE'])

    left_top = np.array(left)
    left_bottom = np.array(left)

    right_top = np.array(right)
    right_bottom = np.array(right)

    horizontal_direction = right - left
    # unit normal, perpendicular to the line between the eyes
    normal = np.array([horizontal_direction[1], -horizontal_direction[0]], int)
    normal = normal / np.linalg.norm(normal)

    # vertical
    left_height = np.linalg.norm(point_to_vector(type_to_position['LEFT_EYE_BOTTOM_BOUNDARY']) - point_to_vector(type_to_position['LEFT_EYE_TOP_BOUNDARY']))
    right_height = np.linalg.norm(point_to_vector(type_to_position['RIGHT_EYE_BOTTOM_BOUNDARY']) - point_to_vector(type_to_position['RIGHT_EYE_TOP_BOUNDARY']))

    height = max(left_height, right_height)
    left_top += np.array(height * PADDING_VERTICAL_RATIO * normal, int)
    left_bottom -= np.array(height * PADDING_VERTICAL_RATIO * normal, int)

    right_top += np.array(height * PADDING_VERTICAL_RATIO * normal, int)
    right_bottom -= np.array(height * PADDING_VERTICAL_RATIO * normal, int)

    horizontal_pad = np.array(PADDING_HORIZONTAL_RATIO * (right - left), int)
    left_top -= horizontal_pad
    left_bottom -= horizontal_pad
    right_top += horizontal_pad
    right_bottom += horizontal_pad

    cv2.fillPoly(image, [np.array([
        left_top,
        left_bottom,
        right_bottom,
        right_top,
    ])], color=(0, 0, 0), lineType=cv2.CV_AA)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('image', help='a path to an image')
    args = parser.parse_args()

    image = cv2.imread(args.image)
    data = detect_face(image, 15)

    for annotation in data:
        draw_black_line(image, annotation['landmarks'])

    print(image_to_bytes(image))
--------------------------------------------------------------------------------
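
A note on the geometry: `draw_black_line` builds the black band from the two eye centers, padding it by 1.25× the larger eye height along the direction perpendicular to the eye-to-eye line (`PADDING_VERTICAL_RATIO`) and by 0.4× the eye distance on each side (`PADDING_HORIZONTAL_RATIO`). A minimal standalone sketch of that computation, using hypothetical eye coordinates rather than real API output:

```python
import numpy as np

# Hypothetical eye landmarks (pixel coordinates), for illustration only.
left = np.array([210, 180])    # LEFT_EYE
right = np.array([290, 182])   # RIGHT_EYE
eye_height = 16.0              # max of the left/right top-to-bottom eye distances

direction = right - left
normal = np.array([direction[1], -direction[0]], int)
normal = normal / np.linalg.norm(normal)  # unit vector perpendicular to the eye line

vertical_pad = np.array(eye_height * 1.25 * normal, int)  # PADDING_VERTICAL_RATIO
horizontal_pad = np.array(0.4 * direction, int)           # PADDING_HORIZONTAL_RATIO

corners = [
    left + vertical_pad - horizontal_pad,    # left_top
    left - vertical_pad - horizontal_pad,    # left_bottom
    right - vertical_pad + horizontal_pad,   # right_bottom
    right + vertical_pad + horizontal_pad,   # right_top
]
print(corners)  # the four corners that cv2.fillPoly fills with black
```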