├── .github
└── workflows
│ └── ci.yml
├── .gitignore
├── Aptfile
├── Procfile
├── README.md
├── app.py
├── config.py
├── images
└── webappscreen.jpg
├── models
├── lotr_mlp_10c_labelencoder.pickle
├── lotr_mlp_10c_recognizer.pickle
├── lotr_mlp_2c_labelencoder.pickle
├── lotr_mlp_2c_recognizer.pickle
├── lotr_mlp_5c_labelencoder.pickle
├── lotr_mlp_5c_recognizer.pickle
├── lotr_nb_10c_labelencoder.pickle
├── lotr_nb_10c_recognizer.pickle
├── lotr_nb_2c_labelencoder.pickle
├── lotr_nb_2c_recognizer.pickle
├── lotr_nb_5c_labelencoder.pickle
├── lotr_nb_5c_recognizer.pickle
├── lotr_svm_10c_labelencoder.pickle
├── lotr_svm_10c_recognizer.pickle
├── lotr_svm_2c_labelencoder.pickle
├── lotr_svm_2c_recognizer.pickle
├── lotr_svm_5c_labelencoder.pickle
├── lotr_svm_5c_recognizer.pickle
└── openface_nn4.small2.v1.t7
├── presentation
└── FaceRecognitionWebAppTutorial.pdf
├── requirements-dev.txt
├── requirements.txt
├── runtime.txt
├── source
├── __init__.py
├── demo_script.py
├── embedding_extraction.py
├── face_recognition.py
├── model_training.py
├── resize_images.py
└── utils.py
├── templates
└── index.html
├── tests
├── __init__.py
├── data
│ └── gollum4.jpg
└── test_api.py
└── tutorial
├── tutorial.html
└── tutorial.ipynb
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
name: CI

on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]

jobs:
  build:
    # Run each matrix entry on its own OS. The original hard-coded
    # ubuntu-latest here, so the windows/macos matrix entries (and their
    # OS-specific cache steps below) never actually ran on those systems.
    runs-on: ${{ matrix.operating-system }}
    strategy:
      matrix:
        operating-system: [ubuntu-latest, windows-latest, macos-latest]
        python-version: [3.6, 3.7, 3.8]
      fail-fast: false

    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
      # Cache keys use matrix.operating-system (matrix.os was never defined)
      # and hash requirements.txt (this repo has no setup.py, so the old
      # hashFiles('**/setup.py') always produced an empty hash).
      - name: Restore Ubuntu cache
        uses: actions/cache@v1
        if: matrix.operating-system == 'ubuntu-latest'
        with:
          path: ~/.cache/pip
          key: ${{ matrix.operating-system }}-${{ matrix.python-version }}-${{ hashFiles('**/requirements.txt') }}
          restore-keys: ${{ matrix.operating-system }}-${{ matrix.python-version }}-
      - name: Restore MacOS cache
        uses: actions/cache@v1
        if: matrix.operating-system == 'macos-latest'
        with:
          path: ~/Library/Caches/pip
          key: ${{ matrix.operating-system }}-${{ matrix.python-version }}-${{ hashFiles('**/requirements.txt') }}
          restore-keys: ${{ matrix.operating-system }}-${{ matrix.python-version }}-
      - name: Restore Windows cache
        uses: actions/cache@v1
        if: matrix.operating-system == 'windows-latest'
        with:
          path: ~\AppData\Local\pip\Cache
          key: ${{ matrix.operating-system }}-${{ matrix.python-version }}-${{ hashFiles('**/requirements.txt') }}
          restore-keys: ${{ matrix.operating-system }}-${{ matrix.python-version }}-
      - name: Update pip
        run: python -m pip install --upgrade pip
      - name: Install dependencies
        # Quoting "setuptools>=42.0.0" is required: unquoted, the shell
        # parses ">=42.0.0" as an output redirection to a file named "=42.0.0".
        run: |
          pip install "setuptools>=42.0.0"
          pip install -r requirements.txt
      - name: Test with unittest
        run: |
          python -m unittest
56 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 | MANIFEST
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *.cover
47 | .hypothesis/
48 | .pytest_cache/
49 |
50 | # Translations
51 | *.mo
52 | *.pot
53 |
54 | # Django stuff:
55 | *.log
56 | local_settings.py
57 | db.sqlite3
58 |
59 | # Flask stuff:
60 | instance/
61 | .webassets-cache
62 |
63 | # Scrapy stuff:
64 | .scrapy
65 |
66 | # Sphinx documentation
67 | docs/_build/
68 |
69 | # PyBuilder
70 | target/
71 |
72 | # Jupyter Notebook
73 | .ipynb_checkpoints
74 |
75 | # pyenv
76 | .python-version
77 |
78 | # celery beat schedule file
79 | celerybeat-schedule
80 |
81 | # SageMath parsed files
82 | *.sage.py
83 |
84 | # Environments
85 | .env
86 | .venv
87 | env/
88 | venv/
89 | ENV/
90 | env.bak/
91 | venv.bak/
92 |
93 | # Spyder project settings
94 | .spyderproject
95 | .spyproject
96 |
97 | # Rope project settings
98 | .ropeproject
99 |
100 | # mkdocs documentation
101 | /site
102 |
103 | # mypy
104 | .mypy_cache/
105 |
106 | lotr
107 | images.zip
--------------------------------------------------------------------------------
/Aptfile:
--------------------------------------------------------------------------------
1 | libsm6
2 | libxrender1
3 | libfontconfig1
4 | libice6
--------------------------------------------------------------------------------
/Procfile:
--------------------------------------------------------------------------------
1 | web: gunicorn app:app --log-file=-
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Flask Face Recognition
2 |
3 |
4 |
5 | A face recognition API using Python, Flask, Opencv, Pytorch, Heroku.
6 |
7 | Live demo: https://face-recognition-api-flask.herokuapp.com (Temporarily unavailable due to [this issue](https://github.com/adriangb/scikeras/issues/221))
8 |
9 | [Tutorial notebook](/tutorial/tutorial.ipynb) | [Tutorial presentation](/presentation/FaceRecognitionWebAppTutorial.pdf)
10 |
11 | 
12 |
13 | # App Usage
14 |
15 | Run face detection app from [face-detection-app-tutorial repo](https://github.com/fcakyon/face-detection-app-tutorial):
16 |
17 | ```console
18 | git clone https://github.com/fcakyon/face-detection-app-tutorial.git
19 | cd face-detection-app-tutorial
20 | python app.py
21 | ```
22 |
23 | Then run face recognition app from this repo:
24 |
25 | ```console
26 | git clone https://github.com/fcakyon/face-recognition-app-tutorial.git
27 | cd face-recognition-app-tutorial
28 | python app.py
29 | ```
30 |
--------------------------------------------------------------------------------
/app.py:
--------------------------------------------------------------------------------
1 | import os
2 | from flask import Flask,jsonify,request,render_template
3 | from source.face_recognition import recognize_faces
4 | from source.utils import draw_rectangles, read_image, prepare_image
5 | from source.model_training import create_mlp_model
6 |
app = Flask(__name__)

# Load settings (e.g. DETECTION_API_URL) from config.py at the project root.
app.config.from_object('config')
# Folder name for uploaded images; stored in the Flask config for handlers.
UPLOAD_FOLDER = os.path.basename('uploads')
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
12 |
@app.route('/')
def home():
    """Render the main upload page."""
    return render_template('index.html')
16 |
@app.route('/recognize', methods=['POST'])
def detect():
    """Recognize faces in an uploaded image and return them as JSON.

    Expects a multipart/form-data POST with an 'image' file field.
    Returns: JSON body {"recognitions": [...]} as produced by recognize_faces.
    """
    file = request.files['image']

    # Read the uploaded file into an OpenCV image
    image = read_image(file)

    # Recognize faces with the 10-class MLP model.
    # os.path.join is the portable idiom for building these paths.
    classifier_model_path = os.path.join("models", "lotr_mlp_10c_recognizer.pickle")
    label_encoder_path = os.path.join("models", "lotr_mlp_10c_labelencoder.pickle")
    faces = recognize_faces(image, classifier_model_path, label_encoder_path,
                            detection_api_url=app.config["DETECTION_API_URL"])

    return jsonify(recognitions=faces)
30 |
@app.route('/upload', methods=['POST'])
def upload():
    """Recognize faces in an uploaded image and render them on the page.

    Expects a multipart/form-data POST with an 'image' file field; draws
    rectangles around the recognized faces and re-renders index.html with
    the annotated image embedded.
    """
    file = request.files['image']

    # Read the uploaded file into an OpenCV image
    image = read_image(file)

    # Recognize faces with the 10-class MLP model.
    # os.path.join is the portable idiom for building these paths.
    classifier_model_path = os.path.join("models", "lotr_mlp_10c_recognizer.pickle")
    label_encoder_path = os.path.join("models", "lotr_mlp_10c_labelencoder.pickle")
    faces = recognize_faces(image, classifier_model_path, label_encoder_path,
                            detection_api_url=app.config["DETECTION_API_URL"])

    # Draw detection rects
    draw_rectangles(image, faces)

    # Prepare image for html
    to_send = prepare_image(image)

    return render_template('index.html', face_recognized=len(faces) > 0,
                           num_faces=len(faces), image_to_show=to_send, init=True)
50 |
if __name__ == '__main__':
    # Development server only; production runs via gunicorn (see Procfile).
    app.run(debug=True,
            use_reloader=True,
            port=4000)
55 |
--------------------------------------------------------------------------------
/config.py:
--------------------------------------------------------------------------------
# Base URL of the companion face-detection service (see README "App Usage");
# read by app.py via app.config.from_object('config').
DETECTION_API_URL = "http://127.0.0.1:3000/"
--------------------------------------------------------------------------------
/images/webappscreen.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fcakyon/face-recognition-app-tutorial/d51ebfbeadcdc5e869df90f56948cdb46ef6af4a/images/webappscreen.jpg
--------------------------------------------------------------------------------
/models/lotr_mlp_10c_labelencoder.pickle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fcakyon/face-recognition-app-tutorial/d51ebfbeadcdc5e869df90f56948cdb46ef6af4a/models/lotr_mlp_10c_labelencoder.pickle
--------------------------------------------------------------------------------
/models/lotr_mlp_10c_recognizer.pickle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fcakyon/face-recognition-app-tutorial/d51ebfbeadcdc5e869df90f56948cdb46ef6af4a/models/lotr_mlp_10c_recognizer.pickle
--------------------------------------------------------------------------------
/models/lotr_mlp_2c_labelencoder.pickle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fcakyon/face-recognition-app-tutorial/d51ebfbeadcdc5e869df90f56948cdb46ef6af4a/models/lotr_mlp_2c_labelencoder.pickle
--------------------------------------------------------------------------------
/models/lotr_mlp_2c_recognizer.pickle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fcakyon/face-recognition-app-tutorial/d51ebfbeadcdc5e869df90f56948cdb46ef6af4a/models/lotr_mlp_2c_recognizer.pickle
--------------------------------------------------------------------------------
/models/lotr_mlp_5c_labelencoder.pickle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fcakyon/face-recognition-app-tutorial/d51ebfbeadcdc5e869df90f56948cdb46ef6af4a/models/lotr_mlp_5c_labelencoder.pickle
--------------------------------------------------------------------------------
/models/lotr_mlp_5c_recognizer.pickle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fcakyon/face-recognition-app-tutorial/d51ebfbeadcdc5e869df90f56948cdb46ef6af4a/models/lotr_mlp_5c_recognizer.pickle
--------------------------------------------------------------------------------
/models/lotr_nb_10c_labelencoder.pickle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fcakyon/face-recognition-app-tutorial/d51ebfbeadcdc5e869df90f56948cdb46ef6af4a/models/lotr_nb_10c_labelencoder.pickle
--------------------------------------------------------------------------------
/models/lotr_nb_10c_recognizer.pickle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fcakyon/face-recognition-app-tutorial/d51ebfbeadcdc5e869df90f56948cdb46ef6af4a/models/lotr_nb_10c_recognizer.pickle
--------------------------------------------------------------------------------
/models/lotr_nb_2c_labelencoder.pickle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fcakyon/face-recognition-app-tutorial/d51ebfbeadcdc5e869df90f56948cdb46ef6af4a/models/lotr_nb_2c_labelencoder.pickle
--------------------------------------------------------------------------------
/models/lotr_nb_2c_recognizer.pickle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fcakyon/face-recognition-app-tutorial/d51ebfbeadcdc5e869df90f56948cdb46ef6af4a/models/lotr_nb_2c_recognizer.pickle
--------------------------------------------------------------------------------
/models/lotr_nb_5c_labelencoder.pickle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fcakyon/face-recognition-app-tutorial/d51ebfbeadcdc5e869df90f56948cdb46ef6af4a/models/lotr_nb_5c_labelencoder.pickle
--------------------------------------------------------------------------------
/models/lotr_nb_5c_recognizer.pickle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fcakyon/face-recognition-app-tutorial/d51ebfbeadcdc5e869df90f56948cdb46ef6af4a/models/lotr_nb_5c_recognizer.pickle
--------------------------------------------------------------------------------
/models/lotr_svm_10c_labelencoder.pickle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fcakyon/face-recognition-app-tutorial/d51ebfbeadcdc5e869df90f56948cdb46ef6af4a/models/lotr_svm_10c_labelencoder.pickle
--------------------------------------------------------------------------------
/models/lotr_svm_10c_recognizer.pickle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fcakyon/face-recognition-app-tutorial/d51ebfbeadcdc5e869df90f56948cdb46ef6af4a/models/lotr_svm_10c_recognizer.pickle
--------------------------------------------------------------------------------
/models/lotr_svm_2c_labelencoder.pickle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fcakyon/face-recognition-app-tutorial/d51ebfbeadcdc5e869df90f56948cdb46ef6af4a/models/lotr_svm_2c_labelencoder.pickle
--------------------------------------------------------------------------------
/models/lotr_svm_2c_recognizer.pickle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fcakyon/face-recognition-app-tutorial/d51ebfbeadcdc5e869df90f56948cdb46ef6af4a/models/lotr_svm_2c_recognizer.pickle
--------------------------------------------------------------------------------
/models/lotr_svm_5c_labelencoder.pickle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fcakyon/face-recognition-app-tutorial/d51ebfbeadcdc5e869df90f56948cdb46ef6af4a/models/lotr_svm_5c_labelencoder.pickle
--------------------------------------------------------------------------------
/models/lotr_svm_5c_recognizer.pickle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fcakyon/face-recognition-app-tutorial/d51ebfbeadcdc5e869df90f56948cdb46ef6af4a/models/lotr_svm_5c_recognizer.pickle
--------------------------------------------------------------------------------
/models/openface_nn4.small2.v1.t7:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fcakyon/face-recognition-app-tutorial/d51ebfbeadcdc5e869df90f56948cdb46ef6af4a/models/openface_nn4.small2.v1.t7
--------------------------------------------------------------------------------
/presentation/FaceRecognitionWebAppTutorial.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fcakyon/face-recognition-app-tutorial/d51ebfbeadcdc5e869df90f56948cdb46ef6af4a/presentation/FaceRecognitionWebAppTutorial.pdf
--------------------------------------------------------------------------------
/requirements-dev.txt:
--------------------------------------------------------------------------------
1 | flask==1.1.1
2 | gunicorn==19.9.0
3 | opencv-contrib-python>=4.0
4 | matplotlib==3.1.3
5 | jupyter==1.0.0
6 | imutils==0.5.3
7 | scipy==1.5.4
8 | scikit-learn==0.20.0
9 | tensorflow==2.4.1
10 | scikeras==0.2.1
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | flask==1.1.1
2 | gunicorn==19.9.0
3 | opencv-contrib-python>=4.0
4 | matplotlib==3.1.3
5 | imutils==0.5.3
6 | requests==2.22.0
7 | scipy==1.5.4
8 | scikit-learn==0.24.1
9 | tensorflow==2.4.1
10 | scikeras==0.2.1
--------------------------------------------------------------------------------
/runtime.txt:
--------------------------------------------------------------------------------
1 | python-3.8.8
2 |
--------------------------------------------------------------------------------
/source/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 |
--------------------------------------------------------------------------------
/source/demo_script.py:
--------------------------------------------------------------------------------
import os
import cv2
import imutils
from source.utils import draw_rectangles
from source.embedding_extraction import extract_embeddings
from source.face_recognition import recognize_faces, extract_faces
from source.model_training import train_mlp_model, train_svm_model, train_nb_model

# Step 1: crop faces out of the raw training images.
raw_image_dir = "images" + os.sep + "lotr" + os.sep + "train" + os.sep + "raw"
extract_faces(raw_image_dir)

# Step 2: compute 128-d embeddings for every cropped face image.
classes_dir = "images" + os.sep + "lotr" + os.sep + "train" + os.sep + "10_classes"
embeddings_path = "images" + os.sep + "lotr" + os.sep + "train" + os.sep + "embeddings.pickle"
extract_embeddings(classes_dir, embeddings_path)

# Step 3: fit one classifier of each kind (NB, SVM, MLP) on the embeddings.
# After the loop the *_path variables hold the MLP artifacts, which the
# recognition demo below then uses.
for train_model, tag in ((train_nb_model, "nb"),
                         (train_svm_model, "svm"),
                         (train_mlp_model, "mlp")):
    classifier_model_path = "models" + os.sep + "lotr_" + tag + "_recognizer.pickle"
    label_encoder_path = "models" + os.sep + "lotr_" + tag + "_le.pickle"
    train_model(embeddings_path, classifier_model_path, label_encoder_path)

# Step 4: recognize faces on a held-out test image and display the result.
file_path = "images" + os.sep + "lotr" + os.sep + "test" + os.sep + "raw" + os.sep + "legolas6.jpg"
image = cv2.imread(file_path)
image = imutils.resize(image, width=600)
recognitions = recognize_faces(image, classifier_model_path, label_encoder_path)

draw_rectangles(image, recognitions)
cv2.imshow("recognition result", image)
cv2.waitKey()
42 |
--------------------------------------------------------------------------------
/source/embedding_extraction.py:
--------------------------------------------------------------------------------
1 | import os
2 | import cv2
3 | import pickle
4 | from imutils import paths
5 |
def extract_embeddings(classes_dir = "", embeddings_path = ""):
    """Compute a 128-d OpenFace embedding for every face image in *classes_dir*
    and pickle the result to *embeddings_path*.

    The directory layout is assumed to be classes_dir/<person_name>/<image>,
    i.e. the parent folder of each image encodes its label.
    The pickle holds {"embeddings": [ndarray, ...], "names": [str, ...]}.
    """
    # The serialized embedding network lives in the project-level "models" dir.
    models_dir = "models" + os.sep

    print("[INFO] loading face embedding extractor...")
    embedder = cv2.dnn.readNetFromTorch(models_dir + "openface_nn4.small2.v1.t7")

    print("[INFO] quantifying faces...")
    image_paths = list(paths.list_images(classes_dir))

    # Parallel lists of embeddings and their person labels.
    known_embeddings = []
    known_names = []

    for idx, image_path in enumerate(image_paths):
        # The parent directory name encodes the person's label.
        name = image_path.split(os.sep)[-2]

        print("[INFO] processing image {}/{}".format(idx + 1, len(image_paths)) + " - " + image_path.split(os.sep)[-1])

        face = cv2.imread(image_path)

        # Build a 96x96 blob scaled to [0, 1] with BGR->RGB swap — the input
        # format the OpenFace network expects — and run a forward pass to get
        # the 128-d quantification of the face.
        blob = cv2.dnn.blobFromImage(face, 1.0 / 255, (96, 96),
            (0, 0, 0), swapRB=True, crop=False)
        embedder.setInput(blob)
        embedding = embedder.forward()

        known_names.append(name)
        known_embeddings.append(embedding.flatten())

    # Dump the facial embeddings + names to disk as one pickle.
    print("[INFO] serializing {} encodings...".format(len(known_names)))
    with open(embeddings_path, "wb") as write_file:
        pickle.dump({"embeddings": known_embeddings, "names": known_names}, write_file)
--------------------------------------------------------------------------------
/source/face_recognition.py:
--------------------------------------------------------------------------------
1 | import os
2 | import cv2
3 | import pickle
4 | import requests
5 | import numpy as np
6 | from imutils import paths
7 |
def detect_faces(image, detection_api_url="http://127.0.0.1:3000/"):
    """Detect faces in *image* by POSTing it to the external detection API.

    Args:
        image: BGR image (numpy array) to search for faces.
        detection_api_url: base URL of the running face-detection service.

    Returns:
        The "detections" list from the API response; each entry is a dict
        with (at least) "rect" and "prob" keys, as consumed by the callers
        in this module.

    Raises:
        Exception: if the detection service cannot be reached.
    """
    # Encode the image as JPEG bytes for the multipart upload.
    # (ndarray.tobytes replaces the long-deprecated ndarray.tostring.)
    image = cv2.imencode('.jpg', image)[1].tobytes()

    # Normalize the base URL so the endpoint path can be appended.
    if not detection_api_url[-1] == "/":
        detection_api_url += "/"
    try:
        response = requests.post(detection_api_url + "detect", files={'image': image})
    except requests.exceptions.RequestException as err:
        # The original built this Exception without raising it, which left
        # `response` unbound and surfaced as a confusing NameError below.
        raise Exception("You need to run detection app as described in https://github.com/fcakyon/face-recognition-app-tutorial#app-usage") from err

    # Convert the response to a dictionary and pull out the detections.
    response_json = response.json()
    detections = response_json["detections"]
    return detections
26 |
def extract_faces(raw_image_dir = "", detection_api_url="http://127.0.0.1:3000/"):
    """Crop faces out of every image under *raw_image_dir*.

    Each image is resized to 300x300 and sent to the external detection API;
    detections above `min_confidence` percent are scaled back to the original
    resolution, cropped from the full-size image, and written as
    face_<n>.jpg into a sibling "faces" directory.

    Args:
        raw_image_dir: directory containing the raw input images.
        detection_api_url: base URL of the face-detection service.
    """
    # Minimum detection confidence, in percent.
    min_confidence = 30

    # Create export dir: <raw_image_dir>/../faces
    faces_dir = raw_image_dir + os.sep + ".." + os.sep + 'faces'
    os.makedirs(faces_dir, exist_ok=True)

    # Grab the paths to the input images in our dataset
    print("[INFO] quantifying faces...")
    image_paths = list(paths.list_images(raw_image_dir))

    index = 0
    # Loop over the image paths
    for (i, image_path) in enumerate(image_paths):
        print("[INFO] processing image {}/{}".format(i + 1,
            len(image_paths)))

        # Load the image and remember its original size; the API input is a
        # fixed 300x300 resize, so detections must be scaled back up later.
        image = cv2.imread(image_path)
        (image_height, image_width) = image.shape[:2]
        resized_image = cv2.resize(image, (300, 300))

        # os.sep is portable; the original split on a hard-coded "\\",
        # which only produced the bare filename on Windows.
        print("[INFO] performing face detection over api for: " + image_path.split(os.sep)[-1])
        detections = detect_faces(resized_image, detection_api_url)

        # Ensure at least one face was found
        if len(detections) > 0:
            for detection in detections:
                # Confidence (i.e., probability) associated with the prediction
                confidence = detection["prob"]

                # Scale detection coords from the 300x300 input back to the
                # original image dimensions.
                [start_x, start_y, end_x, end_y] = detection["rect"]
                start_x = int(start_x/300*image_width)
                start_y = int(start_y/300*image_height)
                end_x = int(end_x/300*image_width)
                end_y = int(end_y/300*image_height)

                # Keep only detections that meet the minimum probability
                # threshold (filters out weak detections).
                if confidence > min_confidence:
                    # Extract the face ROI
                    face = image[start_y:end_y, start_x:end_x]
                    (fH, fW) = face.shape[:2]

                    # Ensure the face width and height are sufficiently large
                    if fW < 20 or fH < 20:
                        continue

                    face_path = faces_dir + os.sep + "face" + '_' + str(index) + ".jpg"
                    print("face_path: " + face_path)
                    cv2.imwrite(face_path, face)
                    index = index + 1
88 |
def recognize_faces(image, classifier_model_path, label_encoder_path, detection_api_url="http://127.0.0.1:3000/"):
    '''Recognize faces in an image.

    Args:
        image: BGR image (numpy array) to search for faces.
        classifier_model_path: path to a pickled classifier exposing
            predict_proba() over 128-d OpenFace embeddings.
        label_encoder_path: path to the matching pickled sklearn LabelEncoder.
        detection_api_url: base URL of the external face-detection service.

    Returns:
        A list of dicts, one per accepted face, with keys 'rect',
        'detection_prob', 'recognition_prob' (percent) and 'name'.
    '''
    faces_list = []
    min_detection_confidence = 20 # percent

    # Load our serialized face embedding model from disk
    print("[INFO] loading face recognizer...")
    models_dir = "models" + os.sep
    face_embedding_model_filename = "openface_nn4.small2.v1.t7"
    embedder = cv2.dnn.readNetFromTorch(models_dir + face_embedding_model_filename)

    # Load the face recognition model and the label encoder.
    # (with-blocks close the files; the original left the handles open.)
    with open(classifier_model_path, "rb") as model_file:
        recognizer = pickle.load(model_file)
    with open(label_encoder_path, "rb") as encoder_file:
        label_encoder = pickle.load(encoder_file)

    print("[INFO] performing face detection over api...")
    detections = detect_faces(image, detection_api_url)

    print("[INFO] performing face recognition...")
    # Loop over the detections
    for detection in detections:
        # Get detection region and its confidence
        [start_x, start_y, end_x, end_y] = detection["rect"]
        detection_confidence = detection["prob"]

        # Filter out weak detections
        if detection_confidence > min_detection_confidence:
            # Extract the face ROI
            face = image[start_y:end_y, start_x:end_x]
            (fH, fW) = face.shape[:2]

            # Ensure the face width and height are sufficiently large
            if fW < 20 or fH < 20:
                continue

            # Construct a 96x96 blob for the face ROI and run it through the
            # embedding model to obtain the 128-d quantification of the face.
            face_blob = cv2.dnn.blobFromImage(face, 1.0 / 255, (96, 96),
                (0, 0, 0), swapRB=True, crop=False)
            embedder.setInput(face_blob)
            vec = embedder.forward()

            # Classify the embedding. predict_proba may return a 1-d or a
            # (1, n_classes) array depending on the classifier; flattening
            # handles both uniformly (the original try/except mis-handled the
            # 2-d case when the best class index was 0, indexing a whole row).
            preds = np.asarray(recognizer.predict_proba(vec)).ravel()
            j = int(np.argmax(preds))
            # .item() converts numpy scalars to native python float/str.
            recognition_confidence = preds[j].item()
            name = label_encoder.classes_[j].item()

            # Append results to list
            face_dict = {}
            face_dict['rect'] = [start_x, start_y, end_x, end_y]
            face_dict['detection_prob'] = detection_confidence
            face_dict['recognition_prob'] = recognition_confidence * 100
            face_dict['name'] = name
            faces_list.append(face_dict)

    # Return the face rectangles, probabilities, and names
    return faces_list
159 |
--------------------------------------------------------------------------------
/source/model_training.py:
--------------------------------------------------------------------------------
1 | # import the necessary packages
2 | from sklearn.preprocessing import LabelEncoder
3 | import numpy as np
4 | import pickle
5 |
6 | from tensorflow.keras.models import Sequential
7 | from tensorflow.keras.layers import Dense
8 | from scikeras.wrappers import KerasClassifier
9 | from tensorflow.keras.optimizers import Adam
10 |
11 | from sklearn.svm import SVC
12 |
13 | from sklearn.naive_bayes import GaussianNB
14 |
def create_mlp_model(optimizer='adam', neuron_number=50, lr=0.001, class_number=5):
    """Build a small Keras MLP for classifying 128-d face embeddings.

    Args:
        optimizer: kept for interface compatibility; superseded below by an
            Adam instance configured with `lr`.
        neuron_number: width of the two hidden layers.
        lr: Adam learning rate.
        class_number: number of output classes.

    Returns:
        A compiled tf.keras Sequential model.
    """
    model = Sequential()
    # The original called Input(...), which was never imported in this module
    # (guaranteed NameError); declare the 128-d embedding input on the first
    # Dense layer instead — same architecture, no extra import needed.
    model.add(Dense(neuron_number, activation='relu', input_shape=(128,)))
    model.add(Dense(neuron_number, activation='relu'))
    model.add(Dense(class_number, activation='softmax'))
    # `learning_rate` replaces the deprecated `lr` keyword.
    optimizer = Adam(learning_rate=lr)
    model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    return model
25 |
def train_mlp_model(embeddings_path = "", classifier_model_path = "", label_encoder_path = ""):
    """Train an MLP face recognizer on precomputed embeddings.

    Loads the embeddings pickle from *embeddings_path*, fits a Keras MLP
    wrapped in a scikeras KerasClassifier, then pickles the trained model to
    *classifier_model_path* and the LabelEncoder to *label_encoder_path*.
    """
    # Load the face embeddings (with-block closes the file handle)
    print("[INFO] loading face embeddings...")
    with open(embeddings_path, "rb") as read_file:
        data = pickle.load(read_file)

    # Encode the labels
    print("[INFO] encoding labels...")
    le = LabelEncoder()
    labels = le.fit_transform(data["names"])
    class_number = len(set(labels))

    # Stack the embeddings into an (n_samples, 128) float matrix.
    # NOTE: the original copy loop started at index 1, leaving row 0 all-zero
    # and silently corrupting the first training sample.
    embedding_mtx = np.asarray(data["embeddings"], dtype=float)

    # Train the model used to accept the 128-d embeddings of the face and
    # then produce the actual face recognition
    print("[INFO] training model...")
    mlp_model = create_mlp_model(optimizer='adam', neuron_number=32, lr=1e-3, class_number=class_number)
    recognizer = KerasClassifier(model=mlp_model,
                                 epochs=200,
                                 batch_size=64,
                                 verbose=1)

    recognizer.fit(embedding_mtx,
                   labels)

    print("[INFO] saving model...")
    # Write the actual face recognition model to disk as pickle
    with open(classifier_model_path, "wb") as write_file:
        pickle.dump(recognizer, write_file)

    # Write the label encoder to disk as pickle
    with open(label_encoder_path, "wb") as write_file:
        pickle.dump(le, write_file)
66 |
def train_svm_model(embeddings_path = "", classifier_model_path = "", label_encoder_path = ""):
    """Train a linear SVM face recognizer on precomputed embeddings.

    Loads the embeddings pickle from *embeddings_path*, fits an SVC with
    probability estimates, then pickles the model to *classifier_model_path*
    and the LabelEncoder to *label_encoder_path*.
    """
    # Load the face embeddings (with-block closes the file handle, which the
    # original open(...).read() leaked)
    print("[INFO] loading face embeddings...")
    with open(embeddings_path, "rb") as read_file:
        data = pickle.load(read_file)

    # Encode the labels
    print("[INFO] encoding labels...")
    le = LabelEncoder()
    labels = le.fit_transform(data["names"])

    # Train the model used to accept the 128-d embeddings of the face and
    # then produce the actual face recognition.
    # probability=True enables predict_proba, which recognize_faces relies on.
    print("[INFO] training model...")
    recognizer = SVC(C=1, kernel="linear", probability=True)
    recognizer.fit(data["embeddings"], labels)

    print("[INFO] saving model...")
    # Write the actual face recognition model to disk as pickle
    with open(classifier_model_path, "wb") as write_file:
        pickle.dump(recognizer, write_file)

    # Write the label encoder to disk as pickle
    with open(label_encoder_path, "wb") as write_file:
        pickle.dump(le, write_file)
95 |
def train_nb_model(embeddings_path = "", classifier_model_path = "", label_encoder_path = ""):
    """Train a Gaussian Naive Bayes face recognizer on precomputed embeddings.

    Loads the pickled {"embeddings": ..., "names": ...} dict from
    "embeddings_path", label-encodes the names, fits a GaussianNB
    classifier, then pickles the trained model to
    "classifier_model_path" and the label encoder to
    "label_encoder_path".
    """

    # Load the face embeddings; a context manager guarantees the file
    # handle is closed (the old open(...).read() never closed it)
    print("[INFO] loading face embeddings...")
    with open(embeddings_path, "rb") as read_file:
        data = pickle.load(read_file)

    # Encode the string labels ("names") into integer class ids
    print("[INFO] encoding labels...")
    le = LabelEncoder()
    labels = le.fit_transform(data["names"])

    # Train the model used to accept the 128-d embeddings of the face and
    # then produce the actual face recognition
    print("[INFO] training model...")
    recognizer = GaussianNB()
    recognizer.fit(data["embeddings"], labels)

    print("[INFO] saving model...")
    # Write the actual face recognition model to disk as pickle
    with open(classifier_model_path, "wb") as write_file:
        pickle.dump(recognizer, write_file)

    # Write the label encoder to disk as pickle
    with open(label_encoder_path, "wb") as write_file:
        pickle.dump(le, write_file)
124 |
--------------------------------------------------------------------------------
/source/resize_images.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""
Resize a single image in place to 50% of its original size.

Created on Sat Nov 16 18:16:24 2019

@author: FCA
"""

import cv2 # OpenCV for image editing, computer vision and deep learning
from source.utils import get_folder_dir # Custom function for better directory name handling

# Get images directory
images_dir = get_folder_dir("images")

# Read image. cv2.imread returns None (instead of raising) when the file
# is missing or unreadable, so fail early with a clear error message.
image_path = images_dir + "sample4.jpg"
image = cv2.imread(image_path)
if image is None:
    raise FileNotFoundError("Could not read image: " + image_path)

print('Original Dimensions : ',image.shape)

# Scale both axes by the same factor to preserve aspect ratio
scale_percent = 50 # percent of original size
width = int(image.shape[1] * scale_percent / 100)
height = int(image.shape[0] * scale_percent / 100)
dim = (width, height)

# resize image; INTER_AREA is the recommended interpolation for shrinking
resized = cv2.resize(image, dim, interpolation = cv2.INTER_AREA)
print('Resized Dimensions : ',resized.shape)

# save image, overwriting the original file
cv2.imwrite(image_path, resized)
--------------------------------------------------------------------------------
/source/utils.py:
--------------------------------------------------------------------------------
1 | import os
2 | import cv2 # OpenCV for image editing, computer vision and deep learning
3 | import base64 # Used for encoding image content string
4 | import imutils # For easier image path processing
5 | import numpy as np # Numpy for math/array operations
6 | from matplotlib import pyplot as plt # Matplotlib for visualization
7 |
def draw_rectangle(image, face):
    # Draws the bounding box of a single detected face onto `image`
    # in place, plus a "name: probability%" label when a recognition
    # probability is present.
    start_x, start_y, end_x, end_y = face["rect"]
    # Yellow in OpenCV's BGR channel order
    box_color = (0, 255, 255)
    # Bounding box around the detected face
    cv2.rectangle(img = image,
                  pt1 = (start_x, start_y),
                  pt2 = (end_x, end_y),
                  color = box_color,
                  thickness = 2)

    # Nothing more to draw when the probability slot holds the
    # empty-list "missing" marker
    if face["recognition_prob"] == []:
        return

    # Label text: "<name>: <probability>%"
    label = "{}: {:.2f}%".format(face["name"], face["recognition_prob"])
    # Place the label above the box unless it would leave the image
    text_y = start_y - 10 if start_y - 10 > 10 else start_y + 10
    cv2.putText(img = image,
                text = label,
                org = (start_x, text_y),
                fontFace = cv2.FONT_HERSHEY_SIMPLEX,
                fontScale = 0.45,
                color = box_color,
                thickness = 1)
35 |
def draw_rectangles(image, faces):
    """Draw a detection rectangle for every face in `faces` onto `image`.

    Returns:
        tuple: (number of faces drawn, the annotated image).
    """
    # len() already yields 0 for an empty list, so no special-casing
    # is needed (the previous if/else computed len(faces) either way)
    num_faces = len(faces)
    for face in faces:
        draw_rectangle(image, face)
    return num_faces, image
46 |
def read_image(file):
    """Decode an uploaded image into a BGR array resized to 600px width.

    Args:
        file: file-like object (e.g. a Flask upload) supporting .read().

    Returns:
        The decoded image array, resized to width 600.
    """
    # np.frombuffer replaces np.fromstring, which is deprecated for
    # binary input
    image = cv2.imdecode(np.frombuffer(file.read(), np.uint8), cv2.IMREAD_COLOR)
    image = imutils.resize(image, width=600)
    return image
51 |
def prepare_image(image):
    """Encode an image as a base64 data URI suitable for an <img> src.

    Args:
        image: BGR image array.

    Returns:
        str: a 'data:image/jpg;base64, ...' string.
    """
    # cv2.imencode returns (success, buffer); .tobytes() replaces the
    # deprecated ndarray.tostring()
    image_content = cv2.imencode('.jpg', image)[1].tobytes()
    # base64.encodebytes replaces base64.encodestring, which was
    # removed in Python 3.9
    encoded_image = base64.encodebytes(image_content)
    to_send = 'data:image/jpg;base64, ' + str(encoded_image, 'utf-8')
    return to_send
57 |
def plot_image(image):
    # Show a BGR image with matplotlib, which expects RGB channel order
    rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    plt.imshow(rgb_image)
60 |
def get_folder_dir(folder_name):
    # Build "<cwd><sep><folder_name><sep>" for the given folder name
    return os.getcwd() + os.sep + folder_name + os.sep
--------------------------------------------------------------------------------
/templates/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
Upload an image to see recognized character faces from LOTR
Face recognition using FaceNet PyTorch Model | Powered by Python, Flask, OpenCV, Caffe, PyTorch
16 |Face detection using Resnet10 SSD Caffe Model | Powered by Python, Flask, OpenCV, Caffe
17 |Image size limit: 2 MB
18 |By Fatih C. Akyon - November, 2019
19 |