├── Documents
└── ProjectOS
│   ├── add_new_face.py
│   ├── cli.py
│   ├── configuration.py
│   ├── disable_facerecognition.py
│   ├── list_of_faces.py
│   ├── match_face.py
│   ├── remove_face.py
│   └── test_face.py
└── README.md
/Documents/ProjectOS/add_new_face.py:
--------------------------------------------------------------------------------
#PROJECTOS
#BY-ARPIT JAIN
# add_new_face.py — capture a frame from the IR camera and append a new face
# model for the current user to their on-disk model file.
import time
import os
import sys
import json
import configparser
import builtins
import numpy as np
import paths_factory

from recorders.video_capture import VideoCapture
from i18n import _

# dlib is an optional native dependency; give an actionable hint if missing
try:
    import dlib
except ImportError as err:
    print(err)

    print(_("\nCan't import the dlib module, check the output of"))
    print("pip3 show dlib")
    sys.exit(1)
import cv2

# The landmark data files must be installed before we can do anything
if not os.path.isfile(paths_factory.shape_predictor_5_face_landmarks_path()):
    print(_("Data files have not been downloaded, please run the following commands:"))
    print("\n\tcd " + paths_factory.dlib_data_dir_path())
    print("\tsudo ./install.sh\n")
    sys.exit(1)

# Read the howdy config from disk
config = configparser.ConfigParser()
config.read(paths_factory.config_file_path())

# Pick the CNN detector if enabled in config, otherwise the HOG frontal detector
use_cnn = config.getboolean("core", "use_cnn", fallback=False)
if use_cnn:
    face_detector = dlib.cnn_face_detection_model_v1(paths_factory.mmod_human_face_detector_path())
else:
    face_detector = dlib.get_frontal_face_detector()

# Landmark predictor and the 128-d face encoder
pose_predictor = dlib.shape_predictor(paths_factory.shape_predictor_5_face_landmarks_path())
face_encoder = dlib.face_recognition_model_v1(paths_factory.dlib_face_recognition_resnet_model_v1_path())

# The user this model is being added for (set by cli.py)
user = builtins.howdy_user

# Path of this user's model file
enc_file = paths_factory.user_model_path(user)

encodings = []


# Make sure the models folder exists before writing into it
if not os.path.exists(paths_factory.user_models_dir_path()):
    print(_("No face model folder found, creating one"))
    os.makedirs(paths_factory.user_models_dir_path())

# Try to load any models the user already has
try:
encodings = json.load(open(enc_file)) 54 | except FileNotFoundError: 55 | encodings = [] 56 | 57 | if len(encodings) > 3: 58 | print(_("NOTICE: Each additional model slows down the face recognition engine slightly")) 59 | print(_("Press Ctrl+C to cancel\n")) 60 | 61 | if not builtins.howdy_args.plain: 62 | print(_("Adding face model for the user ") + user) 63 | 64 | label = "Initial model" 65 | 66 | next_id = encodings[-1]["id"] + 1 if encodings else 0 67 | 68 | if builtins.howdy_args.arguments: 69 | label = builtins.howdy_args.arguments[0] 70 | 71 | else: 72 | label = _("Model #") + str(next_id) 73 | 74 | 75 | if builtins.howdy_args.y: 76 | print(_('Using default label "%s" because of -y flag') % (label, )) 77 | else: 78 | 79 | label_in = input(_("Enter a label for this new model [{}]: ").format(label)) 80 | if label_in != "": 81 | label = label_in[:24] 82 | 83 | if "," in label: 84 | print(_("NOTICE: Removing illegal character \",\" from model name")) 85 | label = label.replace(",", "") 86 | 87 | # Prepare the metadata for insertion 88 | insert_model = { 89 | "time": int(time.time()), 90 | "label": label, 91 | "id": next_id, 92 | "data": [] 93 | } 94 | 95 | # Set up video_capture 96 | video_capture = VideoCapture(config) 97 | 98 | print(_("\nPlease look straight into the camera")) 99 | 100 | # Give the user time to read 101 | time.sleep(2) 102 | 103 | # Will contain found face encodings 104 | enc = [] 105 | # Count the number of read frames 106 | frames = 0 107 | # Count the number of illuminated read frames 108 | valid_frames = 0 109 | # Count the number of illuminated frames that 110 | # were rejected for being too dark 111 | dark_tries = 0 112 | # Track the running darkness total 113 | dark_running_total = 0 114 | face_locations = None 115 | 116 | dark_threshold = config.getfloat("video", "dark_threshold", fallback=60) 117 | 118 | clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)) 119 | 120 | # Loop through frames till we hit a timeout 121 | while 
frames < 60: 122 | frames += 1 123 | # Grab a single frame of video 124 | frame, gsframe = video_capture.read_frame() 125 | gsframe = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) 126 | gsframe = clahe.apply(gsframe) 127 | 128 | # Create a histogram of the image with 8 values 129 | hist = cv2.calcHist([gsframe], [0], None, [8], [0, 256]) 130 | # All values combined for percentage calculation 131 | hist_total = np.sum(hist) 132 | 133 | # Calculate frame darkness 134 | darkness = (hist[0] / hist_total * 100) 135 | 136 | # If the image is fully black due to a bad camera read, 137 | # skip to the next frame 138 | if (hist_total == 0) or (darkness == 100): 139 | continue 140 | 141 | # Include this frame in calculating our average session brightness 142 | dark_running_total += darkness 143 | valid_frames += 1 144 | 145 | # If the image exceeds darkness threshold due to subject distance, 146 | # skip to the next frame 147 | if (darkness > dark_threshold): 148 | dark_tries += 1 149 | continue 150 | 151 | # Get all faces from that frame as encodings 152 | face_locations = face_detector(gsframe, 1) 153 | 154 | # If we've found at least one, we can continue 155 | if face_locations: 156 | break 157 | 158 | video_capture.release() 159 | 160 | # If we've found no faces, try to determine why 161 | if not face_locations: 162 | if valid_frames == 0: 163 | print(_("Camera saw only black frames - is IR emitter working?")) 164 | elif valid_frames == dark_tries: 165 | print(_("All frames were too dark, please check dark_threshold in config")) 166 | print(_("Average darkness: {avg}, Threshold: {threshold}").format(avg=str(dark_running_total / valid_frames), threshold=str(dark_threshold))) 167 | else: 168 | print(_("No face detected, aborting")) 169 | sys.exit(1) 170 | 171 | # If more than 1 faces are detected we can't know which one belongs to the user 172 | elif len(face_locations) > 1: 173 | print(_("Multiple faces detected, aborting")) 174 | sys.exit(1) 175 | 176 | face_location = 
face_locations[0] 177 | if use_cnn: 178 | face_location = face_location.rect 179 | 180 | # Get the encodings in the frame 181 | face_landmark = pose_predictor(frame, face_location) 182 | face_encoding = np.array(face_encoder.compute_face_descriptor(frame, face_landmark, 1)) 183 | 184 | insert_model["data"].append(face_encoding.tolist()) 185 | 186 | # Insert full object into the list 187 | encodings.append(insert_model) 188 | 189 | # Save the new encodings to disk 190 | with open(enc_file, "w") as datafile: 191 | json.dump(encodings, datafile) 192 | 193 | # Give let the user know how it went 194 | print(_("""\nScan complete 195 | Added a new model to """) + user) 196 | -------------------------------------------------------------------------------- /Documents/ProjectOS/cli.py: -------------------------------------------------------------------------------- 1 | #PROJECT OS 2 | #BY-ARPIT JAIN 3 | 4 | import sys 5 | import os 6 | import pwd 7 | import getpass 8 | import argparse 9 | import builtins 10 | 11 | from i18n import _ 12 | 13 | sudo_user = os.environ.get("SUDO_USER") 14 | doas_user = os.environ.get("DOAS_USER") 15 | pkexec_uid = os.environ.get("PKEXEC_UID") 16 | pkexec_user = pwd.getpwuid(int(pkexec_uid))[0] if pkexec_uid else "" 17 | env_user = getpass.getuser() 18 | user = next((u for u in [sudo_user, doas_user, pkexec_user, env_user] if u), "") 19 | 20 | if user == "": 21 | print(_("Could not determine user, please use the --user flag")) 22 | sys.exit(1) 23 | 24 | parser = argparse.ArgumentParser( 25 | description=_("Command line interface for Howdy face authentication."), 26 | formatter_class=argparse.RawDescriptionHelpFormatter, 27 | add_help=False, 28 | prog="howdy", 29 | usage="howdy [-U USER] [--plain] [-h] [-y] {command} [{arguments}...]".format(command=_("command"), arguments=_("arguments")), 30 | epilog=_("For support please visit\nhttps://github.com/boltgolt/howdy")) 31 | 32 | parser.add_argument( 33 | "command", 34 | help=_("The command option to 
execute, can be one of the following: add, clear, config, disable, list, remove, snapshot, set, test or version."), 35 | metavar="command", 36 | choices=["add", "clear", "config", "disable", "list", "remove", "set", "snapshot", "test", "version"]) 37 | 38 | parser.add_argument( 39 | "arguments", 40 | help=_("Optional arguments for the add, disable, remove and set commands."), 41 | nargs="*") 42 | 43 | parser.add_argument( 44 | "-U", "--user", 45 | default=user, 46 | help=_("Set the user account to use.")) 47 | 48 | parser.add_argument( 49 | "-y", 50 | help=_("Skip all questions."), 51 | action="store_true") 52 | 53 | parser.add_argument( 54 | "--plain", 55 | help=_("Print machine-friendly output."), 56 | action="store_true") 57 | 58 | parser.add_argument( 59 | "-h", "--help", 60 | action="help", 61 | default=argparse.SUPPRESS, 62 | help=_("Show this help message and exit.")) 63 | 64 | if len(sys.argv) < 2: 65 | print(_("current active user: ") + user + "\n") 66 | parser.print_help() 67 | sys.exit(0) 68 | 69 | args = parser.parse_args() 70 | 71 | builtins.howdy_args = args 72 | builtins.howdy_user = args.user 73 | 74 | if os.geteuid() != 0: 75 | print(_("Please run this command as root:\n")) 76 | print("\tsudo howdy " + " ".join(sys.argv[1:])) 77 | sys.exit(1) 78 | 79 | if args.user == "root": 80 | print(_("Can't run howdy commands as root, please run this command with the --user flag")) 81 | sys.exit(1) 82 | 83 | if args.command == "add": 84 | import cli.add 85 | elif args.command == "clear": 86 | import cli.clear 87 | elif args.command == "config": 88 | import cli.config 89 | elif args.command == "disable": 90 | import cli.disable 91 | elif args.command == "list": 92 | import cli.list 93 | elif args.command == "remove": 94 | import cli.remove 95 | elif args.command == "set": 96 | import cli.set 97 | elif args.command == "snapshot": 98 | import cli.snap 99 | elif args.command == "test": 100 | import cli.test 101 | else: 102 | print("Howdy 3.0.0 BETA") 103 | 
-------------------------------------------------------------------------------- /Documents/ProjectOS/configuration.py: -------------------------------------------------------------------------------- 1 | #PROJECTOS 2 | #BY-ARPIT JAIN 3 | import os 4 | import subprocess 5 | import paths_factory 6 | 7 | from i18n import _ 8 | 9 | # Let the user know what we're doing 10 | print(_("Opening config.ini in the default editor")) 11 | 12 | # Default to the nano editor 13 | editor = "/bin/nano" 14 | 15 | # Use the user preferred editor if available 16 | if "EDITOR" in os.environ: 17 | editor = os.environ["EDITOR"] 18 | elif os.path.isfile("/etc/alternatives/editor"): 19 | editor = "/etc/alternatives/editor" 20 | 21 | # Open the editor as a subprocess and fork it 22 | subprocess.call([editor, paths_factory.config_file_path()]) 23 | -------------------------------------------------------------------------------- /Documents/ProjectOS/disable_facerecognition.py: -------------------------------------------------------------------------------- 1 | #PROJECTOS 2 | #BY-ARPIT JAIN 3 | import sys 4 | import os 5 | import builtins 6 | import fileinput 7 | import configparser 8 | import paths_factory 9 | 10 | from i18n import _ 11 | 12 | # Get the absolute filepath 13 | config_path = paths_factory.config_file_path() 14 | 15 | # Read config from disk 16 | config = configparser.ConfigParser() 17 | config.read(config_path) 18 | 19 | # Check if enough arguments have been passed 20 | if not builtins.howdy_args.arguments: 21 | print(_("Please add a 0 (enable) or a 1 (disable) as an argument")) 22 | sys.exit(1) 23 | 24 | # Get the cli argument 25 | argument = builtins.howdy_args.arguments[0] 26 | 27 | # Translate the argument to the right string 28 | if argument == "1" or argument.lower() == "true": 29 | out_value = "true" 30 | elif argument == "0" or argument.lower() == "false": 31 | out_value = "false" 32 | else: 33 | # Of it's not a 0 or a 1, it's invalid 34 | print(_("Please only use 0 
(enable) or 1 (disable) as an argument")) 35 | sys.exit(1) 36 | 37 | # Don't do anything when the state is already the requested one 38 | if out_value == config.get("core", "disabled", fallback=True): 39 | print(_("The disable option has already been set to ") + out_value) 40 | sys.exit(1) 41 | 42 | # Loop though the config file and only replace the line containing the disable config 43 | for line in fileinput.input([config_path], inplace=1): 44 | print(line.replace("disabled = " + config.get("core", "disabled", fallback=True), "disabled = " + out_value), end="") 45 | 46 | # Print what we just did 47 | if out_value == "true": 48 | print(_("Howdy has been disabled")) 49 | else: 50 | print(_("Howdy has been enabled")) 51 | -------------------------------------------------------------------------------- /Documents/ProjectOS/list_of_faces.py: -------------------------------------------------------------------------------- 1 | #PROJECTOS 2 | #BY-ARPIT JAIN 3 | import sys 4 | import os 5 | import json 6 | import time 7 | import builtins 8 | import paths_factory 9 | 10 | from i18n import _ 11 | 12 | user = builtins.howdy_user 13 | 14 | # Check if the models file has been created yet 15 | if not os.path.exists(paths_factory.user_models_dir_path()): 16 | print(_("Face models have not been initialized yet, please run:")) 17 | print("\n\tsudo howdy -U " + user + " add\n") 18 | sys.exit(1) 19 | 20 | # Path to the models file 21 | enc_file = paths_factory.user_model_path(user) 22 | 23 | # Try to load the models file and abort if the user does not have it yet 24 | try: 25 | encodings = json.load(open(enc_file)) 26 | except FileNotFoundError: 27 | if not builtins.howdy_args.plain: 28 | print(_("No face model known for the user {}, please run:").format(user)) 29 | print("\n\tsudo howdy -U " + user + " add\n") 30 | sys.exit(1) 31 | 32 | # Print a header if we're not in plain mode 33 | if not builtins.howdy_args.plain: 34 | print(_("Known face models for {}:").format(user)) 35 | 
    print("\n\033[1;29m" + _("ID Date Label\033[0m"))

# Loop through all encodings and print info about them
for enc in encodings:
    # Start with the id
    print(str(enc["id"]), end="")

    # Add comma for machine reading
    if builtins.howdy_args.plain:
        print(",", end="")
    # Print padding spaces after the id for a nice layout
    else:
        print((4 - len(str(enc["id"]))) * " ", end="")

    # Format the time as ISO in the local timezone
    print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(enc["time"])), end="")

    # Separate with commas again for machines, spaces otherwise
    print("," if builtins.howdy_args.plain else " ", end="")

    # End with the label
    print(enc["label"])

# Add a closing enter
print()
--------------------------------------------------------------------------------
/Documents/ProjectOS/match_face.py:
--------------------------------------------------------------------------------
#PROJECTOS
#BY-ARPIT JAIN
# match_face.py — authenticate a user by comparing camera frames against
# their stored face encodings. Exit codes signal the result to the caller.
import time

# Start timing
timings = {
    "st": time.time()
}

# Import required modules
import sys
import os
import json
import configparser
import dlib
import cv2
from datetime import timezone, datetime
import atexit
import subprocess
import snapshot
import numpy as np
import _thread as thread
import paths_factory
from recorders.video_capture import VideoCapture
from i18n import _

def exit(code=None):
    """Exit while closing howdy-gtk properly"""
    global gtk_proc

    # Exit the auth ui process if there is one
    if "gtk_proc" in globals():
        gtk_proc.terminate()

    # Exit compare
    if code is not None:
        sys.exit(code)


def init_detector(lock):
    """Start face detector, encoder and predictor in a new thread"""
    global face_detector, pose_predictor, face_encoder

    # Test if at least 1 of the data files is there and abort if it's not
    if not os.path.isfile(paths_factory.shape_predictor_5_face_landmarks_path()):
        print(_("Data files have not been downloaded, please run the following commands:"))
        print("\n\tcd " + paths_factory.dlib_data_dir_path())
        print("\tsudo ./install.sh\n")
        lock.release()
        exit(1)

    # Use the CNN detector if enabled
    if use_cnn:
        face_detector = dlib.cnn_face_detection_model_v1(paths_factory.mmod_human_face_detector_path())
    else:
        face_detector = dlib.get_frontal_face_detector()

    # Start the others regardless
    pose_predictor = dlib.shape_predictor(paths_factory.shape_predictor_5_face_landmarks_path())
    face_encoder = dlib.face_recognition_model_v1(paths_factory.dlib_face_recognition_resnet_model_v1_path())

    # Note the time it took to initialize detectors
    timings["ll"] = time.time() - timings["ll"]
    lock.release()


def make_snapshot(type):
    """Generate snapshot after detection"""
    snapshot.generate(snapframes, [
        type + _(" LOGIN"),
        _("Date: ") + datetime.now(timezone.utc).strftime("%Y/%m/%d %H:%M:%S UTC"),
        _("Scan time: ") + str(round(time.time() - timings["fr"], 2)) + "s",
        _("Frames: ") + str(frames) + " (" + str(round(frames / (time.time() - timings["fr"]), 2)) + "FPS)",
        _("Hostname: ") + os.uname().nodename,
        _("Best certainty value: ") + str(round(lowest_certainty * 10, 1))
    ])


def send_to_ui(type, message):
    """Send message to the auth ui"""
    global gtk_proc

    # Only execute if the process started
    if "gtk_proc" in globals():
        # Format message so the ui can parse it
        message = type + "=" + message + " \n"

        # Try to send the message to the auth ui, but it's okay if that fails
        try:
            gtk_proc.stdin.write(bytearray(message.encode("utf-8")))
            gtk_proc.stdin.flush()
        except IOError:
            pass


# Make sure we were given a username to test against
if len(sys.argv) < 2:
    exit(12)

# The username of the user being authenticated
user = sys.argv[1]
# The model file contents
models = []
# Encoded face models
encodings = []
# Amount of ignored 100% black frames
black_tries = 0
# Amount of ignored dark frames
dark_tries = 0
# Total amount of frames captured
frames = 0
# Captured frames for snapshot capture
snapframes = []
# Tracks the lowest certainty value in the loop
lowest_certainty = 10
# Face recognition/detection instances
face_detector = None
pose_predictor = None
face_encoder = None

# Try to load the face model from the models folder
try:
    models = json.load(open(paths_factory.user_model_path(user)))

    for model in models:
        encodings += model["data"]
except FileNotFoundError:
    exit(10)

# Check if the file contains a model
if len(models) < 1:
    exit(10)

# Read config from disk
config = configparser.ConfigParser()
config.read(paths_factory.config_file_path())

# Get all config values needed
use_cnn = config.getboolean("core", "use_cnn", fallback=False)
timeout = config.getint("video", "timeout", fallback=4)
dark_threshold = config.getfloat("video", "dark_threshold", fallback=50.0)
video_certainty = config.getfloat("video", "certainty", fallback=3.5) / 10
end_report = config.getboolean("debug", "end_report", fallback=False)
save_failed = config.getboolean("snapshots", "save_failed", fallback=False)
save_successful = config.getboolean("snapshots", "save_successful", fallback=False)
gtk_stdout = config.getboolean("debug", "gtk_stdout", fallback=False)
rotate = config.getint("video", "rotate", fallback=0)

# Send the gtk output to the terminal if enabled in the config
gtk_pipe = sys.stdout if gtk_stdout else subprocess.DEVNULL

# Start the auth ui, register it to be always be closed on exit
try:
    gtk_proc = subprocess.Popen(["howdy-gtk", "--start-auth-ui"], stdin=subprocess.PIPE, stdout=gtk_pipe, stderr=gtk_pipe)
    atexit.register(exit)
except FileNotFoundError:
    pass

# Write to the stdin to redraw ui
send_to_ui("M", _("Starting up..."))

# Save the time needed to start the script
timings["in"] = time.time() - timings["st"]

# Import face recognition, takes some time
timings["ll"] = time.time()

# Start threading and wait for init to finish
lock = thread.allocate_lock()
lock.acquire()
thread.start_new_thread(init_detector, (lock, ))

# Start video capture on the IR camera
timings["ic"] = time.time()

video_capture = VideoCapture(config)

# Read exposure from config to use in the main loop
exposure = config.getint("video", "exposure", fallback=-1)

# Note the time it took to open the camera
timings["ic"] = time.time() - timings["ic"]

# wait for thread to finish
lock.acquire()
lock.release()
del lock

# Fetch the max frame height
max_height = config.getfloat("video", "max_height", fallback=320.0)

# Get the height of the image (which would be the width if screen is portrait oriented)
height = video_capture.internal.get(cv2.CAP_PROP_FRAME_HEIGHT) or 1
if rotate == 2:
    height = video_capture.internal.get(cv2.CAP_PROP_FRAME_WIDTH) or 1
# Calculate the amount the image has to shrink
scaling_factor = (max_height / height) or 1

# Fetch config settings out of the loop
timeout = config.getint("video", "timeout", fallback=4)
dark_threshold = config.getfloat("video", "dark_threshold", fallback=60)
end_report = config.getboolean("debug", "end_report", fallback=False)

# Initiate histogram equalization
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))

# Let the ui know that we're ready
send_to_ui("M", _("Identifying you..."))

# Start the read loop
frames = 0
valid_frames = 0
timings["fr"] = time.time()
dark_running_total = 0

while True:
    # Increment the frame count every loop
    frames += 1

    # Form a string to let the user know we're real busy
    ui_subtext = "Scanned " + str(valid_frames - dark_tries) + " frames"
    if (dark_tries > 1):
        ui_subtext += " (skipped " + str(dark_tries) + " dark frames)"
    # Show it in the ui as subtext
    send_to_ui("S", ui_subtext)

    # Stop if we've exceeded the time limit
    if time.time() - timings["fr"] > timeout:
        # Create a timeout snapshot if enabled
        if save_failed:
            make_snapshot(_("FAILED"))

        if dark_tries == valid_frames:
            print(_("All frames were too dark, please check dark_threshold in config"))
            print(_("Average darkness: {avg}, Threshold: {threshold}").format(avg=str(dark_running_total / max(1, valid_frames)), threshold=str(dark_threshold)))
            exit(13)
        else:
            exit(11)

    # Grab a single frame of video
    frame, gsframe = video_capture.read_frame()
    gsframe = clahe.apply(gsframe)

    # If snapshots have been turned on
    if save_failed or save_successful:
        # Start capturing frames for the snapshot
        if len(snapframes) < 3:
            snapframes.append(frame)

    # Create a histogram of the image with 8 values
    hist = cv2.calcHist([gsframe], [0], None, [8], [0, 256])
    # All values combined for percentage calculation
    hist_total = np.sum(hist)

    # Calculate frame darkness
    darkness = (hist[0] / hist_total * 100)

    # If the image is fully black due to a bad camera read,
    # skip to the next frame
    if (hist_total == 0) or (darkness == 100):
        black_tries += 1
        continue

    dark_running_total += darkness
    valid_frames += 1

    # If the image exceeds darkness threshold due to subject distance,
    # skip to the next frame
    if (darkness > dark_threshold):
        dark_tries += 1
        continue

    # If the height is too high
    if scaling_factor != 1:
        # Apply that factor to the frame
        frame = cv2.resize(frame, None, fx=scaling_factor, fy=scaling_factor, interpolation=cv2.INTER_AREA)
        gsframe = cv2.resize(gsframe, None, fx=scaling_factor, fy=scaling_factor, interpolation=cv2.INTER_AREA)

    # If camera is configured to rotate = 1, check portrait in addition to landscape
    if rotate == 1:
        if frames % 3 == 1:
            frame = cv2.rotate(frame, cv2.ROTATE_90_COUNTERCLOCKWISE)
            gsframe = cv2.rotate(gsframe, cv2.ROTATE_90_COUNTERCLOCKWISE)
        if frames % 3 == 2:
            frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
            gsframe = cv2.rotate(gsframe, cv2.ROTATE_90_CLOCKWISE)

    # If camera is configured to rotate = 2, check portrait orientation
    elif rotate == 2:
        if frames % 2 == 0:
            frame = cv2.rotate(frame, cv2.ROTATE_90_COUNTERCLOCKWISE)
            gsframe = cv2.rotate(gsframe, cv2.ROTATE_90_COUNTERCLOCKWISE)
        else:
            frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
            gsframe = cv2.rotate(gsframe, cv2.ROTATE_90_CLOCKWISE)

    # Get all faces from that frame as encodings
    # Upsamples 1 time
    face_locations = face_detector(gsframe, 1)
    # Loop through each face
    for fl in face_locations:
        if use_cnn:
            fl = fl.rect

        # Fetch the faces in the image
        face_landmark = pose_predictor(frame, fl)
        face_encoding = np.array(face_encoder.compute_face_descriptor(frame, face_landmark, 1))

        # Match this found face against a known face
        matches = np.linalg.norm(encodings - face_encoding, axis=1)

        # Get best match
        match_index = np.argmin(matches)
        match = matches[match_index]

        # Update certainty if we have a new low
        if lowest_certainty > match:
            lowest_certainty = match

        # Check if a match that's confident enough
        if 0 < match < video_certainty:
            timings["tt"] = time.time() - timings["st"]
            timings["fl"] = time.time() - timings["fr"]

            # If set to true in the config, print debug text
            if end_report:
                def print_timing(label, k):
                    """Helper function to print a timing from the list"""
                    print(" %s: %dms" % (label, round(timings[k] * 1000)))

                # Print a nice timing report
                print(_("Time spent"))
                print_timing(_("Starting up"), "in")
                print(_(" Open cam + load libs: %dms") % (round(max(timings["ll"], timings["ic"]) * 1000, )))
                print_timing(_(" Opening the camera"), "ic")
                print_timing(_(" Importing recognition libs"), "ll")
                print_timing(_("Searching for known face"), "fl")
                print_timing(_("Total time"), "tt")

                print(_("\nResolution"))
                width = video_capture.fw or 1
                print(_(" Native: %dx%d") % (height, width))
                # Save the new size for diagnostics
                scale_height, scale_width = frame.shape[:2]
                print(_(" Used: %dx%d") % (scale_height, scale_width))

                # Show the total number of frames and calculate the FPS by dividing it by the total scan time
                print(_("\nFrames searched: %d (%.2f fps)") % (frames, frames / timings["fl"]))
                print(_("Black frames ignored: %d ") % (black_tries, ))
                print(_("Dark frames ignored: %d ") % (dark_tries, ))
                print(_("Certainty of winning frame: %.3f") % (match * 10, ))

                print(_("Winning model: %d (\"%s\")") % (match_index, models[match_index]["label"]))

            # Make snapshot if enabled
            if save_successful:
                make_snapshot(_("SUCCESSFUL"))

            # Run rubberstamps if enabled
            if config.getboolean("rubberstamps", "enabled", fallback=False):
                import rubberstamps

                send_to_ui("S", "")

                if "gtk_proc" not in vars():
                    gtk_proc = None

                rubberstamps.execute(config, gtk_proc, {
                    "video_capture": video_capture,
                    "face_detector": face_detector,
                    "pose_predictor": pose_predictor,
                    "clahe": clahe
                })

            # End peacefully
            exit(0)

    if exposure != -1:
        # For a strange reason on some cameras (e.g. Lenoxo X1E) setting manual exposure works only after a couple frames
        # are captured and even after a delay it does not always work. Setting exposure at every frame is reliable though.
        video_capture.internal.set(cv2.CAP_PROP_AUTO_EXPOSURE, 1.0)  # 1 = Manual
        video_capture.internal.set(cv2.CAP_PROP_EXPOSURE, float(exposure))
--------------------------------------------------------------------------------
/Documents/ProjectOS/remove_face.py:
--------------------------------------------------------------------------------
#PROJECTOS
#BY-ARPIT JAIN
# remove_face.py — delete one face model (by id) from the current user's file.
import sys
import os
import json
import builtins
import paths_factory

from i18n import _

user = builtins.howdy_user

# Check if enough arguments have been passed
if not builtins.howdy_args.arguments:
    print(_("Please add the ID of the model you want to remove as an argument"))
    print(_("For example:"))
    print("\n\thowdy remove 0\n")
    print(_("You can find the IDs by running:"))
    print("\n\thowdy list\n")
    sys.exit(1)

# Check if the models file has been created yet
if not os.path.exists(paths_factory.user_models_dir_path()):
    print(_("Face models have not been initialized yet, please run:"))
    print("\n\thowdy add\n")
    sys.exit(1)

# Path to the models file
enc_file = paths_factory.user_model_path(user)

# Try to load the models file and abort if the user does not have it yet
try:
    encodings = json.load(open(enc_file))
except FileNotFoundError:
    print(_("No face model known for the user {}, please run:").format(user))
    print("\n\thowdy add\n")
    sys.exit(1)

# Tracks if an encoding with that id has been found
found = False

# Get the ID from the cli arguments
# NOTE(review): "id" shadows the builtin id(); harmless here but worth renaming
id = builtins.howdy_args.arguments[0]

# Loop through all encodings and check if they match the argument
for enc in encodings:
    if str(enc["id"]) == id:
        # Only ask the user if there's no -y flag
        if not builtins.howdy_args.y:
            # Double check with the user
            print(_('This will remove the model called "{label}" for {user}').format(label=enc["label"], user=user))
            ans = input(_("Do you want to continue [y/N]: "))

            # Abort if the answer isn't yes
            if (ans.lower() != "y"):
                print(_('\nInterpreting as a "NO", aborting'))
                sys.exit(1)

            # Add a padding empty line
            print()

        # Mark as found and print an enter
        found = True
        break

# Abort if no matching id was found
if not found:
    print(_("No model with ID {id} exists for {user}").format(id=id, user=user))
    sys.exit(1)

# Remove the entire file if this encoding is the only one
if len(encodings) == 1:
    os.remove(paths_factory.user_model_path(user))
    print(_("Removed last model, howdy disabled for user"))
else:
    # A place holder to contain the encodings that will remain
    new_encodings = []

    # Loop through all encodings and only add those that don't need to be removed
    for enc in encodings:
        if str(enc["id"]) != id:
            new_encodings.append(enc)

    # Save this new set to disk
    with open(enc_file, "w") as datafile:
        json.dump(new_encodings, datafile)

print(_("Removed model {}").format(id))
--------------------------------------------------------------------------------
/Documents/ProjectOS/test_face.py:
--------------------------------------------------------------------------------
#PROJECTOS
#BY-ARPIT JAIN
# test_face.py — show a live camera window with detection overlays so the
# user can verify the camera/detector setup. (Source truncated below.)
import configparser
import builtins
import os
import json
import sys
import time
import dlib
import cv2
import numpy as np
import paths_factory

from i18n import _
from recorders.video_capture import VideoCapture

# Read config from disk
config = configparser.ConfigParser()
config.read(paths_factory.config_file_path())

if config.get("video", "recording_plugin", fallback="opencv") != "opencv":
    print(_("Howdy has been configured to use a recorder which doesn't support the test command yet, aborting"))
    sys.exit(12)

video_capture = VideoCapture(config)

# Read config values to use in the main loop
video_certainty = config.getfloat("video", "certainty", fallback=3.5) / 10
exposure = config.getint("video", "exposure", fallback=-1)
dark_threshold = config.getfloat("video", "dark_threshold", fallback=60)

# Let the user know what's up
print(_("""
Opening a window with a test feed

Press ctrl+C in this terminal to quit
Click on the image to enable or disable slow mode
"""))


def mouse(event, x, y, flags, param):
    """Handle mouse events"""
    global slow_mode

    # Toggle slowmode on click
    if event == cv2.EVENT_LBUTTONDOWN:
        slow_mode = not slow_mode


def print_text(line_number, text):
    """Print the status text by line number"""
    cv2.putText(overlay, text, (10, height - 10 - (10 * line_number)), cv2.FONT_HERSHEY_SIMPLEX, .3, (0, 255, 0), 0, cv2.LINE_AA)


use_cnn = config.getboolean('core', 'use_cnn', fallback=False)

if use_cnn:
    face_detector = dlib.cnn_face_detection_model_v1(
        paths_factory.mmod_human_face_detector_path()
    )
else:
    face_detector = dlib.get_frontal_face_detector()

pose_predictor = dlib.shape_predictor(paths_factory.shape_predictor_5_face_landmarks_path())
face_encoder = dlib.face_recognition_model_v1(paths_factory.dlib_face_recognition_resnet_model_v1_path())

encodings = []
models = None

# Load the user's models if they exist; the test works without them too
try:
    user = builtins.howdy_user
    models = json.load(open(paths_factory.user_model_path(user)))

    for model in models:
        encodings += model["data"]
except FileNotFoundError:
    pass

clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))

# Open the window and attach a mouse listener
cv2.namedWindow("Howdy Test")
cv2.setMouseCallback("Howdy Test", mouse)

# Enable a delay in the loop
slow_mode = False
# Count all frames ever
total_frames = 0
# Count all frames per second
sec_frames = 0
# Last second's FPS
fps = 0
# The current second we're counting
sec = int(time.time())
# recognition time
rec_tm = 0

# Wrap everything in a keyboard interrupt handler
try:
    while True:
        frame_tm = time.time()

        # Increment the frames
        total_frames += 1
        sec_frames += 1

        # If we've entered a new second
        if sec != int(frame_tm):
            # Set the last seconds FPS
            fps = sec_frames

            # Set the new second and reset the counter
            sec = int(frame_tm)
            sec_frames = 0

        # Grab a single frame of video
        orig_frame, frame = video_capture.read_frame()

        frame = clahe.apply(frame)
        # Make a frame to put overlays in
        overlay = frame.copy()
        overlay = cv2.cvtColor(overlay, cv2.COLOR_GRAY2BGR)

        # Fetch the frame height and width
        height, width = frame.shape[:2]

        # Create a histogram of the image with 8 values
        hist = cv2.calcHist([frame], [0], None, [8], [0, 256])
        # All values combined for percentage calculation
        hist_total = int(sum(hist)[0])
        # Fill with the overall containing percentage
        hist_perc = []

        # Loop through all values to calculate a percentage and add it to the overlay
        for index, value in enumerate(hist):
            value_perc = float(value[0]) / hist_total * 100
            hist_perc.append(value_perc)

            # Top left point, 10px margins
            p1 = (20 + (10 * index), 10)
            # Bottom right point makes the bar 10px thick, with a height of half the percentage
            p2 = (10 + (10 * index), int(value_perc / 2 + 10))
            # Draw the bar in green
            cv2.rectangle(overlay, p1, p2, (0, 200, 0), thickness=cv2.FILLED)

        # Print the status in the bottom left
        print_text(0, _("RESOLUTION: %dx%d") % (height, width))
        print_text(1, _("FPS: %d") % (fps, ))
        print_text(2, _("FRAMES: %d") % (total_frames, ))
        print_text(3, _("RECOGNITION: %dms") % (round(rec_tm * 1000), ))

        # Show that slow mode is on, if it's on
        if slow_mode:
            cv2.putText(overlay, _("SLOW MODE"), (width - 66, height - 10), cv2.FONT_HERSHEY_SIMPLEX, .3, (0, 0, 255), 0, cv2.LINE_AA)

        # Ignore dark frames
        if hist_perc[0] > dark_threshold:
            # Show that this is an ignored frame in the top right
            cv2.putText(overlay, _("DARK FRAME"), (width - 68, 16), cv2.FONT_HERSHEY_SIMPLEX, .3, (0, 0, 255), 0, cv2.LINE_AA)
        else:
            # Show that this is an active frame
            cv2.putText(overlay, _("SCAN FRAME"), (width - 68, 16), cv2.FONT_HERSHEY_SIMPLEX, .3, (0, 255, 0), 0, cv2.LINE_AA)

        rec_tm = time.time()

        # Get the locations of all faces and their locations
        # Upsample it once
        face_locations = face_detector(frame, 1)
        rec_tm = time.time() - rec_tm

        # Loop through all faces and paint a circle around them
        for loc in face_locations:
            if use_cnn:
                loc = loc.rect

            # By default the circle around the face is red for no match
            color = 
(0, 0, 230) 178 | 179 | # Get the center X and Y from the rectangular points 180 | x = int((loc.right() - loc.left()) / 2) + loc.left() 181 | y = int((loc.bottom() - loc.top()) / 2) + loc.top() 182 | 183 | # Get the raduis from the with of the square 184 | r = (loc.right() - loc.left()) / 2 185 | # Add 20% padding 186 | r = int(r + (r * 0.2)) 187 | 188 | # If we have models defined for the current user 189 | if models: 190 | # Get the encoding of the face in the frame 191 | face_landmark = pose_predictor(orig_frame, loc) 192 | face_encoding = np.array(face_encoder.compute_face_descriptor(orig_frame, face_landmark, 1)) 193 | 194 | # Match this found face against a known face 195 | matches = np.linalg.norm(encodings - face_encoding, axis=1) 196 | 197 | # Get best match 198 | match_index = np.argmin(matches) 199 | match = matches[match_index] 200 | 201 | # If a model matches 202 | if 0 < match < video_certainty: 203 | # Turn the circle green 204 | color = (0, 230, 0) 205 | 206 | # Print the name of the model next to the circle 207 | circle_text = "{} (certainty: {})".format(models[match_index]["label"], round(match * 10, 3)) 208 | cv2.putText(overlay, circle_text, (int(x + r / 3), y - r), cv2.FONT_HERSHEY_SIMPLEX, .3, (0, 255, 0), 0, cv2.LINE_AA) 209 | # If no approved matches, show red text 210 | else: 211 | cv2.putText(overlay, "no match", (int(x + r / 3), y - r), cv2.FONT_HERSHEY_SIMPLEX, .3, (0, 0, 255), 0, cv2.LINE_AA) 212 | 213 | # Draw the Circle in green 214 | cv2.circle(overlay, (x, y), r, color, 2) 215 | 216 | # Add the overlay to the frame with some transparency 217 | alpha = 0.65 218 | frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR) 219 | cv2.addWeighted(overlay, alpha, frame, 1 - alpha, 0, frame) 220 | 221 | # Show the image in a window 222 | cv2.imshow("Howdy Test", frame) 223 | 224 | # Quit on any keypress 225 | if cv2.waitKey(1) != -1: 226 | raise KeyboardInterrupt() 227 | 228 | frame_time = time.time() - frame_tm 229 | 230 | # Delay the frame if 
slowmode is on 231 | if slow_mode: 232 | time.sleep(max([.5 - frame_time, 0.0])) 233 | 234 | if exposure != -1: 235 | # For a strange reason on some cameras (e.g. Lenoxo X1E) 236 | # setting manual exposure works only after a couple frames 237 | # are captured and even after a delay it does not 238 | # always work. Setting exposure at every frame is 239 | # reliable though. 240 | video_capture.internal.set(cv2.CAP_PROP_AUTO_EXPOSURE, 1.0) # 1 = Manual 241 | video_capture.internal.set(cv2.CAP_PROP_EXPOSURE, float(exposure)) 242 | 243 | # On ctrl+C 244 | except KeyboardInterrupt: 245 | # Let the user know we're stopping 246 | print(_("\nClosing window")) 247 | 248 | # Release handle to the webcam 249 | cv2.destroyAllWindows() 250 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # NextGen OS - Face Authentication System 2 | 3 | ## Overview 4 | ProjectOS is a **next-gen authentication system** that enables **face recognition-based authentication** for operating systems. It enhances security and convenience by allowing users to log in using their facial biometrics instead of traditional passwords. 5 | 6 | ## Features 7 | - **Add New Face**: Register a new face for authentication. 8 | - **Match Face**: Authenticate users by matching their face. 9 | - **List Registered Faces**: Display all stored face encodings. 10 | - **Remove Face**: Delete a registered face from the system. 11 | - **Disable Face Recognition**: Temporarily disable face authentication. 12 | - **Configuration**: Manage system settings and preferences. 13 | - **CLI Support**: Command-line interface for easy usage. 14 | 15 | ## Files and Their Functions 16 | - `add_new_face.py` - Script to add a new user face for authentication. 17 | - `match_face.py` - Matches the detected face with stored face encodings. 18 | - `list_of_faces.py` - Lists all stored face encodings. 
19 | - `remove_face.py` - Deletes a registered face from the system. 20 | - `disable_facerecognition.py` - Temporarily disables face authentication. 21 | - `configuration.py` - Manages system configuration settings. 22 | - `cli.py` - Command-line interface for interacting with the system. 23 | - `test_face.py` - Test script to verify face authentication functionality. 24 | 25 | ## Installation 26 | ```bash 27 | # Clone the repository 28 | git clone https://github.com/your-repo/ProjectOS.git 29 | cd ProjectOS 30 | 31 | # Install dependencies 32 | pip install -r requirements.txt 33 | 34 | # Run face registration 35 | python add_new_face.py 36 | ``` 37 | 38 | ## Usage 39 | ### Add a new face 40 | ```bash 41 | python add_new_face.py 42 | ``` 43 | ### Authenticate a user 44 | ```bash 45 | python match_face.py 46 | ``` 47 | ### List all registered faces 48 | ```bash 49 | python list_of_faces.py 50 | ``` 51 | ### Remove a face 52 | ```bash 53 | python remove_face.py 54 | ``` 55 | ### Disable face recognition 56 | ```bash 57 | python disable_facerecognition.py 58 | ``` 59 | 60 | ## Dependencies 61 | - OpenCV 62 | - Dlib 63 | - Face Recognition 64 | - NumPy 65 | - Python 3.x 66 | 67 | ## Future Scope 68 | - **Voice Authentication** Integration 69 | - **Multi-User Face Recognition** 70 | - **AI-Based Spoof Detection** to prevent face spoofing 71 | 72 | ## Contributing 73 | Feel free to submit issues and pull requests to improve the system. 74 | 75 | --------------------------------------------------------------------------------