├── src ├── xception │ ├── index.ts │ └── types.ts ├── faceProcessor │ ├── index.ts │ ├── types.ts │ ├── util.ts │ ├── extractParams.ts │ └── extractParamsFromWeigthMap.ts ├── mtcnn │ ├── config.ts │ ├── getSizesForScale.ts │ ├── normalize.ts │ ├── MtcnnBox.ts │ ├── bgrToRgbTensor.ts │ ├── index.ts │ ├── prelu.ts │ ├── pyramidDown.ts │ ├── PNet.ts │ ├── sharedLayers.ts │ ├── RNet.ts │ ├── types.ts │ ├── ONet.ts │ └── extractImagePatches.ts ├── ageGenderNet │ ├── index.ts │ ├── types.ts │ ├── extractParams.ts │ └── extractParamsFromWeigthMap.ts ├── faceExpressionNet │ ├── index.ts │ ├── FaceExpressions.ts │ └── FaceExpressionNet.ts ├── faceFeatureExtractor │ ├── index.ts │ ├── extractParamsFromWeigthMapTiny.ts │ ├── extractParamsFromWeigthMap.ts │ ├── extractParamsTiny.ts │ ├── extractParams.ts │ ├── types.ts │ ├── loadParamsFactory.ts │ ├── extractorsFactory.ts │ ├── FaceFeatureExtractor.ts │ ├── TinyFaceFeatureExtractor.ts │ └── denseBlock.ts ├── dom │ ├── fetchJson.ts │ ├── resolveInput.ts │ ├── fetchNetWeights.ts │ ├── isMediaElement.ts │ ├── isMediaLoaded.ts │ ├── fetchOrThrow.ts │ ├── types.ts │ ├── matchDimensions.ts │ ├── fetchImage.ts │ ├── loadWeightMap.ts │ ├── getMediaDimensions.ts │ ├── imageTensorToCanvas.ts │ ├── index.ts │ ├── getContext2dOrThrow.ts │ ├── bufferToImage.ts │ ├── awaitMediaLoaded.ts │ ├── createCanvas.ts │ ├── imageToSquare.ts │ ├── extractFaceTensors.ts │ ├── extractFaces.ts │ └── toNetInput.ts ├── draw │ ├── index.ts │ ├── drawContour.ts │ ├── drawDetections.ts │ ├── drawFaceExpressions.ts │ └── DrawBox.ts ├── faceLandmarkNet │ ├── index.ts │ ├── FaceLandmark68Net.ts │ └── FaceLandmark68TinyNet.ts ├── factories │ ├── index.ts │ ├── WithFaceDescriptor.ts │ ├── WithAge.ts │ ├── WithFaceDetection.ts │ ├── WithFaceExpressions.ts │ ├── WithGender.ts │ └── WithFaceLandmarks.ts ├── faceRecognitionNet │ ├── scaleLayer.ts │ ├── index.ts │ ├── convLayer.ts │ ├── types.ts │ └── residualLayer.ts ├── tinyYolov2 │ ├── leaky.ts │ ├── index.ts │ ├── 
depthwiseSeparableConv.ts │ ├── convWithBatchNorm.ts │ ├── const.ts │ ├── types.ts │ ├── TinyYolov2Options.ts │ ├── config.ts │ └── TinyYolov2.ts ├── tinyFaceDetector │ ├── TinyFaceDetectorOptions.ts │ ├── index.ts │ ├── const.ts │ └── TinyFaceDetector.ts ├── env │ ├── isNodejs.ts │ ├── isBrowser.ts │ ├── types.ts │ ├── createFileSystem.ts │ ├── createBrowserEnv.ts │ ├── createNodejsEnv.ts │ └── index.ts ├── ops │ ├── shuffleArray.ts │ ├── index.ts │ ├── normalize.ts │ ├── iou.ts │ ├── minBbox.ts │ ├── nonMaxSuppression.ts │ └── padToSquare.ts ├── globalApi │ ├── ComposableTask.ts │ ├── index.ts │ ├── types.ts │ ├── detectFaces.ts │ ├── allFaces.ts │ └── extractFacesAndComputeResults.ts ├── common │ ├── disposeUnusedWeightTensors.ts │ ├── fullyConnectedLayer.ts │ ├── index.ts │ ├── depthwiseSeparableConv.ts │ ├── loadConvParamsFactory.ts │ ├── convLayer.ts │ ├── extractWeightsFactory.ts │ ├── types.ts │ ├── extractWeightEntryFactory.ts │ ├── extractFCParamsFactory.ts │ ├── extractConvParamsFactory.ts │ ├── getModelUris.ts │ └── extractSeparableConvParamsFactory.ts ├── classes │ ├── Rect.ts │ ├── FaceLandmarks5.ts │ ├── BoundingBox.ts │ ├── index.ts │ ├── FaceMatch.ts │ ├── LabeledBox.ts │ ├── FaceDetection.ts │ ├── Dimensions.ts │ ├── PredictedBox.ts │ ├── FaceLandmarks68.ts │ ├── Point.ts │ ├── LabeledFaceDescriptors.ts │ └── ObjectDetection.ts ├── euclideanDistance.ts ├── ssdMobilenetv1 │ ├── pointwiseConvLayer.ts │ ├── index.ts │ ├── boxPredictionLayer.ts │ ├── SsdMobilenetv1Options.ts │ ├── types.ts │ ├── mobileNetV1.ts │ └── predictionLayer.ts ├── index.ts ├── resizeResults.ts └── utils │ └── index.ts ├── test ├── data │ ├── dummy.weights │ └── mtcnnFaceLandmarkPositions.json ├── images │ ├── angry.jpg │ ├── face1.png │ ├── face2.png │ ├── faces.jpg │ ├── white.png │ ├── surprised.jpg │ ├── angry_cropped.jpg │ ├── face_rectangular.png │ └── surprised_cropped.jpg ├── media │ └── video.mp4 ├── tests │ ├── dom │ │ ├── fetchJson.browser.test.ts │ │ ├── 
fetchNetWeights.browser.test.ts │ │ ├── fetchOrThrow.browser.test.ts │ │ └── fetchImage.browser.test.ts │ ├── ssdMobilenetv1 │ │ ├── expectedBoxes.ts │ │ └── ssdMobilenetv1.locateFaces.test.ts │ ├── globalApi │ │ └── consts.ts │ ├── classes │ │ ├── BoundingBox.test.ts │ │ ├── LabeledFaceDescriptors.test.ts │ │ └── Rect.test.ts │ ├── factories │ │ ├── WithFaceDetection.test.ts │ │ └── WithFaceLandmarks.test.ts │ ├── ops │ │ └── iou.test.ts │ └── tinyFaceDetector │ │ └── tinyFaceDetector.locateFaces.test.ts ├── Environment.ts ├── expectedTinyFaceDetectorBoxes.ts ├── expectFaceDetections.ts ├── env.node.ts ├── utils │ └── index.test.ts ├── env.ts ├── tests-legacy │ ├── mtcnn │ │ └── expectMtcnnResults.ts │ ├── faceRecognitionNet.uncompressed.test.ts │ ├── ssdMobilenetv1.locateFaces.uncompressed.test.ts │ ├── faceLandmark68Net.uncompressed.test.ts │ └── faceLandmark68TinyNet.uncompressed.test.ts ├── expectFaceDetectionsWithLandmarks.ts └── expectFullFaceDescriptions.ts ├── examples ├── images │ ├── bbt1.jpg │ ├── bbt2.jpg │ ├── bbt3.jpg │ ├── bbt4.jpg │ ├── bbt5.jpg │ ├── sad.jpg │ ├── angry.jpg │ ├── happy.jpg │ ├── amy │ │ ├── amy1.png │ │ ├── amy2.png │ │ ├── amy3.png │ │ ├── amy4.png │ │ └── amy5.png │ ├── disgusted.jpg │ ├── fearful.jpg │ ├── neutral.jpg │ ├── raj │ │ ├── raj1.png │ │ ├── raj2.png │ │ ├── raj3.png │ │ ├── raj4.png │ │ └── raj5.png │ ├── surprised.jpg │ ├── howard │ │ ├── howard1.png │ │ ├── howard2.png │ │ ├── howard3.png │ │ ├── howard4.png │ │ └── howard5.png │ ├── penny │ │ ├── penny1.png │ │ ├── penny2.png │ │ ├── penny3.png │ │ ├── penny4.png │ │ └── penny5.png │ ├── stuart │ │ ├── stuart1.png │ │ ├── stuart2.png │ │ ├── stuart3.png │ │ ├── stuart4.png │ │ └── stuart5.png │ ├── leonard │ │ ├── leonard1.png │ │ ├── leonard2.png │ │ ├── leonard3.png │ │ ├── leonard4.png │ │ └── leonard5.png │ ├── sheldon │ │ ├── sheldon1.png │ │ ├── sheldon2.png │ │ ├── sheldon3.png │ │ ├── sheldon4.png │ │ └── sheldon5.png │ └── bernadette │ │ ├── 
bernadette1.png │ │ ├── bernadette2.png │ │ ├── bernadette3.png │ │ ├── bernadette4.png │ │ └── bernadette5.png ├── media │ └── bbt.mp4 ├── examples-browser │ ├── public │ │ ├── menu_icon.png │ │ ├── github_link_icon.png │ │ ├── styles.css │ │ └── js │ │ │ ├── imageSelectionControls.js │ │ │ └── bbt.js │ └── package.json └── examples-nodejs │ ├── package.json │ ├── commons │ ├── index.ts │ ├── saveFile.ts │ ├── env.ts │ └── faceDetection.ts │ ├── faceDetection.ts │ ├── faceLandmarkDetection.ts │ ├── faceExpressionRecognition.ts │ ├── ageAndGenderRecognition.ts │ └── faceRecognition.ts ├── weights ├── mtcnn_model-shard1 ├── age_gender_model-shard1 ├── face_expression_model-shard1 ├── ssd_mobilenetv1_model-shard1 ├── ssd_mobilenetv1_model-shard2 ├── face_landmark_68_model-shard1 ├── face_recognition_model-shard1 ├── face_recognition_model-shard2 ├── tiny_face_detector_model-shard1 └── face_landmark_68_tiny_model-shard1 ├── tsconfig.test.json ├── .gitignore ├── .npmignore ├── tsconfig.es6.json ├── jasmine-node.js ├── .travis.yml ├── tsconfig.json ├── LICENSE ├── rollup.config.js ├── typedoc.config.js └── karma.conf.js /src/xception/index.ts: -------------------------------------------------------------------------------- 1 | export * from './TinyXception'; -------------------------------------------------------------------------------- /test/data/dummy.weights: -------------------------------------------------------------------------------- 1 | +��>�%�8�vZ>w�X>��ʽ��*cz -------------------------------------------------------------------------------- /src/faceProcessor/index.ts: -------------------------------------------------------------------------------- 1 | export * from './FaceProcessor'; -------------------------------------------------------------------------------- /src/mtcnn/config.ts: -------------------------------------------------------------------------------- 1 | export const CELL_STRIDE = 2 2 | export const CELL_SIZE = 12 
-------------------------------------------------------------------------------- /src/ageGenderNet/index.ts: -------------------------------------------------------------------------------- 1 | export * from './AgeGenderNet'; 2 | export * from './types'; -------------------------------------------------------------------------------- /test/images/angry.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/test/images/angry.jpg -------------------------------------------------------------------------------- /test/images/face1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/test/images/face1.png -------------------------------------------------------------------------------- /test/images/face2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/test/images/face2.png -------------------------------------------------------------------------------- /test/images/faces.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/test/images/faces.jpg -------------------------------------------------------------------------------- /test/images/white.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/test/images/white.png -------------------------------------------------------------------------------- /test/media/video.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/test/media/video.mp4 -------------------------------------------------------------------------------- /examples/images/bbt1.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/bbt1.jpg -------------------------------------------------------------------------------- /examples/images/bbt2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/bbt2.jpg -------------------------------------------------------------------------------- /examples/images/bbt3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/bbt3.jpg -------------------------------------------------------------------------------- /examples/images/bbt4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/bbt4.jpg -------------------------------------------------------------------------------- /examples/images/bbt5.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/bbt5.jpg -------------------------------------------------------------------------------- /examples/images/sad.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/sad.jpg -------------------------------------------------------------------------------- /examples/media/bbt.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/media/bbt.mp4 -------------------------------------------------------------------------------- /src/faceExpressionNet/index.ts: 
-------------------------------------------------------------------------------- 1 | export * from './FaceExpressionNet'; 2 | export * from './FaceExpressions'; -------------------------------------------------------------------------------- /examples/images/angry.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/angry.jpg -------------------------------------------------------------------------------- /examples/images/happy.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/happy.jpg -------------------------------------------------------------------------------- /test/images/surprised.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/test/images/surprised.jpg -------------------------------------------------------------------------------- /weights/mtcnn_model-shard1: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/weights/mtcnn_model-shard1 -------------------------------------------------------------------------------- /examples/images/amy/amy1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/amy/amy1.png -------------------------------------------------------------------------------- /examples/images/amy/amy2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/amy/amy2.png -------------------------------------------------------------------------------- /examples/images/amy/amy3.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/amy/amy3.png -------------------------------------------------------------------------------- /examples/images/amy/amy4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/amy/amy4.png -------------------------------------------------------------------------------- /examples/images/amy/amy5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/amy/amy5.png -------------------------------------------------------------------------------- /examples/images/disgusted.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/disgusted.jpg -------------------------------------------------------------------------------- /examples/images/fearful.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/fearful.jpg -------------------------------------------------------------------------------- /examples/images/neutral.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/neutral.jpg -------------------------------------------------------------------------------- /examples/images/raj/raj1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/raj/raj1.png -------------------------------------------------------------------------------- /examples/images/raj/raj2.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/raj/raj2.png -------------------------------------------------------------------------------- /examples/images/raj/raj3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/raj/raj3.png -------------------------------------------------------------------------------- /examples/images/raj/raj4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/raj/raj4.png -------------------------------------------------------------------------------- /examples/images/raj/raj5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/raj/raj5.png -------------------------------------------------------------------------------- /examples/images/surprised.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/surprised.jpg -------------------------------------------------------------------------------- /test/images/angry_cropped.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/test/images/angry_cropped.jpg -------------------------------------------------------------------------------- /src/faceFeatureExtractor/index.ts: -------------------------------------------------------------------------------- 1 | export * from './FaceFeatureExtractor'; 2 | export * from './TinyFaceFeatureExtractor'; -------------------------------------------------------------------------------- /tsconfig.test.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "extends": "./tsconfig.json", 3 | "include": [ 4 | "src", 5 | "test" 6 | ] 7 | } 8 | -------------------------------------------------------------------------------- /weights/age_gender_model-shard1: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/weights/age_gender_model-shard1 -------------------------------------------------------------------------------- /examples/images/howard/howard1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/howard/howard1.png -------------------------------------------------------------------------------- /examples/images/howard/howard2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/howard/howard2.png -------------------------------------------------------------------------------- /examples/images/howard/howard3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/howard/howard3.png -------------------------------------------------------------------------------- /examples/images/howard/howard4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/howard/howard4.png -------------------------------------------------------------------------------- /examples/images/howard/howard5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/howard/howard5.png 
-------------------------------------------------------------------------------- /examples/images/penny/penny1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/penny/penny1.png -------------------------------------------------------------------------------- /examples/images/penny/penny2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/penny/penny2.png -------------------------------------------------------------------------------- /examples/images/penny/penny3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/penny/penny3.png -------------------------------------------------------------------------------- /examples/images/penny/penny4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/penny/penny4.png -------------------------------------------------------------------------------- /examples/images/penny/penny5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/penny/penny5.png -------------------------------------------------------------------------------- /examples/images/stuart/stuart1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/stuart/stuart1.png -------------------------------------------------------------------------------- /examples/images/stuart/stuart2.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/stuart/stuart2.png -------------------------------------------------------------------------------- /examples/images/stuart/stuart3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/stuart/stuart3.png -------------------------------------------------------------------------------- /examples/images/stuart/stuart4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/stuart/stuart4.png -------------------------------------------------------------------------------- /examples/images/stuart/stuart5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/stuart/stuart5.png -------------------------------------------------------------------------------- /test/images/face_rectangular.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/test/images/face_rectangular.png -------------------------------------------------------------------------------- /test/images/surprised_cropped.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/test/images/surprised_cropped.jpg -------------------------------------------------------------------------------- /examples/images/leonard/leonard1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/leonard/leonard1.png -------------------------------------------------------------------------------- 
/examples/images/leonard/leonard2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/leonard/leonard2.png -------------------------------------------------------------------------------- /examples/images/leonard/leonard3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/leonard/leonard3.png -------------------------------------------------------------------------------- /examples/images/leonard/leonard4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/leonard/leonard4.png -------------------------------------------------------------------------------- /examples/images/leonard/leonard5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/leonard/leonard5.png -------------------------------------------------------------------------------- /examples/images/sheldon/sheldon1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/sheldon/sheldon1.png -------------------------------------------------------------------------------- /examples/images/sheldon/sheldon2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/sheldon/sheldon2.png -------------------------------------------------------------------------------- /examples/images/sheldon/sheldon3.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/sheldon/sheldon3.png -------------------------------------------------------------------------------- /examples/images/sheldon/sheldon4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/sheldon/sheldon4.png -------------------------------------------------------------------------------- /examples/images/sheldon/sheldon5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/sheldon/sheldon5.png -------------------------------------------------------------------------------- /src/faceProcessor/types.ts: -------------------------------------------------------------------------------- 1 | import { FCParams } from '../common'; 2 | 3 | export type NetParams = { 4 | fc: FCParams 5 | } 6 | 7 | -------------------------------------------------------------------------------- /weights/face_expression_model-shard1: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/weights/face_expression_model-shard1 -------------------------------------------------------------------------------- /weights/ssd_mobilenetv1_model-shard1: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/weights/ssd_mobilenetv1_model-shard1 -------------------------------------------------------------------------------- /weights/ssd_mobilenetv1_model-shard2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/weights/ssd_mobilenetv1_model-shard2 -------------------------------------------------------------------------------- 
/weights/face_landmark_68_model-shard1: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/weights/face_landmark_68_model-shard1 -------------------------------------------------------------------------------- /weights/face_recognition_model-shard1: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/weights/face_recognition_model-shard1 -------------------------------------------------------------------------------- /weights/face_recognition_model-shard2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/weights/face_recognition_model-shard2 -------------------------------------------------------------------------------- /weights/tiny_face_detector_model-shard1: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/weights/tiny_face_detector_model-shard1 -------------------------------------------------------------------------------- /examples/images/bernadette/bernadette1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/bernadette/bernadette1.png -------------------------------------------------------------------------------- /examples/images/bernadette/bernadette2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/bernadette/bernadette2.png -------------------------------------------------------------------------------- /examples/images/bernadette/bernadette3.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/bernadette/bernadette3.png -------------------------------------------------------------------------------- /examples/images/bernadette/bernadette4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/bernadette/bernadette4.png -------------------------------------------------------------------------------- /examples/images/bernadette/bernadette5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/images/bernadette/bernadette5.png -------------------------------------------------------------------------------- /weights/face_landmark_68_tiny_model-shard1: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/weights/face_landmark_68_tiny_model-shard1 -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | .rpt2_cache 3 | .env* 4 | 5 | tmp 6 | proto 7 | weights_uncompressed 8 | weights_unused 9 | docs 10 | out 11 | build -------------------------------------------------------------------------------- /examples/examples-browser/public/menu_icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/examples-browser/public/menu_icon.png -------------------------------------------------------------------------------- /examples/examples-browser/public/github_link_icon.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/PrudhviGNV/face-api.js/master/examples/examples-browser/public/github_link_icon.png -------------------------------------------------------------------------------- /.npmignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | .rpt2_cache 3 | 4 | examples 5 | proto 6 | weights 7 | weights_uncompressed 8 | weights_unused 9 | src 10 | test 11 | tools 12 | docs -------------------------------------------------------------------------------- /examples/examples-nodejs/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "author": "justadudewhohacks", 3 | "license": "MIT", 4 | "dependencies": { 5 | "face-api.js": "../../" 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /examples/examples-nodejs/commons/index.ts: -------------------------------------------------------------------------------- 1 | export { canvas } from './env'; 2 | export { faceDetectionNet, faceDetectionOptions } from './faceDetection'; 3 | export { saveFile } from './saveFile'; -------------------------------------------------------------------------------- /src/dom/fetchJson.ts: -------------------------------------------------------------------------------- 1 | import { fetchOrThrow } from './fetchOrThrow'; 2 | 3 | export async function fetchJson(uri: string): Promise { 4 | return (await fetchOrThrow(uri)).json() 5 | } 6 | -------------------------------------------------------------------------------- /src/mtcnn/getSizesForScale.ts: -------------------------------------------------------------------------------- 1 | export function getSizesForScale(scale: number, [height, width]: number[]) { 2 | return { 3 | height: Math.floor(height * scale), 4 | width: Math.floor(width * scale) 5 | } 6 | } -------------------------------------------------------------------------------- /src/draw/index.ts: 
-------------------------------------------------------------------------------- 1 | export * from './drawContour' 2 | export * from './drawDetections' 3 | export * from './drawFaceExpressions' 4 | export * from './DrawBox' 5 | export * from './DrawFaceLandmarks' 6 | export * from './DrawTextField' -------------------------------------------------------------------------------- /tsconfig.es6.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "./tsconfig.json", 3 | "compilerOptions": { 4 | "outDir": "build/es6", 5 | "module": "es6", 6 | "target": "es5" 7 | }, 8 | "include": [ 9 | "src" 10 | ] 11 | } 12 | -------------------------------------------------------------------------------- /src/dom/resolveInput.ts: -------------------------------------------------------------------------------- 1 | import { env } from '../env'; 2 | 3 | export function resolveInput(arg: string | any) { 4 | if (!env.isNodejs() && typeof arg === 'string') { 5 | return document.getElementById(arg) 6 | } 7 | return arg 8 | } -------------------------------------------------------------------------------- /src/faceLandmarkNet/index.ts: -------------------------------------------------------------------------------- 1 | import { FaceLandmark68Net } from './FaceLandmark68Net'; 2 | 3 | export * from './FaceLandmark68Net'; 4 | export * from './FaceLandmark68TinyNet'; 5 | 6 | export class FaceLandmarkNet extends FaceLandmark68Net {} -------------------------------------------------------------------------------- /src/factories/index.ts: -------------------------------------------------------------------------------- 1 | export * from './WithFaceDescriptor' 2 | export * from './WithFaceDetection' 3 | export * from './WithFaceExpressions' 4 | export * from './WithFaceLandmarks' 5 | export * from './WithAge' 6 | export * from './WithGender' -------------------------------------------------------------------------------- /src/mtcnn/normalize.ts: 
-------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | export function normalize(x: tf.Tensor4D): tf.Tensor4D { 4 | return tf.tidy( 5 | () => tf.mul(tf.sub(x, tf.scalar(127.5)), tf.scalar(0.0078125)) 6 | ) 7 | } -------------------------------------------------------------------------------- /src/dom/fetchNetWeights.ts: -------------------------------------------------------------------------------- 1 | import { fetchOrThrow } from './fetchOrThrow'; 2 | 3 | export async function fetchNetWeights(uri: string): Promise { 4 | return new Float32Array(await (await fetchOrThrow(uri)).arrayBuffer()) 5 | } 6 | -------------------------------------------------------------------------------- /src/mtcnn/MtcnnBox.ts: -------------------------------------------------------------------------------- 1 | import { Box } from '../classes'; 2 | 3 | export class MtcnnBox extends Box { 4 | constructor(left: number, top: number, right: number, bottom: number) { 5 | super({ left, top, right, bottom }, true) 6 | } 7 | } -------------------------------------------------------------------------------- /src/mtcnn/bgrToRgbTensor.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | export function bgrToRgbTensor(tensor: tf.Tensor4D): tf.Tensor4D { 4 | return tf.tidy( 5 | () => tf.stack(tf.unstack(tensor, 3).reverse(), 3) 6 | ) as tf.Tensor4D 7 | } -------------------------------------------------------------------------------- /examples/examples-browser/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "scripts": { 3 | "start": "node server.js" 4 | }, 5 | "author": "justadudewhohacks", 6 | "license": "MIT", 7 | "dependencies": { 8 | "express": "^4.17.1", 9 | "request": "^2.88.2" 10 | } 11 | } 12 | 
-------------------------------------------------------------------------------- /src/mtcnn/index.ts: -------------------------------------------------------------------------------- 1 | import { Mtcnn } from './Mtcnn'; 2 | 3 | export * from './Mtcnn'; 4 | export * from './MtcnnOptions'; 5 | 6 | export function createMtcnn(weights: Float32Array) { 7 | const net = new Mtcnn() 8 | net.extractWeights(weights) 9 | return net 10 | } -------------------------------------------------------------------------------- /src/dom/isMediaElement.ts: -------------------------------------------------------------------------------- 1 | import { env } from '../env'; 2 | 3 | export function isMediaElement(input: any) { 4 | 5 | const { Image, Canvas, Video } = env.getEnv() 6 | 7 | return input instanceof Image 8 | || input instanceof Canvas 9 | || input instanceof Video 10 | } -------------------------------------------------------------------------------- /src/faceRecognitionNet/scaleLayer.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | import { ScaleLayerParams } from './types'; 4 | 5 | export function scale(x: tf.Tensor4D, params: ScaleLayerParams): tf.Tensor4D { 6 | return tf.add(tf.mul(x, params.weights), params.biases) 7 | } 8 | -------------------------------------------------------------------------------- /src/mtcnn/prelu.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | export function prelu(x: T, alpha: tf.Tensor1D): T { 4 | return tf.tidy(() => 5 | tf.add( 6 | tf.relu(x), 7 | tf.mul(alpha, tf.neg(tf.relu(tf.neg(x)))) 8 | ) 9 | ) 10 | } 11 | -------------------------------------------------------------------------------- /test/tests/dom/fetchJson.browser.test.ts: -------------------------------------------------------------------------------- 1 | import { fetchJson } from '../../../src'; 2 | 
3 | describe('fetchJson', () => { 4 | 5 | it('fetches json', async () => { 6 | const url = 'test/data/boxes.json' 7 | expect(async () => await fetchJson(url)).not.toThrow() 8 | }) 9 | 10 | }) 11 | -------------------------------------------------------------------------------- /src/faceRecognitionNet/index.ts: -------------------------------------------------------------------------------- 1 | import { FaceRecognitionNet } from './FaceRecognitionNet'; 2 | 3 | export * from './FaceRecognitionNet'; 4 | 5 | export function createFaceRecognitionNet(weights: Float32Array) { 6 | const net = new FaceRecognitionNet() 7 | net.extractWeights(weights) 8 | return net 9 | } -------------------------------------------------------------------------------- /jasmine-node.js: -------------------------------------------------------------------------------- 1 | let spec_files = ['**/*.test.ts'] 2 | 3 | // exclude browser tests 4 | spec_files = spec_files.concat(['!**/*.browser.test.ts']) 5 | spec_files = spec_files.concat(['!**/tests-legacy/**/*.ts']) 6 | 7 | module.exports = { 8 | spec_dir: 'test', 9 | spec_files, 10 | random: false 11 | } -------------------------------------------------------------------------------- /src/tinyYolov2/leaky.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | export function leaky(x: tf.Tensor4D): tf.Tensor4D { 4 | return tf.tidy(() => { 5 | const min = tf.mul(x, tf.scalar(0.10000000149011612)) 6 | return tf.add(tf.relu(tf.sub(x, min)), min) 7 | //return tf.maximum(x, min) 8 | }) 9 | } -------------------------------------------------------------------------------- /src/tinyFaceDetector/TinyFaceDetectorOptions.ts: -------------------------------------------------------------------------------- 1 | import { ITinyYolov2Options, TinyYolov2Options } from '../tinyYolov2'; 2 | 3 | export interface ITinyFaceDetectorOptions extends ITinyYolov2Options {} 4 | 5 | export 
class TinyFaceDetectorOptions extends TinyYolov2Options { 6 | protected _name: string = 'TinyFaceDetectorOptions' 7 | } -------------------------------------------------------------------------------- /src/dom/isMediaLoaded.ts: -------------------------------------------------------------------------------- 1 | import { env } from '../env'; 2 | 3 | export function isMediaLoaded(media: HTMLImageElement | HTMLVideoElement) : boolean { 4 | 5 | const { Image, Video } = env.getEnv() 6 | 7 | return (media instanceof Image && media.complete) 8 | || (media instanceof Video && media.readyState >= 3) 9 | } 10 | -------------------------------------------------------------------------------- /src/env/isNodejs.ts: -------------------------------------------------------------------------------- 1 | export function isNodejs(): boolean { 2 | return typeof global === 'object' 3 | && typeof require === 'function' 4 | && typeof module !== 'undefined' 5 | // issues with gatsby.js: module.exports is undefined 6 | // && !!module.exports 7 | && typeof process !== 'undefined' && !!process.version 8 | } -------------------------------------------------------------------------------- /src/ops/shuffleArray.ts: -------------------------------------------------------------------------------- 1 | export function shuffleArray(inputArray: any[]) { 2 | const array = inputArray.slice() 3 | for (let i = array.length - 1; i > 0; i--) { 4 | const j = Math.floor(Math.random() * (i + 1)) 5 | const x = array[i] 6 | array[i] = array[j] 7 | array[j] = x 8 | } 9 | return array 10 | } -------------------------------------------------------------------------------- /src/tinyFaceDetector/index.ts: -------------------------------------------------------------------------------- 1 | import { TinyFaceDetector } from './TinyFaceDetector'; 2 | 3 | export * from './TinyFaceDetector'; 4 | export * from './TinyFaceDetectorOptions'; 5 | 6 | export function createTinyFaceDetector(weights: Float32Array) { 7 | const net 
= new TinyFaceDetector() 8 | net.extractWeights(weights) 9 | return net 10 | } -------------------------------------------------------------------------------- /src/globalApi/ComposableTask.ts: -------------------------------------------------------------------------------- 1 | export class ComposableTask { 2 | 3 | public async then( 4 | onfulfilled: (value: T) => T | PromiseLike 5 | ): Promise { 6 | return onfulfilled(await this.run()) 7 | } 8 | 9 | public async run(): Promise { 10 | throw new Error('ComposableTask - run is not implemented') 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /src/globalApi/index.ts: -------------------------------------------------------------------------------- 1 | export * from './allFaces' 2 | export * from './ComposableTask' 3 | export * from './ComputeFaceDescriptorsTasks' 4 | export * from './detectFaces' 5 | export * from './DetectFacesTasks' 6 | export * from './DetectFaceLandmarksTasks' 7 | export * from './FaceMatcher' 8 | export * from './nets' 9 | export * from './types' 10 | 11 | -------------------------------------------------------------------------------- /src/common/disposeUnusedWeightTensors.ts: -------------------------------------------------------------------------------- 1 | import { ParamMapping } from './types'; 2 | 3 | export function disposeUnusedWeightTensors(weightMap: any, paramMappings: ParamMapping[]) { 4 | Object.keys(weightMap).forEach(path => { 5 | if (!paramMappings.some(pm => pm.originalPath === path)) { 6 | weightMap[path].dispose() 7 | } 8 | }) 9 | } 10 | -------------------------------------------------------------------------------- /src/common/fullyConnectedLayer.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | import { FCParams } from './types'; 4 | 5 | export function fullyConnectedLayer( 6 | x: tf.Tensor2D, 7 | params: FCParams 8 | ): tf.Tensor2D 
{ 9 | return tf.tidy(() => 10 | tf.add( 11 | tf.matMul(x, params.weights), 12 | params.bias 13 | ) 14 | ) 15 | } -------------------------------------------------------------------------------- /test/Environment.ts: -------------------------------------------------------------------------------- 1 | import { NeuralNetwork } from '../src'; 2 | 3 | export type TestEnv = { 4 | loadImage: (uri: string) => Promise 5 | loadJson: (uri: string) => Promise 6 | initNet: >( 7 | net: TNet, 8 | uncompressedFilename?: string | boolean, 9 | isUnusedModel?: boolean 10 | ) => any 11 | } -------------------------------------------------------------------------------- /examples/examples-nodejs/commons/saveFile.ts: -------------------------------------------------------------------------------- 1 | import * as fs from 'fs'; 2 | import * as path from 'path'; 3 | 4 | const baseDir = path.resolve(__dirname, '../out') 5 | 6 | export function saveFile(fileName: string, buf: Buffer) { 7 | if (!fs.existsSync(baseDir)) { 8 | fs.mkdirSync(baseDir) 9 | } 10 | 11 | fs.writeFileSync(path.resolve(baseDir, fileName), buf) 12 | } -------------------------------------------------------------------------------- /test/tests/dom/fetchNetWeights.browser.test.ts: -------------------------------------------------------------------------------- 1 | import { fetchNetWeights } from '../../../src'; 2 | 3 | describe('fetchNetWeights', () => { 4 | 5 | it('fetches .weights file', async () => { 6 | const url = 'base/test/data/dummy.weights' 7 | const weights = await fetchNetWeights(url) 8 | expect(weights instanceof Float32Array).toBe(true) 9 | }) 10 | 11 | }) 12 | -------------------------------------------------------------------------------- /src/env/isBrowser.ts: -------------------------------------------------------------------------------- 1 | export function isBrowser(): boolean { 2 | return typeof window === 'object' 3 | && typeof document !== 'undefined' 4 | && typeof HTMLImageElement !== 'undefined' 5 | 
&& typeof HTMLCanvasElement !== 'undefined' 6 | && typeof HTMLVideoElement !== 'undefined' 7 | && typeof ImageData !== 'undefined' 8 | && typeof CanvasRenderingContext2D !== 'undefined' 9 | } -------------------------------------------------------------------------------- /src/ops/index.ts: -------------------------------------------------------------------------------- 1 | export * from './iou' 2 | export * from './minBbox' 3 | export * from './nonMaxSuppression' 4 | export * from './normalize' 5 | export * from './padToSquare' 6 | export * from './shuffleArray' 7 | 8 | export function sigmoid(x: number) { 9 | return 1 / (1 + Math.exp(-x)) 10 | } 11 | 12 | export function inverseSigmoid(x: number) { 13 | return Math.log(x / (1 - x)) 14 | } -------------------------------------------------------------------------------- /src/factories/WithFaceDescriptor.ts: -------------------------------------------------------------------------------- 1 | export type WithFaceDescriptor = TSource & { 2 | descriptor: Float32Array 3 | } 4 | 5 | export function extendWithFaceDescriptor< 6 | TSource 7 | > ( 8 | sourceObj: TSource, 9 | descriptor: Float32Array 10 | ): WithFaceDescriptor { 11 | 12 | const extension = { descriptor } 13 | return Object.assign({}, sourceObj, extension) 14 | } 15 | 16 | -------------------------------------------------------------------------------- /src/classes/Rect.ts: -------------------------------------------------------------------------------- 1 | import { Box } from './Box'; 2 | 3 | export interface IRect { 4 | x: number 5 | y: number 6 | width: number 7 | height: number 8 | } 9 | 10 | export class Rect extends Box implements IRect { 11 | constructor(x: number, y: number, width: number, height: number, allowNegativeDimensions: boolean = false) { 12 | super({ x, y, width, height }, allowNegativeDimensions) 13 | } 14 | } -------------------------------------------------------------------------------- /src/dom/fetchOrThrow.ts: 
-------------------------------------------------------------------------------- 1 | import { env } from '../env'; 2 | 3 | export async function fetchOrThrow( 4 | url: string, 5 | init?: RequestInit 6 | ): Promise { 7 | 8 | const fetch = env.getEnv().fetch 9 | const res = await fetch(url, init) 10 | if (!(res.status < 400)) { 11 | throw new Error(`failed to fetch: (${res.status}) ${res.statusText}, from url: ${res.url}`) 12 | } 13 | return res 14 | } -------------------------------------------------------------------------------- /src/tinyFaceDetector/const.ts: -------------------------------------------------------------------------------- 1 | import { Point } from '../classes' 2 | 3 | export const IOU_THRESHOLD = 0.4 4 | 5 | export const BOX_ANCHORS = [ 6 | new Point(1.603231, 2.094468), 7 | new Point(6.041143, 7.080126), 8 | new Point(2.882459, 3.518061), 9 | new Point(4.266906, 5.178857), 10 | new Point(9.041765, 10.66308) 11 | ] 12 | 13 | export const MEAN_RGB: [number, number, number] = [117.001, 114.697, 97.404] -------------------------------------------------------------------------------- /src/tinyYolov2/index.ts: -------------------------------------------------------------------------------- 1 | import { TinyYolov2 } from './TinyYolov2'; 2 | 3 | export * from './TinyYolov2Options'; 4 | 5 | export * from './config' 6 | export * from './types' 7 | 8 | export { TinyYolov2 } 9 | 10 | export function createTinyYolov2(weights: Float32Array, withSeparableConvs: boolean = true) { 11 | const net = new TinyYolov2(withSeparableConvs) 12 | net.extractWeights(weights) 13 | return net 14 | } -------------------------------------------------------------------------------- /src/common/index.ts: -------------------------------------------------------------------------------- 1 | export * from './convLayer' 2 | export * from './depthwiseSeparableConv' 3 | export * from './disposeUnusedWeightTensors' 4 | export * from './extractConvParamsFactory' 5 | export * from 
'./extractFCParamsFactory' 6 | export * from './extractSeparableConvParamsFactory' 7 | export * from './extractWeightEntryFactory' 8 | export * from './extractWeightsFactory' 9 | export * from './getModelUris' 10 | export * from './types' -------------------------------------------------------------------------------- /src/dom/types.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | import { NetInput } from './NetInput'; 4 | 5 | export type TMediaElement = HTMLImageElement | HTMLVideoElement | HTMLCanvasElement 6 | 7 | export type TResolvedNetInput = TMediaElement | tf.Tensor3D | tf.Tensor4D 8 | 9 | export type TNetInputArg = string | TResolvedNetInput 10 | 11 | export type TNetInput = TNetInputArg | Array | NetInput | tf.Tensor4D -------------------------------------------------------------------------------- /src/factories/WithAge.ts: -------------------------------------------------------------------------------- 1 | export type WithAge = TSource & { 2 | age: number 3 | } 4 | 5 | export function isWithAge(obj: any): obj is WithAge<{}> { 6 | return typeof obj['age'] === 'number' 7 | } 8 | 9 | export function extendWithAge< 10 | TSource 11 | > ( 12 | sourceObj: TSource, 13 | age: number 14 | ): WithAge { 15 | 16 | const extension = { age } 17 | return Object.assign({}, sourceObj, extension) 18 | } -------------------------------------------------------------------------------- /src/classes/FaceLandmarks5.ts: -------------------------------------------------------------------------------- 1 | import { getCenterPoint } from '../utils'; 2 | import { FaceLandmarks } from './FaceLandmarks'; 3 | import { Point } from './Point'; 4 | 5 | 6 | export class FaceLandmarks5 extends FaceLandmarks { 7 | 8 | protected getRefPointsForAlignment(): Point[] { 9 | const pts = this.positions 10 | return [ 11 | pts[0], 12 | pts[1], 13 | getCenterPoint([pts[3], pts[4]]) 14 | ] 15 | } 16 | } 
-------------------------------------------------------------------------------- /src/dom/matchDimensions.ts: -------------------------------------------------------------------------------- 1 | import { IDimensions } from '../classes'; 2 | import { getMediaDimensions } from './getMediaDimensions'; 3 | 4 | export function matchDimensions(input: IDimensions, reference: IDimensions, useMediaDimensions: boolean = false) { 5 | const { width, height } = useMediaDimensions 6 | ? getMediaDimensions(reference) 7 | : reference 8 | input.width = width 9 | input.height = height 10 | return { width, height } 11 | } -------------------------------------------------------------------------------- /src/classes/BoundingBox.ts: -------------------------------------------------------------------------------- 1 | import { Box } from './Box'; 2 | 3 | export interface IBoundingBox { 4 | left: number 5 | top: number 6 | right: number 7 | bottom: number 8 | } 9 | 10 | export class BoundingBox extends Box implements IBoundingBox { 11 | constructor(left: number, top: number, right: number, bottom: number, allowNegativeDimensions: boolean = false) { 12 | super({ left, top, right, bottom }, allowNegativeDimensions) 13 | } 14 | } -------------------------------------------------------------------------------- /src/euclideanDistance.ts: -------------------------------------------------------------------------------- 1 | export function euclideanDistance(arr1: number[] | Float32Array, arr2: number[] | Float32Array) { 2 | if (arr1.length !== arr2.length) 3 | throw new Error('euclideanDistance: arr1.length !== arr2.length') 4 | 5 | const desc1 = Array.from(arr1) 6 | const desc2 = Array.from(arr2) 7 | 8 | return Math.sqrt( 9 | desc1 10 | .map((val, i) => val - desc2[i]) 11 | .reduce((res, diff) => res + Math.pow(diff, 2), 0) 12 | ) 13 | } -------------------------------------------------------------------------------- /test/tests/dom/fetchOrThrow.browser.test.ts: 
-------------------------------------------------------------------------------- 1 | import { fetchOrThrow } from '../../../src'; 2 | 3 | describe('fetchOrThrow', () => { 4 | 5 | it('404, throws', async () => { 6 | const url = '/does/not/exist' 7 | 8 | let err = '' 9 | try { 10 | await fetchOrThrow(url) 11 | } catch (e) { 12 | err = e.toString() 13 | } 14 | 15 | expect(err).toContain('failed to fetch: (404)') 16 | expect(err).toContain(url) 17 | }) 18 | 19 | }) 20 | -------------------------------------------------------------------------------- /test/tests/ssdMobilenetv1/expectedBoxes.ts: -------------------------------------------------------------------------------- 1 | import { IRect } from '../../../src'; 2 | import { sortBoxes } from '../../utils'; 3 | 4 | export const expectedSsdBoxes: IRect[] = sortBoxes([ 5 | { x: 48, y: 253, width: 104, height: 129 }, 6 | { x: 260, y: 227, width: 76, height: 117 }, 7 | { x: 466, y: 165, width: 88, height: 130 }, 8 | { x: 234, y: 36, width: 84, height: 119 }, 9 | { x: 577, y: 65, width: 84, height: 105 }, 10 | { x: 84, y: 14, width: 79, height: 132 } 11 | ]) -------------------------------------------------------------------------------- /src/classes/index.ts: -------------------------------------------------------------------------------- 1 | export * from './BoundingBox' 2 | export * from './Box' 3 | export * from './Dimensions' 4 | export * from './FaceDetection'; 5 | export * from './FaceLandmarks'; 6 | export * from './FaceLandmarks5'; 7 | export * from './FaceLandmarks68'; 8 | export * from './FaceMatch'; 9 | export * from './LabeledBox' 10 | export * from './LabeledFaceDescriptors'; 11 | export * from './ObjectDetection' 12 | export * from './Point' 13 | export * from './PredictedBox' 14 | export * from './Rect' -------------------------------------------------------------------------------- /src/faceProcessor/util.ts: -------------------------------------------------------------------------------- 1 | import * as 
tf from '@tensorflow/tfjs-core'; 2 | 3 | export function seperateWeightMaps(weightMap: tf.NamedTensorMap) { 4 | 5 | const featureExtractorMap: tf.NamedTensorMap = {} 6 | const classifierMap: tf.NamedTensorMap = {} 7 | 8 | Object.keys(weightMap).forEach(key => { 9 | const map = key.startsWith('fc') ? classifierMap : featureExtractorMap 10 | map[key] = weightMap[key] 11 | }) 12 | 13 | return { featureExtractorMap, classifierMap } 14 | 15 | } -------------------------------------------------------------------------------- /src/ssdMobilenetv1/pointwiseConvLayer.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | import { PointwiseConvParams } from './types'; 4 | 5 | export function pointwiseConvLayer( 6 | x: tf.Tensor4D, 7 | params: PointwiseConvParams, 8 | strides: [number, number] 9 | ) { 10 | return tf.tidy(() => { 11 | 12 | let out = tf.conv2d(x, params.filters, strides, 'same') 13 | out = tf.add(out, params.batch_norm_offset) 14 | return tf.clipByValue(out, 0, 6) 15 | 16 | }) 17 | } -------------------------------------------------------------------------------- /src/common/depthwiseSeparableConv.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | import { SeparableConvParams } from './types'; 4 | 5 | export function depthwiseSeparableConv( 6 | x: tf.Tensor4D, 7 | params: SeparableConvParams, 8 | stride: [number, number] 9 | ): tf.Tensor4D { 10 | return tf.tidy(() => { 11 | let out = tf.separableConv2d(x, params.depthwise_filter, params.pointwise_filter, stride, 'same') 12 | out = tf.add(out, params.bias) 13 | return out 14 | }) 15 | } -------------------------------------------------------------------------------- /src/common/loadConvParamsFactory.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | 
import { ConvParams } from './types'; 4 | 5 | export function loadConvParamsFactory(extractWeightEntry: (originalPath: string, paramRank: number) => T) { 6 | return function(prefix: string): ConvParams { 7 | const filters = extractWeightEntry(`${prefix}/filters`, 4) 8 | const bias = extractWeightEntry(`${prefix}/bias`, 1) 9 | 10 | return { filters, bias } 11 | } 12 | } -------------------------------------------------------------------------------- /test/expectedTinyFaceDetectorBoxes.ts: -------------------------------------------------------------------------------- 1 | import { IRect } from '../src'; 2 | import { sortBoxes } from './utils'; 3 | 4 | export const expectedTinyFaceDetectorBoxes: IRect[] = sortBoxes([ 5 | { x: 29, y: 264, width: 139, height: 137 }, 6 | { x: 224, y: 240, width: 147, height: 128 }, 7 | { x: 547, y: 81, width: 136, height: 114 }, 8 | { x: 214, y: 53, width: 124, height: 119 }, 9 | { x: 430, y: 183, width: 162, height: 143 }, 10 | { x: 54, y: 33, width: 134, height: 114 } 11 | ]) 12 | 13 | -------------------------------------------------------------------------------- /src/ageGenderNet/types.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | import { FCParams } from '../common'; 4 | 5 | export type AgeAndGenderPrediction = { 6 | age: number 7 | gender: Gender 8 | genderProbability: number 9 | } 10 | 11 | export enum Gender { 12 | FEMALE = 'female', 13 | MALE = 'male' 14 | } 15 | 16 | export type NetOutput = { age: tf.Tensor1D, gender: tf.Tensor2D } 17 | 18 | export type NetParams = { 19 | fc: { 20 | age: FCParams 21 | gender: FCParams 22 | } 23 | } -------------------------------------------------------------------------------- /src/ops/normalize.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | export function normalize(x: tf.Tensor4D, meanRgb: number[]): 
tf.Tensor4D { 4 | return tf.tidy(() => { 5 | const [r, g, b] = meanRgb 6 | const avg_r = tf.fill([...x.shape.slice(0, 3), 1], r) 7 | const avg_g = tf.fill([...x.shape.slice(0, 3), 1], g) 8 | const avg_b = tf.fill([...x.shape.slice(0, 3), 1], b) 9 | const avg_rgb = tf.concat([avg_r, avg_g, avg_b], 3) 10 | 11 | return tf.sub(x, avg_rgb) 12 | }) 13 | } -------------------------------------------------------------------------------- /src/dom/fetchImage.ts: -------------------------------------------------------------------------------- 1 | import { bufferToImage } from './bufferToImage'; 2 | import { fetchOrThrow } from './fetchOrThrow'; 3 | 4 | export async function fetchImage(uri: string): Promise { 5 | const res = await fetchOrThrow(uri) 6 | const blob = await (res).blob() 7 | 8 | if (!blob.type.startsWith('image/')) { 9 | throw new Error(`fetchImage - expected blob type to be of type image/*, instead have: ${blob.type}, for url: ${res.url}`) 10 | } 11 | return bufferToImage(blob) 12 | } 13 | -------------------------------------------------------------------------------- /src/ops/iou.ts: -------------------------------------------------------------------------------- 1 | import { Box } from '../classes/Box'; 2 | 3 | export function iou(box1: Box, box2: Box, isIOU: boolean = true) { 4 | const width = Math.max(0.0, Math.min(box1.right, box2.right) - Math.max(box1.left, box2.left)) 5 | const height = Math.max(0.0, Math.min(box1.bottom, box2.bottom) - Math.max(box1.top, box2.top)) 6 | const interSection = width * height 7 | 8 | return isIOU 9 | ? 
interSection / (box1.area + box2.area - interSection) 10 | : interSection / Math.min(box1.area, box2.area) 11 | } -------------------------------------------------------------------------------- /src/common/convLayer.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | import { ConvParams } from './types'; 4 | 5 | export function convLayer( 6 | x: tf.Tensor4D, 7 | params: ConvParams, 8 | padding: 'valid' | 'same' = 'same', 9 | withRelu: boolean = false 10 | ): tf.Tensor4D { 11 | return tf.tidy(() => { 12 | const out = tf.add( 13 | tf.conv2d(x, params.filters, [1, 1], padding), 14 | params.bias 15 | ) as tf.Tensor4D 16 | 17 | return withRelu ? tf.relu(out) : out 18 | }) 19 | } -------------------------------------------------------------------------------- /src/mtcnn/pyramidDown.ts: -------------------------------------------------------------------------------- 1 | import { CELL_SIZE } from './config' 2 | 3 | export function pyramidDown(minFaceSize: number, scaleFactor: number, dims: number[]): number[] { 4 | 5 | const [height, width] = dims 6 | const m = CELL_SIZE / minFaceSize 7 | 8 | const scales = [] 9 | 10 | let minLayer = Math.min(height, width) * m 11 | let exp = 0 12 | while (minLayer >= 12) { 13 | scales.push(m * Math.pow(scaleFactor, exp)) 14 | minLayer = minLayer * scaleFactor 15 | exp += 1 16 | } 17 | 18 | return scales 19 | } -------------------------------------------------------------------------------- /src/ops/minBbox.ts: -------------------------------------------------------------------------------- 1 | import { BoundingBox, IPoint } from '../classes'; 2 | 3 | export function minBbox(pts: IPoint[]): BoundingBox { 4 | const xs = pts.map(pt => pt.x) 5 | const ys = pts.map(pt => pt.y) 6 | const minX = xs.reduce((min, x) => x < min ? x : min, Infinity) 7 | const minY = ys.reduce((min, y) => y < min ? 
y : min, Infinity) 8 | const maxX = xs.reduce((max, x) => max < x ? x : max, 0) 9 | const maxY = ys.reduce((max, y) => max < y ? y : max, 0) 10 | 11 | return new BoundingBox(minX, minY, maxX, maxY) 12 | } 13 | -------------------------------------------------------------------------------- /src/common/extractWeightsFactory.ts: -------------------------------------------------------------------------------- 1 | export function extractWeightsFactory(weights: Float32Array) { 2 | let remainingWeights = weights 3 | 4 | function extractWeights(numWeights: number): Float32Array { 5 | const ret = remainingWeights.slice(0, numWeights) 6 | remainingWeights = remainingWeights.slice(numWeights) 7 | return ret 8 | } 9 | 10 | function getRemainingWeights(): Float32Array { 11 | return remainingWeights 12 | } 13 | 14 | return { 15 | extractWeights, 16 | getRemainingWeights 17 | } 18 | } -------------------------------------------------------------------------------- /src/env/types.ts: -------------------------------------------------------------------------------- 1 | export type FileSystem = { 2 | readFile: (filePath: string) => Promise 3 | } 4 | 5 | export type Environment = FileSystem & { 6 | Canvas: typeof HTMLCanvasElement 7 | CanvasRenderingContext2D: typeof CanvasRenderingContext2D 8 | Image: typeof HTMLImageElement 9 | ImageData: typeof ImageData 10 | Video: typeof HTMLVideoElement 11 | createCanvasElement: () => HTMLCanvasElement 12 | createImageElement: () => HTMLImageElement 13 | fetch: (url: string, init?: RequestInit) => Promise 14 | } 15 | -------------------------------------------------------------------------------- /src/dom/loadWeightMap.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | import { getModelUris } from '../common/getModelUris'; 4 | import { fetchJson } from './fetchJson'; 5 | 6 | export async function loadWeightMap( 7 | uri: string | undefined, 8 | 
defaultModelName: string, 9 | ): Promise { 10 | const { manifestUri, modelBaseUri } = getModelUris(uri, defaultModelName) 11 | 12 | const manifest = await fetchJson(manifestUri) 13 | 14 | return tf.io.loadWeights(manifest, modelBaseUri) 15 | } -------------------------------------------------------------------------------- /src/ssdMobilenetv1/index.ts: -------------------------------------------------------------------------------- 1 | import { SsdMobilenetv1 } from './SsdMobilenetv1'; 2 | 3 | export * from './SsdMobilenetv1'; 4 | export * from './SsdMobilenetv1Options'; 5 | 6 | export function createSsdMobilenetv1(weights: Float32Array) { 7 | const net = new SsdMobilenetv1() 8 | net.extractWeights(weights) 9 | return net 10 | } 11 | 12 | export function createFaceDetectionNet(weights: Float32Array) { 13 | return createSsdMobilenetv1(weights) 14 | } 15 | 16 | // alias for backward compatibily 17 | export class FaceDetectionNet extends SsdMobilenetv1 {} -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | sudo: required 2 | language: node_js 3 | node_js: 4 | #- "node" 5 | - "12" 6 | - "11" 7 | - "10" 8 | - "8" 9 | # node 6 is not compatible with tfjs-node 10 | # - "6" 11 | services: 12 | - xvfb 13 | env: 14 | global: 15 | - BACKEND_CPU=true EXCLUDE_UNCOMPRESSED=true 16 | matrix: 17 | - ENV=browser 18 | - ENV=node 19 | addons: 20 | chrome: stable 21 | install: npm install 22 | script: 23 | - if [ $ENV == 'browser' ]; then npm run test-browser; fi 24 | - if [ $ENV == 'node' ]; then npm run test-node; fi 25 | - npm run build -------------------------------------------------------------------------------- /src/tinyYolov2/depthwiseSeparableConv.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | import { SeparableConvParams } from '../common/types'; 4 | 
import { leaky } from './leaky'; 5 | 6 | export function depthwiseSeparableConv(x: tf.Tensor4D, params: SeparableConvParams): tf.Tensor4D { 7 | return tf.tidy(() => { 8 | let out = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]]) as tf.Tensor4D 9 | 10 | out = tf.separableConv2d(out, params.depthwise_filter, params.pointwise_filter, [1, 1], 'valid') 11 | out = tf.add(out, params.bias) 12 | 13 | return leaky(out) 14 | }) 15 | } -------------------------------------------------------------------------------- /test/tests/globalApi/consts.ts: -------------------------------------------------------------------------------- 1 | import { TinyFaceDetectorOptions } from '../../../src'; 2 | 3 | export const withNetArgs = { 4 | withAllFacesTinyFaceDetector: true, 5 | withFaceExpressionNet: { quantized: true }, 6 | withAgeGenderNet: { quantized: true } 7 | } 8 | 9 | export const expectedScores = [0.7, 0.82, 0.93, 0.86, 0.79, 0.84] 10 | 11 | export const deltas = { 12 | maxScoreDelta: 0.05, 13 | maxBoxDelta: 5, 14 | maxLandmarksDelta: 10, 15 | maxDescriptorDelta: 0.2 16 | } 17 | 18 | export const faceDetectorOptions = new TinyFaceDetectorOptions({ 19 | inputSize: 416 20 | }) -------------------------------------------------------------------------------- /src/dom/getMediaDimensions.ts: -------------------------------------------------------------------------------- 1 | import { Dimensions, IDimensions } from '../classes/Dimensions'; 2 | import { env } from '../env'; 3 | 4 | export function getMediaDimensions(input: HTMLImageElement | HTMLCanvasElement | HTMLVideoElement | IDimensions): Dimensions { 5 | 6 | const { Image, Video } = env.getEnv() 7 | 8 | if (input instanceof Image) { 9 | return new Dimensions(input.naturalWidth, input.naturalHeight) 10 | } 11 | if (input instanceof Video) { 12 | return new Dimensions(input.videoWidth, input.videoHeight) 13 | } 14 | return new Dimensions(input.width, input.height) 15 | } 16 | 
-------------------------------------------------------------------------------- /src/factories/WithFaceDetection.ts: -------------------------------------------------------------------------------- 1 | import { FaceDetection } from '../classes/FaceDetection'; 2 | 3 | export type WithFaceDetection = TSource & { 4 | detection: FaceDetection 5 | } 6 | 7 | export function isWithFaceDetection(obj: any): obj is WithFaceDetection<{}> { 8 | return obj['detection'] instanceof FaceDetection 9 | } 10 | 11 | export function extendWithFaceDetection< 12 | TSource 13 | > ( 14 | sourceObj: TSource, 15 | detection: FaceDetection 16 | ): WithFaceDetection { 17 | 18 | const extension = { detection } 19 | return Object.assign({}, sourceObj, extension) 20 | } 21 | -------------------------------------------------------------------------------- /src/globalApi/types.ts: -------------------------------------------------------------------------------- 1 | import { FaceDetection } from '../classes/FaceDetection'; 2 | import { TNetInput } from '../dom'; 3 | import { MtcnnOptions } from '../mtcnn/MtcnnOptions'; 4 | import { SsdMobilenetv1Options } from '../ssdMobilenetv1/SsdMobilenetv1Options'; 5 | import { TinyFaceDetectorOptions } from '../tinyFaceDetector/TinyFaceDetectorOptions'; 6 | import { TinyYolov2Options } from '../tinyYolov2'; 7 | 8 | export type FaceDetectionOptions = TinyFaceDetectorOptions | SsdMobilenetv1Options | MtcnnOptions | TinyYolov2Options 9 | 10 | export type FaceDetectionFunction = (input: TNetInput) => Promise -------------------------------------------------------------------------------- /src/tinyYolov2/convWithBatchNorm.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | import { leaky } from './leaky'; 4 | import { ConvWithBatchNorm } from './types'; 5 | 6 | export function convWithBatchNorm(x: tf.Tensor4D, params: ConvWithBatchNorm): tf.Tensor4D { 7 | return tf.tidy(() => { 
8 | let out = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]]) as tf.Tensor4D 9 | 10 | out = tf.conv2d(out, params.conv.filters, [1, 1], 'valid') 11 | out = tf.sub(out, params.bn.sub) 12 | out = tf.mul(out, params.bn.truediv) 13 | out = tf.add(out, params.conv.bias) 14 | 15 | return leaky(out) 16 | }) 17 | } -------------------------------------------------------------------------------- /examples/examples-nodejs/commons/env.ts: -------------------------------------------------------------------------------- 1 | // import nodejs bindings to native tensorflow, 2 | // not required, but will speed up things drastically (python required) 3 | import '@tensorflow/tfjs-node'; 4 | 5 | import * as faceapi from 'face-api.js'; 6 | 7 | // implements nodejs wrappers for HTMLCanvasElement, HTMLImageElement, ImageData 8 | const canvas = require('canvas') 9 | 10 | // patch nodejs environment, we need to provide an implementation of 11 | // HTMLCanvasElement and HTMLImageElement 12 | const { Canvas, Image, ImageData } = canvas 13 | faceapi.env.monkeyPatch({ Canvas, Image, ImageData }) 14 | 15 | export { canvas } -------------------------------------------------------------------------------- /src/common/types.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | export type ExtractWeightsFunction = (numWeights: number) => Float32Array 4 | 5 | export type ParamMapping = { 6 | originalPath?: string 7 | paramPath: string 8 | } 9 | 10 | export type ConvParams = { 11 | filters: tf.Tensor4D 12 | bias: tf.Tensor1D 13 | } 14 | 15 | export type FCParams = { 16 | weights: tf.Tensor2D 17 | bias: tf.Tensor1D 18 | } 19 | 20 | export class SeparableConvParams { 21 | constructor( 22 | public depthwise_filter: tf.Tensor4D, 23 | public pointwise_filter: tf.Tensor4D, 24 | public bias: tf.Tensor1D 25 | ) {} 26 | } -------------------------------------------------------------------------------- 
/src/factories/WithFaceExpressions.ts: -------------------------------------------------------------------------------- 1 | import { FaceExpressions } from '../faceExpressionNet/FaceExpressions'; 2 | 3 | export type WithFaceExpressions = TSource & { 4 | expressions: FaceExpressions 5 | } 6 | 7 | export function isWithFaceExpressions(obj: any): obj is WithFaceExpressions<{}> { 8 | return obj['expressions'] instanceof FaceExpressions 9 | } 10 | 11 | export function extendWithFaceExpressions< 12 | TSource 13 | > ( 14 | sourceObj: TSource, 15 | expressions: FaceExpressions 16 | ): WithFaceExpressions { 17 | 18 | const extension = { expressions } 19 | return Object.assign({}, sourceObj, extension) 20 | } -------------------------------------------------------------------------------- /src/draw/drawContour.ts: -------------------------------------------------------------------------------- 1 | import { Point } from '../classes'; 2 | 3 | export function drawContour( 4 | ctx: CanvasRenderingContext2D, 5 | points: Point[], 6 | isClosed: boolean = false 7 | ) { 8 | ctx.beginPath() 9 | 10 | points.slice(1).forEach(({ x, y }, prevIdx) => { 11 | const from = points[prevIdx] 12 | ctx.moveTo(from.x, from.y) 13 | ctx.lineTo(x, y) 14 | }) 15 | 16 | if (isClosed) { 17 | const from = points[points.length - 1] 18 | const to = points[0] 19 | if (!from || !to) { 20 | return 21 | } 22 | 23 | ctx.moveTo(from.x, from.y) 24 | ctx.lineTo(to.x, to.y) 25 | } 26 | 27 | ctx.stroke() 28 | } -------------------------------------------------------------------------------- /test/tests/classes/BoundingBox.test.ts: -------------------------------------------------------------------------------- 1 | import { BoundingBox } from '../../../src/classes/BoundingBox'; 2 | 3 | describe('BoundingBox', () => { 4 | 5 | describe('constructor', () => { 6 | 7 | it('properties', () => { 8 | const box = new BoundingBox(5, 10, 15, 20) 9 | expect(box.left).toEqual(5) 10 | expect(box.x).toEqual(5) 11 | 
expect(box.top).toEqual(10) 12 | expect(box.y).toEqual(10) 13 | expect(box.right).toEqual(15) 14 | expect(box.bottom).toEqual(20) 15 | expect(box.width).toEqual(10) 16 | expect(box.height).toEqual(10) 17 | expect(box.area).toEqual(100) 18 | }) 19 | 20 | }) 21 | 22 | }) -------------------------------------------------------------------------------- /src/classes/FaceMatch.ts: -------------------------------------------------------------------------------- 1 | import { round } from '../utils'; 2 | 3 | export interface IFaceMatch { 4 | label: string 5 | distance: number 6 | } 7 | 8 | export class FaceMatch implements IFaceMatch { 9 | private _label: string 10 | private _distance: number 11 | 12 | constructor(label: string, distance: number) { 13 | this._label = label 14 | this._distance = distance 15 | } 16 | 17 | public get label(): string { return this._label } 18 | public get distance(): number { return this._distance } 19 | 20 | public toString(withDistance: boolean = true): string { 21 | return `${this.label}${withDistance ? 
` (${round(this.distance)})` : ''}` 22 | } 23 | } -------------------------------------------------------------------------------- /examples/examples-nodejs/faceDetection.ts: -------------------------------------------------------------------------------- 1 | import * as faceapi from 'face-api.js'; 2 | 3 | import { canvas, faceDetectionNet, faceDetectionOptions, saveFile } from './commons'; 4 | 5 | async function run() { 6 | 7 | await faceDetectionNet.loadFromDisk('../../weights') 8 | 9 | const img = await canvas.loadImage('../images/bbt1.jpg') 10 | const detections = await faceapi.detectAllFaces(img, faceDetectionOptions) 11 | 12 | const out = faceapi.createCanvasFromMedia(img) as any 13 | faceapi.draw.drawDetections(out, detections) 14 | 15 | saveFile('faceDetection.jpg', out.toBuffer('image/jpeg')) 16 | console.log('done, saved results to out/faceDetection.jpg') 17 | } 18 | 19 | run() -------------------------------------------------------------------------------- /src/mtcnn/PNet.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | import { convLayer } from '../common'; 4 | import { sharedLayer } from './sharedLayers'; 5 | import { PNetParams } from './types'; 6 | 7 | export function PNet(x: tf.Tensor4D, params: PNetParams): { prob: tf.Tensor4D, regions: tf.Tensor4D } { 8 | return tf.tidy(() => { 9 | 10 | let out = sharedLayer(x, params, true) 11 | const conv = convLayer(out, params.conv4_1, 'valid') 12 | const max = tf.expandDims(tf.max(conv, 3), 3) 13 | const prob = tf.softmax(tf.sub(conv, max), 3) as tf.Tensor4D 14 | const regions = convLayer(out, params.conv4_2, 'valid') 15 | 16 | return { prob, regions } 17 | }) 18 | } -------------------------------------------------------------------------------- /src/common/extractWeightEntryFactory.ts: -------------------------------------------------------------------------------- 1 | import { isTensor } from '../utils'; 2 | import 
{ ParamMapping } from './types'; 3 | 4 | export function extractWeightEntryFactory(weightMap: any, paramMappings: ParamMapping[]) { 5 | 6 | return function (originalPath: string, paramRank: number, mappedPath?: string): T { 7 | const tensor = weightMap[originalPath] 8 | 9 | if (!isTensor(tensor, paramRank)) { 10 | throw new Error(`expected weightMap[${originalPath}] to be a Tensor${paramRank}D, instead have ${tensor}`) 11 | } 12 | 13 | paramMappings.push( 14 | { originalPath, paramPath: mappedPath || originalPath } 15 | ) 16 | 17 | return tensor 18 | } 19 | 20 | } 21 | -------------------------------------------------------------------------------- /src/dom/imageTensorToCanvas.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | import { env } from '../env'; 4 | import { isTensor4D } from '../utils'; 5 | 6 | export async function imageTensorToCanvas( 7 | imgTensor: tf.Tensor, 8 | canvas?: HTMLCanvasElement 9 | ): Promise { 10 | 11 | const targetCanvas = canvas || env.getEnv().createCanvasElement() 12 | 13 | const [height, width, numChannels] = imgTensor.shape.slice(isTensor4D(imgTensor) ? 
1 : 0) 14 | const imgTensor3D = tf.tidy(() => imgTensor.as3D(height, width, numChannels).toInt()) 15 | await tf.browser.toPixels(imgTensor3D, targetCanvas) 16 | 17 | imgTensor3D.dispose() 18 | 19 | return targetCanvas 20 | } -------------------------------------------------------------------------------- /src/globalApi/detectFaces.ts: -------------------------------------------------------------------------------- 1 | import { TNetInput } from '../dom'; 2 | import { SsdMobilenetv1Options } from '../ssdMobilenetv1/SsdMobilenetv1Options'; 3 | import { DetectAllFacesTask, DetectSingleFaceTask } from './DetectFacesTasks'; 4 | import { FaceDetectionOptions } from './types'; 5 | 6 | export function detectSingleFace( 7 | input: TNetInput, 8 | options: FaceDetectionOptions = new SsdMobilenetv1Options() 9 | ): DetectSingleFaceTask { 10 | return new DetectSingleFaceTask(input, options) 11 | } 12 | 13 | export function detectAllFaces( 14 | input: TNetInput, 15 | options: FaceDetectionOptions = new SsdMobilenetv1Options() 16 | ): DetectAllFacesTask { 17 | return new DetectAllFacesTask(input, options) 18 | } -------------------------------------------------------------------------------- /examples/examples-nodejs/commons/faceDetection.ts: -------------------------------------------------------------------------------- 1 | import * as faceapi from 'face-api.js'; 2 | 3 | export const faceDetectionNet = faceapi.nets.ssdMobilenetv1 4 | // export const faceDetectionNet = tinyFaceDetector 5 | 6 | // SsdMobilenetv1Options 7 | const minConfidence = 0.5 8 | 9 | // TinyFaceDetectorOptions 10 | const inputSize = 408 11 | const scoreThreshold = 0.5 12 | 13 | function getFaceDetectorOptions(net: faceapi.NeuralNetwork) { 14 | return net === faceapi.nets.ssdMobilenetv1 15 | ? 
new faceapi.SsdMobilenetv1Options({ minConfidence }) 16 | : new faceapi.TinyFaceDetectorOptions({ inputSize, scoreThreshold }) 17 | } 18 | 19 | export const faceDetectionOptions = getFaceDetectorOptions(faceDetectionNet) -------------------------------------------------------------------------------- /src/faceLandmarkNet/FaceLandmark68Net.ts: -------------------------------------------------------------------------------- 1 | import { FaceFeatureExtractor } from '../faceFeatureExtractor/FaceFeatureExtractor'; 2 | import { FaceFeatureExtractorParams } from '../faceFeatureExtractor/types'; 3 | import { FaceLandmark68NetBase } from './FaceLandmark68NetBase'; 4 | 5 | export class FaceLandmark68Net extends FaceLandmark68NetBase { 6 | 7 | constructor(faceFeatureExtractor: FaceFeatureExtractor = new FaceFeatureExtractor()) { 8 | super('FaceLandmark68Net', faceFeatureExtractor) 9 | } 10 | 11 | protected getDefaultModelName(): string { 12 | return 'face_landmark_68_model' 13 | } 14 | 15 | protected getClassifierChannelsIn(): number { 16 | return 256 17 | } 18 | } -------------------------------------------------------------------------------- /test/tests/dom/fetchImage.browser.test.ts: -------------------------------------------------------------------------------- 1 | import { fetchImage } from '../../../src'; 2 | 3 | describe('fetchImage', () => { 4 | 5 | it('invalid mime type', async () => { 6 | const url = 'base/test/data/boxes.json' 7 | 8 | let err = '' 9 | try { 10 | await fetchImage(url) 11 | } catch (e) { 12 | err = e.toString() 13 | } 14 | 15 | expect(err).toContain('fetchImage - expected blob type to be of type image/*, instead have: application/json') 16 | expect(err).toContain(url) 17 | }) 18 | 19 | it('fetches image', async () => { 20 | const url = 'base/test/images/white.png' 21 | const img = await fetchImage(url) 22 | expect(img instanceof HTMLImageElement).toBe(true) 23 | }) 24 | 25 | }) 26 | 
-------------------------------------------------------------------------------- /src/factories/WithGender.ts: -------------------------------------------------------------------------------- 1 | import { Gender } from '../ageGenderNet/types'; 2 | import { isValidProbablitiy } from '../utils'; 3 | 4 | export type WithGender = TSource & { 5 | gender: Gender 6 | genderProbability: number 7 | } 8 | 9 | export function isWithGender(obj: any): obj is WithGender<{}> { 10 | return (obj['gender'] === Gender.MALE || obj['gender'] === Gender.FEMALE) 11 | && isValidProbablitiy(obj['genderProbability']) 12 | } 13 | 14 | export function extendWithGender< 15 | TSource 16 | > ( 17 | sourceObj: TSource, 18 | gender: Gender, 19 | genderProbability: number 20 | ): WithGender { 21 | 22 | const extension = { gender, genderProbability } 23 | return Object.assign({}, sourceObj, extension) 24 | } -------------------------------------------------------------------------------- /src/classes/LabeledBox.ts: -------------------------------------------------------------------------------- 1 | import { isValidNumber } from '../utils'; 2 | import { IBoundingBox } from './BoundingBox'; 3 | import { Box } from './Box'; 4 | import { IRect } from './Rect'; 5 | 6 | export class LabeledBox extends Box { 7 | 8 | public static assertIsValidLabeledBox(box: any, callee: string) { 9 | Box.assertIsValidBox(box, callee) 10 | 11 | if (!isValidNumber(box.label)) { 12 | throw new Error(`${callee} - expected property label (${box.label}) to be a number`) 13 | } 14 | } 15 | 16 | private _label: number 17 | 18 | constructor(box: IBoundingBox | IRect | any, label: number) { 19 | super(box) 20 | this._label = label 21 | } 22 | 23 | public get label(): number { return this._label } 24 | 25 | } -------------------------------------------------------------------------------- /src/faceLandmarkNet/FaceLandmark68TinyNet.ts: -------------------------------------------------------------------------------- 1 | import { 
TinyFaceFeatureExtractor } from '../faceFeatureExtractor/TinyFaceFeatureExtractor'; 2 | import { TinyFaceFeatureExtractorParams } from '../faceFeatureExtractor/types'; 3 | import { FaceLandmark68NetBase } from './FaceLandmark68NetBase'; 4 | 5 | export class FaceLandmark68TinyNet extends FaceLandmark68NetBase { 6 | 7 | constructor(faceFeatureExtractor: TinyFaceFeatureExtractor = new TinyFaceFeatureExtractor()) { 8 | super('FaceLandmark68TinyNet', faceFeatureExtractor) 9 | } 10 | 11 | protected getDefaultModelName(): string { 12 | return 'face_landmark_68_tiny_model' 13 | } 14 | 15 | protected getClassifierChannelsIn(): number { 16 | return 128 17 | } 18 | } -------------------------------------------------------------------------------- /src/ssdMobilenetv1/boxPredictionLayer.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | import { convLayer } from '../common'; 4 | import { BoxPredictionParams } from './types'; 5 | 6 | 7 | export function boxPredictionLayer( 8 | x: tf.Tensor4D, 9 | params: BoxPredictionParams 10 | ) { 11 | return tf.tidy(() => { 12 | 13 | const batchSize = x.shape[0] 14 | 15 | const boxPredictionEncoding = tf.reshape( 16 | convLayer(x, params.box_encoding_predictor), 17 | [batchSize, -1, 1, 4] 18 | ) 19 | const classPrediction = tf.reshape( 20 | convLayer(x, params.class_predictor), 21 | [batchSize, -1, 3] 22 | ) 23 | 24 | return { 25 | boxPredictionEncoding, 26 | classPrediction 27 | } 28 | }) 29 | } -------------------------------------------------------------------------------- /src/dom/index.ts: -------------------------------------------------------------------------------- 1 | export * from './awaitMediaLoaded' 2 | export * from './bufferToImage' 3 | export * from './createCanvas' 4 | export * from './extractFaces' 5 | export * from './extractFaceTensors' 6 | export * from './fetchImage' 7 | export * from './fetchJson' 8 | export * from 
'./fetchNetWeights' 9 | export * from './fetchOrThrow' 10 | export * from './getContext2dOrThrow' 11 | export * from './getMediaDimensions' 12 | export * from './imageTensorToCanvas' 13 | export * from './imageToSquare' 14 | export * from './isMediaElement' 15 | export * from './isMediaLoaded' 16 | export * from './loadWeightMap' 17 | export * from './matchDimensions' 18 | export * from './NetInput' 19 | export * from './resolveInput' 20 | export * from './toNetInput' 21 | export * from './types' -------------------------------------------------------------------------------- /src/classes/FaceDetection.ts: -------------------------------------------------------------------------------- 1 | import { Box } from './Box'; 2 | import { IDimensions } from './Dimensions'; 3 | import { ObjectDetection } from './ObjectDetection'; 4 | import { Rect } from './Rect'; 5 | 6 | export interface IFaceDetecion { 7 | score: number 8 | box: Box 9 | } 10 | 11 | export class FaceDetection extends ObjectDetection implements IFaceDetecion { 12 | constructor( 13 | score: number, 14 | relativeBox: Rect, 15 | imageDims: IDimensions 16 | ) { 17 | super(score, score, '', relativeBox, imageDims) 18 | } 19 | 20 | public forSize(width: number, height: number): FaceDetection { 21 | const { score, relativeBox, imageDims } = super.forSize(width, height) 22 | return new FaceDetection(score, relativeBox, imageDims) 23 | } 24 | } -------------------------------------------------------------------------------- /src/xception/types.ts: -------------------------------------------------------------------------------- 1 | import { ConvParams, SeparableConvParams } from '../common'; 2 | 3 | export type ReductionBlockParams = { 4 | separable_conv0: SeparableConvParams 5 | separable_conv1: SeparableConvParams 6 | expansion_conv: ConvParams 7 | } 8 | 9 | export type MainBlockParams = { 10 | separable_conv0: SeparableConvParams 11 | separable_conv1: SeparableConvParams 12 | separable_conv2: SeparableConvParams 
13 | } 14 | 15 | export type TinyXceptionParams = { 16 | entry_flow: { 17 | conv_in: ConvParams 18 | reduction_block_0: ReductionBlockParams 19 | reduction_block_1: ReductionBlockParams 20 | } 21 | middle_flow: any, 22 | exit_flow: { 23 | reduction_block: ReductionBlockParams 24 | separable_conv: SeparableConvParams 25 | } 26 | } -------------------------------------------------------------------------------- /src/dom/getContext2dOrThrow.ts: -------------------------------------------------------------------------------- 1 | import { env } from '../env'; 2 | import { resolveInput } from './resolveInput'; 3 | 4 | export function getContext2dOrThrow(canvasArg: string | HTMLCanvasElement | CanvasRenderingContext2D): CanvasRenderingContext2D { 5 | 6 | const { Canvas, CanvasRenderingContext2D } = env.getEnv() 7 | 8 | if (canvasArg instanceof CanvasRenderingContext2D) { 9 | return canvasArg 10 | } 11 | 12 | const canvas = resolveInput(canvasArg) 13 | 14 | if (!(canvas instanceof Canvas)) { 15 | throw new Error('resolveContext2d - expected canvas to be of instance of Canvas') 16 | } 17 | 18 | const ctx = canvas.getContext('2d') 19 | if (!ctx) { 20 | throw new Error('resolveContext2d - canvas 2d context is null') 21 | } 22 | 23 | return ctx 24 | } -------------------------------------------------------------------------------- /src/dom/bufferToImage.ts: -------------------------------------------------------------------------------- 1 | import { env } from '../env'; 2 | 3 | export function bufferToImage(buf: Blob): Promise { 4 | return new Promise((resolve, reject) => { 5 | if (!(buf instanceof Blob)) { 6 | return reject('bufferToImage - expected buf to be of type: Blob') 7 | } 8 | 9 | const reader = new FileReader() 10 | reader.onload = () => { 11 | if (typeof reader.result !== 'string') { 12 | return reject('bufferToImage - expected reader.result to be a string, in onload') 13 | } 14 | 15 | const img = env.getEnv().createImageElement() 16 | img.onload = () => 
resolve(img) 17 | img.onerror = reject 18 | img.src = reader.result 19 | } 20 | reader.onerror = reject 21 | reader.readAsDataURL(buf) 22 | }) 23 | } -------------------------------------------------------------------------------- /src/env/createFileSystem.ts: -------------------------------------------------------------------------------- 1 | import { FileSystem } from './types'; 2 | 3 | export function createFileSystem(fs?: any): FileSystem { 4 | 5 | let requireFsError = '' 6 | 7 | if (!fs) { 8 | try { 9 | fs = require('fs') 10 | } catch (err) { 11 | requireFsError = err.toString() 12 | } 13 | } 14 | 15 | const readFile = fs 16 | ? function(filePath: string) { 17 | return new Promise((res, rej) => { 18 | fs.readFile(filePath, function(err: any, buffer: Buffer) { 19 | return err ? rej(err) : res(buffer) 20 | }) 21 | }) 22 | } 23 | : function() { 24 | throw new Error(`readFile - failed to require fs in nodejs environment with error: ${requireFsError}`) 25 | } 26 | 27 | return { 28 | readFile 29 | } 30 | } -------------------------------------------------------------------------------- /src/env/createBrowserEnv.ts: -------------------------------------------------------------------------------- 1 | import { Environment } from './types'; 2 | 3 | export function createBrowserEnv(): Environment { 4 | 5 | const fetch = window['fetch'] || function() { 6 | throw new Error('fetch - missing fetch implementation for browser environment') 7 | } 8 | 9 | const readFile = function() { 10 | throw new Error('readFile - filesystem not available for browser environment') 11 | } 12 | 13 | return { 14 | Canvas: HTMLCanvasElement, 15 | CanvasRenderingContext2D: CanvasRenderingContext2D, 16 | Image: HTMLImageElement, 17 | ImageData: ImageData, 18 | Video: HTMLVideoElement, 19 | createCanvasElement: () => document.createElement('canvas'), 20 | createImageElement: () => document.createElement('img'), 21 | fetch, 22 | readFile 23 | } 24 | } 
-------------------------------------------------------------------------------- /src/tinyYolov2/const.ts: -------------------------------------------------------------------------------- 1 | import { Point } from '../classes'; 2 | 3 | export const IOU_THRESHOLD = 0.4 4 | 5 | export const BOX_ANCHORS = [ 6 | new Point(0.738768, 0.874946), 7 | new Point(2.42204, 2.65704), 8 | new Point(4.30971, 7.04493), 9 | new Point(10.246, 4.59428), 10 | new Point(12.6868, 11.8741) 11 | ] 12 | 13 | export const BOX_ANCHORS_SEPARABLE = [ 14 | new Point(1.603231, 2.094468), 15 | new Point(6.041143, 7.080126), 16 | new Point(2.882459, 3.518061), 17 | new Point(4.266906, 5.178857), 18 | new Point(9.041765, 10.66308) 19 | ] 20 | 21 | export const MEAN_RGB_SEPARABLE: [number, number, number] = [117.001, 114.697, 97.404] 22 | 23 | export const DEFAULT_MODEL_NAME = 'tiny_yolov2_model' 24 | export const DEFAULT_MODEL_NAME_SEPARABLE_CONV = 'tiny_yolov2_separable_conv_model' -------------------------------------------------------------------------------- /test/tests/factories/WithFaceDetection.test.ts: -------------------------------------------------------------------------------- 1 | import { extendWithFaceDetection, FaceDetection, Rect } from '../../../src'; 2 | 3 | const detection = new FaceDetection(1.0, new Rect(0, 0, 0.5, 0.5), { width: 100, height: 100 }) 4 | 5 | describe('extendWithFaceDetection', () => { 6 | 7 | it('returns WithFaceDetection', () => { 8 | 9 | const withFaceDetection = extendWithFaceDetection({}, detection) 10 | expect(withFaceDetection.detection).toEqual(detection) 11 | 12 | }) 13 | 14 | it('extends source object', () => { 15 | 16 | const srcProp = { foo: true } 17 | 18 | const withFaceDetection = extendWithFaceDetection({ srcProp }, detection) 19 | expect(withFaceDetection.detection).toEqual(detection) 20 | expect(withFaceDetection.srcProp).toEqual(srcProp) 21 | 22 | }) 23 | 24 | }) 25 | 
-------------------------------------------------------------------------------- /src/mtcnn/sharedLayers.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | import { convLayer } from '../common'; 4 | import { prelu } from './prelu'; 5 | import { SharedParams } from './types'; 6 | 7 | export function sharedLayer(x: tf.Tensor4D, params: SharedParams, isPnet: boolean = false) { 8 | return tf.tidy(() => { 9 | 10 | let out = convLayer(x, params.conv1, 'valid') 11 | out = prelu(out, params.prelu1_alpha) 12 | out = tf.maxPool(out, isPnet ? [2, 2]: [3, 3], [2, 2], 'same') 13 | out = convLayer(out, params.conv2, 'valid') 14 | out = prelu(out, params.prelu2_alpha) 15 | out = isPnet ? out : tf.maxPool(out, [3, 3], [2, 2], 'valid') 16 | out = convLayer(out, params.conv3, 'valid') 17 | out = prelu(out, params.prelu3_alpha) 18 | 19 | return out 20 | }) 21 | } -------------------------------------------------------------------------------- /src/classes/Dimensions.ts: -------------------------------------------------------------------------------- 1 | import { isValidNumber } from '../utils'; 2 | 3 | export interface IDimensions { 4 | width: number 5 | height: number 6 | } 7 | 8 | export class Dimensions implements IDimensions { 9 | 10 | private _width: number 11 | private _height: number 12 | 13 | constructor(width: number, height: number) { 14 | if (!isValidNumber(width) || !isValidNumber(height)) { 15 | throw new Error(`Dimensions.constructor - expected width and height to be valid numbers, instead have ${JSON.stringify({ width, height })}`) 16 | } 17 | 18 | this._width = width 19 | this._height = height 20 | } 21 | 22 | public get width(): number { return this._width } 23 | public get height(): number { return this._height } 24 | 25 | public reverse(): Dimensions { 26 | return new Dimensions(1 / this.width, 1 / this.height) 27 | } 28 | } 
-------------------------------------------------------------------------------- /src/faceProcessor/extractParams.ts: -------------------------------------------------------------------------------- 1 | import { extractFCParamsFactory, extractWeightsFactory, ParamMapping } from '../common'; 2 | import { NetParams } from './types'; 3 | 4 | export function extractParams(weights: Float32Array, channelsIn: number, channelsOut: number): { params: NetParams, paramMappings: ParamMapping[] } { 5 | 6 | const paramMappings: ParamMapping[] = [] 7 | 8 | const { 9 | extractWeights, 10 | getRemainingWeights 11 | } = extractWeightsFactory(weights) 12 | 13 | const extractFCParams = extractFCParamsFactory(extractWeights, paramMappings) 14 | 15 | const fc = extractFCParams(channelsIn, channelsOut, 'fc') 16 | 17 | if (getRemainingWeights().length !== 0) { 18 | throw new Error(`weights remaing after extract: ${getRemainingWeights().length}`) 19 | } 20 | 21 | return { 22 | paramMappings, 23 | params: { fc } 24 | } 25 | } -------------------------------------------------------------------------------- /src/common/extractFCParamsFactory.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | import { ExtractWeightsFunction, FCParams, ParamMapping } from './types'; 4 | 5 | 6 | export function extractFCParamsFactory( 7 | extractWeights: ExtractWeightsFunction, 8 | paramMappings: ParamMapping[] 9 | ) { 10 | 11 | return function( 12 | channelsIn: number, 13 | channelsOut: number, 14 | mappedPrefix: string 15 | ): FCParams { 16 | 17 | const fc_weights = tf.tensor2d(extractWeights(channelsIn * channelsOut), [channelsIn, channelsOut]) 18 | const fc_bias = tf.tensor1d(extractWeights(channelsOut)) 19 | 20 | paramMappings.push( 21 | { paramPath: `${mappedPrefix}/weights` }, 22 | { paramPath: `${mappedPrefix}/bias` } 23 | ) 24 | 25 | return { 26 | weights: fc_weights, 27 | bias: fc_bias 28 | } 29 | } 30 | 31 | } 
32 | -------------------------------------------------------------------------------- /src/index.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | import * as draw from './draw'; 4 | import * as utils from './utils'; 5 | 6 | export { 7 | draw, 8 | utils, 9 | tf 10 | } 11 | 12 | export * from './ageGenderNet/index'; 13 | export * from './classes/index'; 14 | export * from './dom/index' 15 | export * from './env/index'; 16 | export * from './faceExpressionNet/index'; 17 | export * from './faceLandmarkNet/index'; 18 | export * from './faceRecognitionNet/index'; 19 | export * from './factories/index'; 20 | export * from './globalApi/index'; 21 | export * from './mtcnn/index'; 22 | export * from './ops/index'; 23 | export * from './ssdMobilenetv1/index'; 24 | export * from './tinyFaceDetector/index'; 25 | export * from './tinyYolov2/index'; 26 | 27 | export * from './euclideanDistance'; 28 | export * from './NeuralNetwork'; 29 | export * from './resizeResults'; -------------------------------------------------------------------------------- /src/ageGenderNet/extractParams.ts: -------------------------------------------------------------------------------- 1 | import { extractFCParamsFactory, extractWeightsFactory, ParamMapping } from '../common'; 2 | import { NetParams } from './types'; 3 | 4 | export function extractParams(weights: Float32Array): { params: NetParams, paramMappings: ParamMapping[] } { 5 | 6 | const paramMappings: ParamMapping[] = [] 7 | 8 | const { 9 | extractWeights, 10 | getRemainingWeights 11 | } = extractWeightsFactory(weights) 12 | 13 | const extractFCParams = extractFCParamsFactory(extractWeights, paramMappings) 14 | 15 | const age = extractFCParams(512, 1, 'fc/age') 16 | const gender = extractFCParams(512, 2, 'fc/gender') 17 | 18 | if (getRemainingWeights().length !== 0) { 19 | throw new Error(`weights remaing after extract: 
${getRemainingWeights().length}`) 20 | } 21 | 22 | return { 23 | paramMappings, 24 | params: { fc: { age, gender } } 25 | } 26 | } -------------------------------------------------------------------------------- /examples/examples-nodejs/faceLandmarkDetection.ts: -------------------------------------------------------------------------------- 1 | import * as faceapi from 'face-api.js'; 2 | 3 | import { canvas, faceDetectionNet, faceDetectionOptions, saveFile } from './commons'; 4 | 5 | async function run() { 6 | 7 | await faceDetectionNet.loadFromDisk('../../weights') 8 | await faceapi.nets.faceLandmark68Net.loadFromDisk('../../weights') 9 | 10 | const img = await canvas.loadImage('../images/bbt1.jpg') 11 | const results = await faceapi.detectAllFaces(img, faceDetectionOptions) 12 | .withFaceLandmarks() 13 | 14 | const out = faceapi.createCanvasFromMedia(img) as any 15 | faceapi.draw.drawDetections(out, results.map(res => res.detection)) 16 | faceapi.draw.drawFaceLandmarks(out, results.map(res => res.landmarks)) 17 | 18 | saveFile('faceLandmarkDetection.jpg', out.toBuffer('image/jpeg')) 19 | console.log('done, saved results to out/faceLandmarkDetection.jpg') 20 | } 21 | 22 | run() -------------------------------------------------------------------------------- /src/common/extractConvParamsFactory.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | import { ConvParams, ExtractWeightsFunction, ParamMapping } from './types'; 4 | 5 | export function extractConvParamsFactory( 6 | extractWeights: ExtractWeightsFunction, 7 | paramMappings: ParamMapping[] 8 | ) { 9 | 10 | return function( 11 | channelsIn: number, 12 | channelsOut: number, 13 | filterSize: number, 14 | mappedPrefix: string 15 | ): ConvParams { 16 | 17 | const filters = tf.tensor4d( 18 | extractWeights(channelsIn * channelsOut * filterSize * filterSize), 19 | [filterSize, filterSize, channelsIn, channelsOut] 20 | ) 21 | 
const bias = tf.tensor1d(extractWeights(channelsOut)) 22 | 23 | paramMappings.push( 24 | { paramPath: `${mappedPrefix}/filters` }, 25 | { paramPath: `${mappedPrefix}/bias` } 26 | ) 27 | 28 | return { filters, bias } 29 | } 30 | 31 | } 32 | -------------------------------------------------------------------------------- /src/faceFeatureExtractor/extractParamsFromWeigthMapTiny.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | import { disposeUnusedWeightTensors, ParamMapping } from '../common'; 4 | import { loadParamsFactory } from './loadParamsFactory'; 5 | import { TinyFaceFeatureExtractorParams } from './types'; 6 | 7 | export function extractParamsFromWeigthMapTiny( 8 | weightMap: tf.NamedTensorMap 9 | ): { params: TinyFaceFeatureExtractorParams, paramMappings: ParamMapping[] } { 10 | 11 | const paramMappings: ParamMapping[] = [] 12 | 13 | const { 14 | extractDenseBlock3Params 15 | } = loadParamsFactory(weightMap, paramMappings) 16 | 17 | const params = { 18 | dense0: extractDenseBlock3Params('dense0', true), 19 | dense1: extractDenseBlock3Params('dense1'), 20 | dense2: extractDenseBlock3Params('dense2') 21 | } 22 | 23 | disposeUnusedWeightTensors(weightMap, paramMappings) 24 | 25 | return { params, paramMappings } 26 | } -------------------------------------------------------------------------------- /test/expectFaceDetections.ts: -------------------------------------------------------------------------------- 1 | import { IRect } from '../src'; 2 | import { FaceDetection } from '../src/classes/FaceDetection'; 3 | import { expectRectClose, sortFaceDetections } from './utils'; 4 | 5 | export function expectFaceDetections( 6 | results: FaceDetection[], 7 | allExpectedFaceDetections: IRect[], 8 | expectedScores: number[], 9 | maxScoreDelta: number, 10 | maxBoxDelta: number 11 | ) { 12 | 13 | const expectedDetections = expectedScores 14 | .map((score, i) => ({ 15 | score, 16 
| ...allExpectedFaceDetections[i] 17 | })) 18 | .filter(expected => expected.score !== -1) 19 | 20 | const sortedResults = sortFaceDetections(results) 21 | 22 | expectedDetections.forEach((expectedDetection, i) => { 23 | const det = sortedResults[i] 24 | expect(Math.abs(det.score - expectedDetection.score)).toBeLessThan(maxScoreDelta) 25 | expectRectClose(det.box, expectedDetection, maxBoxDelta) 26 | }) 27 | } -------------------------------------------------------------------------------- /src/dom/awaitMediaLoaded.ts: -------------------------------------------------------------------------------- 1 | import { env } from '../env'; 2 | import { isMediaLoaded } from './isMediaLoaded'; 3 | 4 | export function awaitMediaLoaded(media: HTMLImageElement | HTMLVideoElement | HTMLCanvasElement) { 5 | 6 | return new Promise((resolve, reject) => { 7 | if (media instanceof env.getEnv().Canvas || isMediaLoaded(media)) { 8 | return resolve() 9 | } 10 | 11 | function onLoad(e: Event) { 12 | if (!e.currentTarget) return 13 | e.currentTarget.removeEventListener('load', onLoad) 14 | e.currentTarget.removeEventListener('error', onError) 15 | resolve(e) 16 | } 17 | 18 | function onError(e: Event) { 19 | if (!e.currentTarget) return 20 | e.currentTarget.removeEventListener('load', onLoad) 21 | e.currentTarget.removeEventListener('error', onError) 22 | reject(e) 23 | } 24 | 25 | media.addEventListener('load', onLoad) 26 | media.addEventListener('error', onError) 27 | }) 28 | } -------------------------------------------------------------------------------- /test/tests/ops/iou.test.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | import { iou, Rect } from '../../../src'; 4 | 5 | describe('iou', () => { 6 | 7 | it('should be 1.0', () => tf.tidy(() => { 8 | 9 | const box = new Rect(0, 0, 20, 20) 10 | 11 | expect(iou(box, box)).toEqual(1) 12 | 13 | })) 14 | 15 | it('should be 0', () => tf.tidy(() 
=> { 16 | 17 | const box1 = new Rect(0, 0, 20, 20) 18 | const box2 = new Rect(20, 20, 20, 20) 19 | 20 | expect(iou(box1, box2)).toEqual(0) 21 | 22 | })) 23 | 24 | it('should be 0.5', () => tf.tidy(() => { 25 | 26 | const box1 = new Rect(0, 0, 20, 20) 27 | const box2 = new Rect(0, 0, 10, 20) 28 | 29 | expect(iou(box1, box2)).toEqual(0.5) 30 | 31 | })) 32 | 33 | it('should be 0.5', () => tf.tidy(() => { 34 | 35 | const box1 = new Rect(0, 0, 20, 20) 36 | const box2 = new Rect(0, 10, 20, 10) 37 | 38 | expect(iou(box1, box2)).toEqual(0.5) 39 | 40 | })) 41 | 42 | }) -------------------------------------------------------------------------------- /src/faceFeatureExtractor/extractParamsFromWeigthMap.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | import { disposeUnusedWeightTensors, ParamMapping } from '../common'; 4 | import { loadParamsFactory } from './loadParamsFactory'; 5 | import { FaceFeatureExtractorParams } from './types'; 6 | 7 | export function extractParamsFromWeigthMap( 8 | weightMap: tf.NamedTensorMap 9 | ): { params: FaceFeatureExtractorParams, paramMappings: ParamMapping[] } { 10 | 11 | const paramMappings: ParamMapping[] = [] 12 | 13 | const { 14 | extractDenseBlock4Params 15 | } = loadParamsFactory(weightMap, paramMappings) 16 | 17 | const params = { 18 | dense0: extractDenseBlock4Params('dense0', true), 19 | dense1: extractDenseBlock4Params('dense1'), 20 | dense2: extractDenseBlock4Params('dense2'), 21 | dense3: extractDenseBlock4Params('dense3') 22 | } 23 | 24 | disposeUnusedWeightTensors(weightMap, paramMappings) 25 | 26 | return { params, paramMappings } 27 | } -------------------------------------------------------------------------------- /src/faceProcessor/extractParamsFromWeigthMap.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | import { 
disposeUnusedWeightTensors, extractWeightEntryFactory, FCParams, ParamMapping } from '../common'; 4 | import { NetParams } from './types'; 5 | 6 | export function extractParamsFromWeigthMap( 7 | weightMap: tf.NamedTensorMap 8 | ): { params: NetParams, paramMappings: ParamMapping[] } { 9 | 10 | const paramMappings: ParamMapping[] = [] 11 | 12 | const extractWeightEntry = extractWeightEntryFactory(weightMap, paramMappings) 13 | 14 | function extractFcParams(prefix: string): FCParams { 15 | const weights = extractWeightEntry(`${prefix}/weights`, 2) 16 | const bias = extractWeightEntry(`${prefix}/bias`, 1) 17 | return { weights, bias } 18 | } 19 | 20 | const params = { 21 | fc: extractFcParams('fc') 22 | } 23 | 24 | disposeUnusedWeightTensors(weightMap, paramMappings) 25 | 26 | return { params, paramMappings } 27 | } -------------------------------------------------------------------------------- /src/faceExpressionNet/FaceExpressions.ts: -------------------------------------------------------------------------------- 1 | export const FACE_EXPRESSION_LABELS = ['neutral', 'happy', 'sad', 'angry', 'fearful', 'disgusted', 'surprised'] 2 | 3 | export class FaceExpressions { 4 | public neutral: number 5 | public happy: number 6 | public sad: number 7 | public angry: number 8 | public fearful: number 9 | public disgusted: number 10 | public surprised: number 11 | 12 | constructor(probabilities: number[] | Float32Array) { 13 | if (probabilities.length !== 7) { 14 | throw new Error(`FaceExpressions.constructor - expected probabilities.length to be 7, have: ${probabilities.length}`) 15 | } 16 | 17 | FACE_EXPRESSION_LABELS.forEach((expression, idx) => { 18 | this[expression] = probabilities[idx] 19 | }) 20 | } 21 | 22 | asSortedArray() { 23 | return FACE_EXPRESSION_LABELS 24 | .map(expression => ({ expression, probability: this[expression] as number })) 25 | .sort((e0, e1) => e1.probability - e0.probability) 26 | } 27 | } 
-------------------------------------------------------------------------------- /examples/examples-nodejs/faceExpressionRecognition.ts: -------------------------------------------------------------------------------- 1 | import * as faceapi from 'face-api.js'; 2 | 3 | import { canvas, faceDetectionNet, faceDetectionOptions, saveFile } from './commons'; 4 | 5 | async function run() { 6 | 7 | await faceDetectionNet.loadFromDisk('../../weights') 8 | await faceapi.nets.faceLandmark68Net.loadFromDisk('../../weights') 9 | await faceapi.nets.faceExpressionNet.loadFromDisk('../../weights') 10 | 11 | const img = await canvas.loadImage('../images/surprised.jpg') 12 | const results = await faceapi.detectAllFaces(img, faceDetectionOptions) 13 | .withFaceLandmarks() 14 | .withFaceExpressions() 15 | 16 | const out = faceapi.createCanvasFromMedia(img) as any 17 | faceapi.draw.drawDetections(out, results.map(res => res.detection)) 18 | faceapi.draw.drawFaceExpressions(out, results) 19 | 20 | saveFile('faceExpressionRecognition.jpg', out.toBuffer('image/jpeg')) 21 | console.log('done, saved results to out/faceExpressionRecognition.jpg') 22 | } 23 | 24 | run() -------------------------------------------------------------------------------- /src/faceRecognitionNet/convLayer.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | import { scale } from './scaleLayer'; 4 | import { ConvLayerParams } from './types'; 5 | 6 | 7 | function convLayer( 8 | x: tf.Tensor4D, 9 | params: ConvLayerParams, 10 | strides: [number, number], 11 | withRelu: boolean, 12 | padding: 'valid' | 'same' = 'same' 13 | ): tf.Tensor4D { 14 | const { filters, bias } = params.conv 15 | 16 | let out = tf.conv2d(x, filters, strides, padding) 17 | out = tf.add(out, bias) 18 | out = scale(out, params.scale) 19 | return withRelu ? 
tf.relu(out) : out 20 | } 21 | 22 | export function conv(x: tf.Tensor4D, params: ConvLayerParams) { 23 | return convLayer(x, params, [1, 1], true) 24 | } 25 | 26 | export function convNoRelu(x: tf.Tensor4D, params: ConvLayerParams) { 27 | return convLayer(x, params, [1, 1], false) 28 | } 29 | 30 | export function convDown(x: tf.Tensor4D, params: ConvLayerParams) { 31 | return convLayer(x, params, [2, 2], true, 'valid') 32 | } -------------------------------------------------------------------------------- /src/ssdMobilenetv1/SsdMobilenetv1Options.ts: -------------------------------------------------------------------------------- 1 | export interface ISsdMobilenetv1Options { 2 | minConfidence?: number 3 | maxResults?: number 4 | } 5 | 6 | export class SsdMobilenetv1Options { 7 | protected _name: string = 'SsdMobilenetv1Options' 8 | 9 | private _minConfidence: number 10 | private _maxResults: number 11 | 12 | constructor({ minConfidence, maxResults }: ISsdMobilenetv1Options = {}) { 13 | this._minConfidence = minConfidence || 0.5 14 | this._maxResults = maxResults || 100 15 | 16 | if (typeof this._minConfidence !== 'number' || this._minConfidence <= 0 || this._minConfidence >= 1) { 17 | throw new Error(`${this._name} - expected minConfidence to be a number between 0 and 1`) 18 | } 19 | 20 | if (typeof this._maxResults !== 'number') { 21 | throw new Error(`${this._name} - expected maxResults to be a number`) 22 | } 23 | } 24 | 25 | get minConfidence(): number { return this._minConfidence } 26 | get maxResults(): number { return this._maxResults } 27 | } -------------------------------------------------------------------------------- /test/env.node.ts: -------------------------------------------------------------------------------- 1 | import * as fs from 'fs'; 2 | import * as path from 'path'; 3 | 4 | import { env, NeuralNetwork } from '../src'; 5 | import { TestEnv } from './Environment'; 6 | 7 | require('@tensorflow/tfjs-node') 8 | const canvas = require('canvas') 9 
| 10 | const { Canvas, Image, ImageData } = canvas 11 | env.monkeyPatch({ Canvas, Image, ImageData }) 12 | 13 | async function loadImageNode(uri: string): Promise { 14 | return canvas.loadImage(path.resolve(__dirname, '../', uri)) 15 | } 16 | 17 | async function loadJsonNode(uri: string): Promise { 18 | return JSON.parse(fs.readFileSync(path.resolve(__dirname, '../', uri)).toString()) 19 | } 20 | 21 | export async function initNetNode>(net: TNet) { 22 | await net.loadFromDisk(path.resolve(__dirname, '../weights')) 23 | } 24 | 25 | const nodeTestEnv: TestEnv = { 26 | loadImage: loadImageNode, 27 | loadJson: loadJsonNode, 28 | initNet: initNetNode 29 | } 30 | 31 | global['nodeTestEnv'] = nodeTestEnv -------------------------------------------------------------------------------- /test/utils/index.test.ts: -------------------------------------------------------------------------------- 1 | import { utils } from '../../src'; 2 | 3 | describe('utils', () => { 4 | 5 | describe('isValidNumber', () => { 6 | 7 | it('0 is valid', () => { 8 | expect(utils.isValidNumber(0)).toBe(true) 9 | }) 10 | 11 | it('1 is valid', () => { 12 | expect(utils.isValidNumber(1)).toBe(true) 13 | }) 14 | 15 | it('-1 is valid', () => { 16 | expect(utils.isValidNumber(-1)).toBe(true) 17 | }) 18 | 19 | it('NaN is invalid', () => { 20 | expect(utils.isValidNumber(NaN)).toBe(false) 21 | }) 22 | 23 | it('Infinity is invalid', () => { 24 | expect(utils.isValidNumber(Infinity)).toBe(false) 25 | }) 26 | 27 | it('-Infinity is invalid', () => { 28 | expect(utils.isValidNumber(-Infinity)).toBe(false) 29 | }) 30 | 31 | it('null is invalid', () => { 32 | expect(utils.isValidNumber(null)).toBe(false) 33 | }) 34 | 35 | it('undefined is invalid', () => { 36 | expect(utils.isValidNumber(undefined)).toBe(false) 37 | }) 38 | 39 | }) 40 | }) 41 | -------------------------------------------------------------------------------- /src/ageGenderNet/extractParamsFromWeigthMap.ts: 
-------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | import { disposeUnusedWeightTensors, extractWeightEntryFactory, FCParams, ParamMapping } from '../common'; 4 | import { NetParams } from './types'; 5 | 6 | export function extractParamsFromWeigthMap( 7 | weightMap: tf.NamedTensorMap 8 | ): { params: NetParams, paramMappings: ParamMapping[] } { 9 | 10 | const paramMappings: ParamMapping[] = [] 11 | 12 | const extractWeightEntry = extractWeightEntryFactory(weightMap, paramMappings) 13 | 14 | function extractFcParams(prefix: string): FCParams { 15 | const weights = extractWeightEntry(`${prefix}/weights`, 2) 16 | const bias = extractWeightEntry(`${prefix}/bias`, 1) 17 | return { weights, bias } 18 | } 19 | 20 | const params = { 21 | fc: { 22 | age: extractFcParams('fc/age'), 23 | gender: extractFcParams('fc/gender') 24 | } 25 | } 26 | 27 | disposeUnusedWeightTensors(weightMap, paramMappings) 28 | 29 | return { params, paramMappings } 30 | } -------------------------------------------------------------------------------- /src/faceRecognitionNet/types.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | import { ConvParams } from '../common'; 4 | 5 | export type ScaleLayerParams = { 6 | weights: tf.Tensor1D 7 | biases: tf.Tensor1D 8 | } 9 | export type ResidualLayerParams = { 10 | conv1: ConvLayerParams 11 | conv2: ConvLayerParams 12 | } 13 | 14 | export type ConvLayerParams = { 15 | conv: ConvParams 16 | scale: ScaleLayerParams 17 | } 18 | 19 | export type NetParams = { 20 | conv32_down: ConvLayerParams 21 | conv32_1: ResidualLayerParams 22 | conv32_2: ResidualLayerParams 23 | conv32_3: ResidualLayerParams 24 | conv64_down: ResidualLayerParams 25 | conv64_1: ResidualLayerParams 26 | conv64_2: ResidualLayerParams 27 | conv64_3: ResidualLayerParams 28 | conv128_down: ResidualLayerParams 29 | 
conv128_1: ResidualLayerParams 30 | conv128_2: ResidualLayerParams 31 | conv256_down: ResidualLayerParams 32 | conv256_1: ResidualLayerParams 33 | conv256_2: ResidualLayerParams 34 | conv256_down_out: ResidualLayerParams 35 | fc: tf.Tensor2D 36 | } -------------------------------------------------------------------------------- /src/mtcnn/RNet.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | import { fullyConnectedLayer } from '../common/fullyConnectedLayer'; 4 | import { prelu } from './prelu'; 5 | import { sharedLayer } from './sharedLayers'; 6 | import { RNetParams } from './types'; 7 | 8 | export function RNet(x: tf.Tensor4D, params: RNetParams): { scores: tf.Tensor1D, regions: tf.Tensor2D } { 9 | return tf.tidy(() => { 10 | 11 | const convOut = sharedLayer(x, params) 12 | const vectorized = tf.reshape(convOut, [convOut.shape[0], params.fc1.weights.shape[0]]) as tf.Tensor2D 13 | const fc1 = fullyConnectedLayer(vectorized, params.fc1) 14 | const prelu4 = prelu(fc1, params.prelu4_alpha) 15 | const fc2_1 = fullyConnectedLayer(prelu4, params.fc2_1) 16 | const max = tf.expandDims(tf.max(fc2_1, 1), 1) 17 | const prob = tf.softmax(tf.sub(fc2_1, max), 1) as tf.Tensor2D 18 | const regions = fullyConnectedLayer(prelu4, params.fc2_2) 19 | 20 | const scores = tf.unstack(prob, 1)[1] as tf.Tensor1D 21 | return { scores, regions } 22 | }) 23 | } -------------------------------------------------------------------------------- /src/classes/PredictedBox.ts: -------------------------------------------------------------------------------- 1 | import { isValidProbablitiy } from '../utils'; 2 | import { IBoundingBox } from './BoundingBox'; 3 | import { LabeledBox } from './LabeledBox'; 4 | import { IRect } from './Rect'; 5 | 6 | export class PredictedBox extends LabeledBox { 7 | 8 | public static assertIsValidPredictedBox(box: any, callee: string) { 9 | 
LabeledBox.assertIsValidLabeledBox(box, callee) 10 | 11 | if ( 12 | !isValidProbablitiy(box.score) 13 | || !isValidProbablitiy(box.classScore) 14 | ) { 15 | throw new Error(`${callee} - expected properties score (${box.score}) and (${box.classScore}) to be a number between [0, 1]`) 16 | } 17 | } 18 | 19 | private _score: number 20 | private _classScore: number 21 | 22 | constructor(box: IBoundingBox | IRect | any, label: number, score: number, classScore: number) { 23 | super(box, label) 24 | this._score = score 25 | this._classScore = classScore 26 | } 27 | 28 | public get score(): number { return this._score } 29 | public get classScore(): number { return this._classScore } 30 | 31 | } -------------------------------------------------------------------------------- /src/classes/FaceLandmarks68.ts: -------------------------------------------------------------------------------- 1 | import { getCenterPoint } from '../utils'; 2 | import { FaceLandmarks } from './FaceLandmarks'; 3 | import { Point } from './Point'; 4 | 5 | export class FaceLandmarks68 extends FaceLandmarks { 6 | public getJawOutline(): Point[] { 7 | return this.positions.slice(0, 17) 8 | } 9 | 10 | public getLeftEyeBrow(): Point[] { 11 | return this.positions.slice(17, 22) 12 | } 13 | 14 | public getRightEyeBrow(): Point[] { 15 | return this.positions.slice(22, 27) 16 | } 17 | 18 | public getNose(): Point[] { 19 | return this.positions.slice(27, 36) 20 | } 21 | 22 | public getLeftEye(): Point[] { 23 | return this.positions.slice(36, 42) 24 | } 25 | 26 | public getRightEye(): Point[] { 27 | return this.positions.slice(42, 48) 28 | } 29 | 30 | public getMouth(): Point[] { 31 | return this.positions.slice(48, 68) 32 | } 33 | 34 | protected getRefPointsForAlignment(): Point[] { 35 | return [ 36 | this.getLeftEye(), 37 | this.getRightEye(), 38 | this.getMouth() 39 | ].map(getCenterPoint) 40 | } 41 | } -------------------------------------------------------------------------------- 
/src/ops/nonMaxSuppression.ts: -------------------------------------------------------------------------------- 1 | import { Box } from '../classes/Box'; 2 | import { iou } from './iou'; 3 | 4 | export function nonMaxSuppression( 5 | boxes: Box[], 6 | scores: number[], 7 | iouThreshold: number, 8 | isIOU: boolean = true 9 | ): number[] { 10 | 11 | let indicesSortedByScore = scores 12 | .map((score, boxIndex) => ({ score, boxIndex })) 13 | .sort((c1, c2) => c1.score - c2.score) 14 | .map(c => c.boxIndex) 15 | 16 | const pick: number[] = [] 17 | 18 | while(indicesSortedByScore.length > 0) { 19 | const curr = indicesSortedByScore.pop() as number 20 | pick.push(curr) 21 | 22 | const indices = indicesSortedByScore 23 | 24 | const outputs: number[] = [] 25 | for (let i = 0; i < indices.length; i++) { 26 | const idx = indices[i] 27 | 28 | const currBox = boxes[curr] 29 | const idxBox = boxes[idx] 30 | 31 | outputs.push(iou(currBox, idxBox, isIOU)) 32 | } 33 | 34 | indicesSortedByScore = indicesSortedByScore.filter( 35 | (_, j) => outputs[j] <= iouThreshold 36 | ) 37 | } 38 | 39 | return pick 40 | 41 | } -------------------------------------------------------------------------------- /src/faceFeatureExtractor/extractParamsTiny.ts: -------------------------------------------------------------------------------- 1 | import { extractWeightsFactory, ParamMapping } from '../common'; 2 | import { extractorsFactory } from './extractorsFactory'; 3 | import { TinyFaceFeatureExtractorParams } from './types'; 4 | 5 | 6 | 7 | export function extractParamsTiny(weights: Float32Array): { params: TinyFaceFeatureExtractorParams, paramMappings: ParamMapping[] } { 8 | 9 | const paramMappings: ParamMapping[] = [] 10 | 11 | const { 12 | extractWeights, 13 | getRemainingWeights 14 | } = extractWeightsFactory(weights) 15 | 16 | const { 17 | extractDenseBlock3Params 18 | } = extractorsFactory(extractWeights, paramMappings) 19 | 20 | const dense0 = extractDenseBlock3Params(3, 32, 'dense0', true) 
21 | const dense1 = extractDenseBlock3Params(32, 64, 'dense1') 22 | const dense2 = extractDenseBlock3Params(64, 128, 'dense2') 23 | 24 | if (getRemainingWeights().length !== 0) { 25 | throw new Error(`weights remaing after extract: ${getRemainingWeights().length}`) 26 | } 27 | 28 | return { 29 | paramMappings, 30 | params: { dense0, dense1, dense2 } 31 | } 32 | } -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | //"allowUnreachableCode": true, 4 | //"noUnusedLocals": false, 5 | //"noUnusedParameters": false, 6 | 7 | 8 | "removeComments": false, 9 | "preserveConstEnums": true, 10 | "emitDecoratorMetadata": true, 11 | "experimentalDecorators": true, 12 | "sourceMap": true, 13 | "declaration": true, 14 | "noImplicitAny": true, 15 | "noImplicitReturns": true, 16 | "noImplicitThis": true, 17 | "noFallthroughCasesInSwitch": true, 18 | "suppressImplicitAnyIndexErrors": true, 19 | "strictNullChecks": true, 20 | "importHelpers": true, 21 | "skipLibCheck": true, 22 | "outDir": "build/commonjs", 23 | "module": "commonjs", 24 | "target": "es5", 25 | "moduleResolution": "node", 26 | "lib": ["es2015", "dom"], 27 | "typeRoots": [ 28 | "typings", 29 | "node_modules/@types" 30 | ] 31 | }, 32 | "formatCodeOptions": { 33 | "indentSize": 2, 34 | "tabSize": 2 35 | }, 36 | "exclude": [ 37 | "node_modules", 38 | "dist" 39 | ], 40 | "include": [ 41 | "src" 42 | ] 43 | } -------------------------------------------------------------------------------- /src/draw/drawDetections.ts: -------------------------------------------------------------------------------- 1 | import { Box, IBoundingBox, IRect } from '../classes'; 2 | import { FaceDetection } from '../classes/FaceDetection'; 3 | import { isWithFaceDetection, WithFaceDetection } from '../factories/WithFaceDetection'; 4 | import { round } from '../utils'; 5 | import { DrawBox } 
from './DrawBox'; 6 | 7 | export type TDrawDetectionsInput = IRect | IBoundingBox | FaceDetection | WithFaceDetection<{}> 8 | 9 | export function drawDetections( 10 | canvasArg: string | HTMLCanvasElement, 11 | detections: TDrawDetectionsInput | Array 12 | ) { 13 | const detectionsArray = Array.isArray(detections) ? detections : [detections] 14 | 15 | detectionsArray.forEach(det => { 16 | const score = det instanceof FaceDetection 17 | ? det.score 18 | : (isWithFaceDetection(det) ? det.detection.score : undefined) 19 | 20 | const box = det instanceof FaceDetection 21 | ? det.box 22 | : (isWithFaceDetection(det) ? det.detection.box : new Box(det)) 23 | 24 | const label = score ? `${round(score)}` : undefined 25 | new DrawBox(box, { label }).draw(canvasArg) 26 | }) 27 | } -------------------------------------------------------------------------------- /src/classes/Point.ts: -------------------------------------------------------------------------------- 1 | export interface IPoint { 2 | x: number 3 | y: number 4 | } 5 | 6 | export class Point implements IPoint { 7 | private _x: number 8 | private _y: number 9 | 10 | constructor(x: number, y: number) { 11 | this._x = x 12 | this._y = y 13 | } 14 | 15 | get x(): number { return this._x } 16 | get y(): number { return this._y } 17 | 18 | public add(pt: IPoint): Point { 19 | return new Point(this.x + pt.x, this.y + pt.y) 20 | } 21 | 22 | public sub(pt: IPoint): Point { 23 | return new Point(this.x - pt.x, this.y - pt.y) 24 | } 25 | 26 | public mul(pt: IPoint): Point { 27 | return new Point(this.x * pt.x, this.y * pt.y) 28 | } 29 | 30 | public div(pt: IPoint): Point { 31 | return new Point(this.x / pt.x, this.y / pt.y) 32 | } 33 | 34 | public abs(): Point { 35 | return new Point(Math.abs(this.x), Math.abs(this.y)) 36 | } 37 | 38 | public magnitude(): number { 39 | return Math.sqrt(Math.pow(this.x, 2) + Math.pow(this.y, 2)) 40 | } 41 | 42 | public floor(): Point { 43 | return new Point(Math.floor(this.x), 
Math.floor(this.y)) 44 | } 45 | } -------------------------------------------------------------------------------- /src/tinyYolov2/types.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | import { ConvParams } from '../common'; 4 | import { SeparableConvParams } from '../common/types'; 5 | 6 | export type BatchNorm = { 7 | sub: tf.Tensor1D 8 | truediv: tf.Tensor1D 9 | } 10 | 11 | export type ConvWithBatchNorm = { 12 | conv: ConvParams 13 | bn: BatchNorm 14 | } 15 | 16 | export type MobilenetParams = { 17 | conv0: SeparableConvParams | ConvParams 18 | conv1: SeparableConvParams 19 | conv2: SeparableConvParams 20 | conv3: SeparableConvParams 21 | conv4: SeparableConvParams 22 | conv5: SeparableConvParams 23 | conv6?: SeparableConvParams 24 | conv7?: SeparableConvParams 25 | conv8: ConvParams 26 | } 27 | 28 | export type DefaultTinyYolov2NetParams = { 29 | conv0: ConvWithBatchNorm 30 | conv1: ConvWithBatchNorm 31 | conv2: ConvWithBatchNorm 32 | conv3: ConvWithBatchNorm 33 | conv4: ConvWithBatchNorm 34 | conv5: ConvWithBatchNorm 35 | conv6: ConvWithBatchNorm 36 | conv7: ConvWithBatchNorm 37 | conv8: ConvParams 38 | } 39 | 40 | export type TinyYolov2NetParams = DefaultTinyYolov2NetParams | MobilenetParams -------------------------------------------------------------------------------- /src/common/getModelUris.ts: -------------------------------------------------------------------------------- 1 | export function getModelUris(uri: string | undefined, defaultModelName: string) { 2 | const defaultManifestFilename = `${defaultModelName}-weights_manifest.json` 3 | 4 | if (!uri) { 5 | return { 6 | modelBaseUri: '', 7 | manifestUri: defaultManifestFilename 8 | } 9 | } 10 | 11 | if (uri === '/') { 12 | return { 13 | modelBaseUri: '/', 14 | manifestUri: `/${defaultManifestFilename}` 15 | } 16 | } 17 | const protocol = uri.startsWith('http://') ? 'http://' : uri.startsWith('https://') ? 
'https://' : ''; 18 | uri = uri.replace(protocol, ''); 19 | 20 | const parts = uri.split('/').filter(s => s) 21 | 22 | const manifestFile = uri.endsWith('.json') 23 | ? parts[parts.length - 1] 24 | : defaultManifestFilename 25 | 26 | let modelBaseUri = protocol + (uri.endsWith('.json') ? parts.slice(0, parts.length - 1) : parts).join('/') 27 | modelBaseUri = uri.startsWith('/') ? `/${modelBaseUri}` : modelBaseUri 28 | 29 | return { 30 | modelBaseUri, 31 | manifestUri: modelBaseUri === '/' ? `/${manifestFile}` : `${modelBaseUri}/${manifestFile}` 32 | } 33 | } -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 Vincent Mühler 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /src/tinyYolov2/TinyYolov2Options.ts: -------------------------------------------------------------------------------- 1 | export enum TinyYolov2SizeType { 2 | XS = 224, 3 | SM = 320, 4 | MD = 416, 5 | LG = 608 6 | } 7 | 8 | export interface ITinyYolov2Options { 9 | inputSize?: number 10 | scoreThreshold?: number 11 | } 12 | 13 | export class TinyYolov2Options { 14 | protected _name: string = 'TinyYolov2Options' 15 | 16 | private _inputSize: number 17 | private _scoreThreshold: number 18 | 19 | constructor({ inputSize, scoreThreshold }: ITinyYolov2Options = {}) { 20 | this._inputSize = inputSize || 416 21 | this._scoreThreshold = scoreThreshold || 0.5 22 | 23 | if (typeof this._inputSize !== 'number' || this._inputSize % 32 !== 0) { 24 | throw new Error(`${this._name} - expected inputSize to be a number divisible by 32`) 25 | } 26 | 27 | if (typeof this._scoreThreshold !== 'number' || this._scoreThreshold <= 0 || this._scoreThreshold >= 1) { 28 | throw new Error(`${this._name} - expected scoreThreshold to be a number between 0 and 1`) 29 | } 30 | } 31 | 32 | get inputSize(): number { return this._inputSize } 33 | get scoreThreshold(): number { return this._scoreThreshold } 34 | } -------------------------------------------------------------------------------- /src/faceFeatureExtractor/extractParams.ts: -------------------------------------------------------------------------------- 1 | import { extractWeightsFactory, ParamMapping } from '../common'; 2 | import { extractorsFactory } from './extractorsFactory'; 3 | import { FaceFeatureExtractorParams } from './types'; 4 | 5 | 6 | export function extractParams(weights: Float32Array): { params: FaceFeatureExtractorParams, paramMappings: ParamMapping[] } { 7 | 8 | const paramMappings: ParamMapping[] = [] 9 | 10 | const { 11 | extractWeights, 12 | getRemainingWeights 13 | } = extractWeightsFactory(weights) 14 | 15 | const { 16 | 
extractDenseBlock4Params 17 | } = extractorsFactory(extractWeights, paramMappings) 18 | 19 | const dense0 = extractDenseBlock4Params(3, 32, 'dense0', true) 20 | const dense1 = extractDenseBlock4Params(32, 64, 'dense1') 21 | const dense2 = extractDenseBlock4Params(64, 128, 'dense2') 22 | const dense3 = extractDenseBlock4Params(128, 256, 'dense3') 23 | 24 | if (getRemainingWeights().length !== 0) { 25 | throw new Error(`weights remaing after extract: ${getRemainingWeights().length}`) 26 | } 27 | 28 | return { 29 | paramMappings, 30 | params: { dense0, dense1, dense2, dense3 } 31 | } 32 | } -------------------------------------------------------------------------------- /src/mtcnn/types.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | import { FaceLandmarks5 } from '../classes/FaceLandmarks5'; 4 | import { ConvParams, FCParams } from '../common'; 5 | import { WithFaceDetection, WithFaceLandmarks } from '../factories'; 6 | 7 | export type SharedParams = { 8 | conv1: ConvParams 9 | prelu1_alpha: tf.Tensor1D 10 | conv2: ConvParams 11 | prelu2_alpha: tf.Tensor1D 12 | conv3: ConvParams 13 | prelu3_alpha: tf.Tensor1D 14 | } 15 | 16 | export type PNetParams = SharedParams & { 17 | conv4_1: ConvParams 18 | conv4_2: ConvParams 19 | } 20 | 21 | export type RNetParams = SharedParams & { 22 | fc1: FCParams 23 | prelu4_alpha: tf.Tensor1D 24 | fc2_1: FCParams 25 | fc2_2: FCParams 26 | } 27 | 28 | export type ONetParams = SharedParams & { 29 | conv4: ConvParams 30 | prelu4_alpha: tf.Tensor1D 31 | fc1: FCParams 32 | prelu5_alpha: tf.Tensor1D 33 | fc2_1: FCParams 34 | fc2_2: FCParams 35 | fc2_3: FCParams 36 | } 37 | 38 | export type NetParams = { 39 | pnet: PNetParams 40 | rnet: RNetParams 41 | onet: ONetParams 42 | } 43 | 44 | export type MtcnnResult = WithFaceLandmarks, FaceLandmarks5> 45 | -------------------------------------------------------------------------------- 
/src/classes/LabeledFaceDescriptors.ts: -------------------------------------------------------------------------------- 1 | export class LabeledFaceDescriptors { 2 | private _label: string 3 | private _descriptors: Float32Array[] 4 | 5 | constructor(label: string, descriptors: Float32Array[]) { 6 | if (!(typeof label === 'string')) { 7 | throw new Error('LabeledFaceDescriptors - constructor expected label to be a string') 8 | } 9 | 10 | if (!Array.isArray(descriptors) || descriptors.some(desc => !(desc instanceof Float32Array))) { 11 | throw new Error('LabeledFaceDescriptors - constructor expected descriptors to be an array of Float32Array') 12 | } 13 | 14 | this._label = label 15 | this._descriptors = descriptors 16 | } 17 | 18 | public get label(): string { return this._label } 19 | public get descriptors(): Float32Array[] { return this._descriptors } 20 | 21 | public toJSON(): any { 22 | return { 23 | label: this.label, 24 | descriptors: this.descriptors.map((d) => Array.from(d)) 25 | }; 26 | } 27 | 28 | public static fromJSON(json: any): LabeledFaceDescriptors { 29 | const descriptors = json.descriptors.map((d: any) => { 30 | return new Float32Array(d); 31 | }); 32 | return new LabeledFaceDescriptors(json.label, descriptors); 33 | } 34 | 35 | } -------------------------------------------------------------------------------- /src/dom/createCanvas.ts: -------------------------------------------------------------------------------- 1 | import { IDimensions } from '../classes/Dimensions'; 2 | import { env } from '../env'; 3 | import { getContext2dOrThrow } from './getContext2dOrThrow'; 4 | import { getMediaDimensions } from './getMediaDimensions'; 5 | import { isMediaLoaded } from './isMediaLoaded'; 6 | 7 | export function createCanvas({ width, height }: IDimensions): HTMLCanvasElement { 8 | 9 | const { createCanvasElement } = env.getEnv() 10 | const canvas = createCanvasElement() 11 | canvas.width = width 12 | canvas.height = height 13 | return canvas 14 | } 15 | 
16 | export function createCanvasFromMedia(media: HTMLImageElement | HTMLVideoElement | ImageData, dims?: IDimensions): HTMLCanvasElement { 17 | 18 | const { ImageData } = env.getEnv() 19 | 20 | if (!(media instanceof ImageData) && !isMediaLoaded(media)) { 21 | throw new Error('createCanvasFromMedia - media has not finished loading yet') 22 | } 23 | 24 | const { width, height } = dims || getMediaDimensions(media) 25 | const canvas = createCanvas({ width, height }) 26 | 27 | if (media instanceof ImageData) { 28 | getContext2dOrThrow(canvas).putImageData(media, 0, 0) 29 | } else { 30 | getContext2dOrThrow(canvas).drawImage(media, 0, 0, width, height) 31 | } 32 | return canvas 33 | } -------------------------------------------------------------------------------- /rollup.config.js: -------------------------------------------------------------------------------- 1 | import commonjs from 'rollup-plugin-commonjs'; 2 | import node from 'rollup-plugin-node-resolve'; 3 | import typescript from 'rollup-plugin-typescript2'; 4 | import { uglify } from 'rollup-plugin-uglify'; 5 | import path from 'path'; 6 | 7 | const { minify } = process.env 8 | 9 | export default { 10 | input: 'src/index.ts', 11 | plugins: [ 12 | typescript({ 13 | tsconfigOverride: { 14 | compilerOptions: { 15 | module: 'ES2015', 16 | declaration: false 17 | } 18 | } 19 | }), 20 | node(), 21 | commonjs({ 22 | include: 'node_modules/**' 23 | }) 24 | ].concat(minify ? uglify() : []), 25 | output: { 26 | extend: true, 27 | file: `dist/face-api${minify ? '.min' : ''}.js`, 28 | format: 'umd', 29 | name: 'faceapi', 30 | globals: { 31 | 'crypto': 'crypto' 32 | }, 33 | sourcemap: minify ? 
false : true
  },
  external: ['crypto'],
  onwarn: (warning) => {
    // noisy but harmless warning classes are suppressed
    const ignoreWarnings = ['CIRCULAR_DEPENDENCY', 'CIRCULAR', 'THIS_IS_UNDEFINED']
    if (ignoreWarnings.some(w => w === warning.code))
      return

    if (warning.missing === 'alea')
      return

    console.warn(warning.message)
  }
}
--------------------------------------------------------------------------------
/src/dom/imageToSquare.ts:
--------------------------------------------------------------------------------
import { env } from '../env';
import { createCanvas, createCanvasFromMedia } from './createCanvas';
import { getContext2dOrThrow } from './getContext2dOrThrow';
import { getMediaDimensions } from './getMediaDimensions';

/**
 * Letterbox the input into a square canvas of side `inputSize`, preserving
 * aspect ratio. With `centerImage` the image is centered on the longer axis;
 * otherwise it is anchored at the top-left.
 */
export function imageToSquare(input: HTMLImageElement | HTMLCanvasElement, inputSize: number, centerImage: boolean = false) {
  const { Image, Canvas } = env.getEnv()

  if (!(input instanceof Image || input instanceof Canvas)) {
    throw new Error('imageToSquare - expected arg0 to be HTMLImageElement | HTMLCanvasElement')
  }

  const dims = getMediaDimensions(input)
  const scale = inputSize / Math.max(dims.height, dims.width)
  const width = scale * dims.width
  const height = scale * dims.height

  const targetCanvas = createCanvas({ width: inputSize, height: inputSize })
  const inputCanvas = input instanceof Canvas ? input : createCanvasFromMedia(input)

  // shift along whichever axis is shorter so the image is centered
  const offset = Math.abs(width - height) / 2
  const dx = centerImage && width < height ? offset : 0
  const dy = centerImage && height < width ? offset : 0
  getContext2dOrThrow(targetCanvas).drawImage(inputCanvas, dx, dy, width, height)

  return targetCanvas
}
--------------------------------------------------------------------------------
/examples/examples-nodejs/ageAndGenderRecognition.ts:
--------------------------------------------------------------------------------
import * as faceapi from 'face-api.js';

import { canvas, faceDetectionNet, faceDetectionOptions, saveFile } from './commons';

// Detect faces in a sample image, estimate age/gender per face and write an
// annotated jpg to out/.
async function run() {
  await faceDetectionNet.loadFromDisk('../../weights')
  await faceapi.nets.faceLandmark68Net.loadFromDisk('../../weights')
  await faceapi.nets.ageGenderNet.loadFromDisk('../../weights')

  const img = await canvas.loadImage('../images/bbt1.jpg')
  const results = await faceapi.detectAllFaces(img, faceDetectionOptions)
    .withFaceLandmarks()
    .withAgeAndGender()

  const out = faceapi.createCanvasFromMedia(img) as any
  faceapi.draw.drawDetections(out, results.map(res => res.detection))
  results.forEach(result => {
    const { age, gender, genderProbability } = result
    new faceapi.draw.DrawTextField(
      [
        `${faceapi.utils.round(age, 0)} years`,
        `${gender} (${faceapi.utils.round(genderProbability)})`
      ],
      result.detection.box.bottomLeft
    ).draw(out)
  })

  saveFile('ageAndGenderRecognition.jpg', out.toBuffer('image/jpeg'))
  console.log('done, saved results to out/ageAndGenderRecognition.jpg')
}

run()
--------------------------------------------------------------------------------
/typedoc.config.js:
--------------------------------------------------------------------------------
const path = require('path')
const fs = require('fs')

// Per-directory allow-lists: only the listed files of each module are documented.
const excludes = [
  { dir: 'faceLandmarkNet', exceptions: ['index.ts', 'FaceLandmark68Net.ts', 'FaceLandmark68TinyNet.ts'] },
  { dir: 'faceRecognitionNet', exceptions: ['index.ts', 'FaceRecognitionNet.ts'] },
7 | { dir: 'mtcnn', exceptions: ['index.ts', 'Mtcnn.ts', 'MtcnnOptions.ts'] }, 8 | { dir: 'ssdMobilenetv1', exceptions: ['index.ts', 'SsdMobilenetv1.ts', 'SsdMobilenetv1Options.ts'] }, 9 | { dir: 'tinyFaceDetector', exceptions: ['index.ts', 'TinyFaceDetector.ts', 'TinyFaceDetectorOptions.ts'] }, 10 | { dir: 'tinyYolov2', exceptions: ['index.ts', 'TinyYolov2.ts'] } 11 | ] 12 | 13 | const exclude = excludes.map(({ dir, exceptions }) => { 14 | const files = fs.readdirSync(path.resolve('src', dir)) 15 | .filter(file => !exceptions.some(ex => ex === file)) 16 | 17 | return files.map(file => `**/${dir}/${file}`) 18 | }).reduce((flat, arr) => flat.concat(arr), []) 19 | 20 | module.exports = { 21 | mode: 'file', 22 | out: 'docs', 23 | module: 'commonjs', 24 | target: 'es5', 25 | theme: 'default', 26 | excludeExternals: true, 27 | includeDeclarations: true, 28 | excludePrivate: true, 29 | excludeNotExported: true, 30 | stripInternal: true, 31 | externalPattern: 'node_modules/@tensorflow', 32 | exclude 33 | } -------------------------------------------------------------------------------- /src/env/createNodejsEnv.ts: -------------------------------------------------------------------------------- 1 | import { createFileSystem } from './createFileSystem'; 2 | import { Environment } from './types'; 3 | 4 | export function createNodejsEnv(): Environment { 5 | 6 | const Canvas = global['Canvas'] || global['HTMLCanvasElement'] 7 | const Image = global['Image'] || global['HTMLImageElement'] 8 | 9 | const createCanvasElement = function() { 10 | if (Canvas) { 11 | return new Canvas() 12 | } 13 | throw new Error('createCanvasElement - missing Canvas implementation for nodejs environment') 14 | } 15 | 16 | const createImageElement = function() { 17 | if (Image) { 18 | return new Image() 19 | } 20 | throw new Error('createImageElement - missing Image implementation for nodejs environment') 21 | } 22 | 23 | const fetch = global['fetch'] || function() { 24 | throw new Error('fetch - 
missing fetch implementation for nodejs environment') 25 | } 26 | 27 | const fileSystem = createFileSystem() 28 | 29 | return { 30 | Canvas: Canvas || class {}, 31 | CanvasRenderingContext2D: global['CanvasRenderingContext2D'] || class {}, 32 | Image: Image || class {}, 33 | ImageData: global['ImageData'] || class {}, 34 | Video: global['HTMLVideoElement'] || class {}, 35 | createCanvasElement, 36 | createImageElement, 37 | fetch, 38 | ...fileSystem 39 | } 40 | } -------------------------------------------------------------------------------- /src/mtcnn/ONet.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | import { convLayer } from '../common'; 4 | import { fullyConnectedLayer } from '../common/fullyConnectedLayer'; 5 | import { prelu } from './prelu'; 6 | import { sharedLayer } from './sharedLayers'; 7 | import { ONetParams } from './types'; 8 | 9 | export function ONet(x: tf.Tensor4D, params: ONetParams): { scores: tf.Tensor1D, regions: tf.Tensor2D, points: tf.Tensor2D } { 10 | return tf.tidy(() => { 11 | 12 | let out = sharedLayer(x, params) 13 | out = tf.maxPool(out, [2, 2], [2, 2], 'same') 14 | out = convLayer(out, params.conv4, 'valid') 15 | out = prelu(out, params.prelu4_alpha) 16 | 17 | const vectorized = tf.reshape(out, [out.shape[0], params.fc1.weights.shape[0]]) as tf.Tensor2D 18 | const fc1 = fullyConnectedLayer(vectorized, params.fc1) 19 | const prelu5 = prelu(fc1, params.prelu5_alpha) 20 | const fc2_1 = fullyConnectedLayer(prelu5, params.fc2_1) 21 | const max = tf.expandDims(tf.max(fc2_1, 1), 1) 22 | 23 | const prob = tf.softmax(tf.sub(fc2_1, max), 1) as tf.Tensor2D 24 | const regions = fullyConnectedLayer(prelu5, params.fc2_2) 25 | const points = fullyConnectedLayer(prelu5, params.fc2_3) 26 | 27 | const scores = tf.unstack(prob, 1)[1] as tf.Tensor1D 28 | return { scores, regions, points } 29 | }) 30 | } 
-------------------------------------------------------------------------------- /test/data/mtcnnFaceLandmarkPositions.json: -------------------------------------------------------------------------------- 1 | [[{"x":117.85171800851822,"y":58.91067159175873},{"x":157.70139408111572,"y":64.48519098758698},{"x":142.3133249282837,"y":88.54254376888275},{"x":110.1661057472229,"y":99.86233913898468},{"x":149.25052666664124,"y":106.37608766555786}],[{"x":82.91613873839378,"y":292.6100924015045},{"x":133.91112035512924,"y":304.814593821764},{"x":104.43486452102661,"y":330.3951778411865},{"x":72.6984107196331,"y":342.63312900066376},{"x":120.51901644468307,"y":354.2677878141403}],[{"x":278.20400857925415,"y":273.8323953151703},{"x":318.7582621574402,"y":273.39686357975006},{"x":295.5427807569504,"y":300.43398427963257},{"x":279.5109224319458,"y":311.497838973999},{"x":317.0187101364136,"y":313.05305886268616}],[{"x":260.46802616119385,"y":82.86598253250122},{"x":305.55760955810547,"y":83.54110813140869},{"x":281.43571567535395,"y":113.98349380493164},{"x":257.0603914260864,"y":125.50608730316162},{"x":306.01917552948,"y":127.2098445892334}],[{"x":489.5882513225079,"y":224.56882098317146},{"x":534.514480471611,"y":223.28146517276764},{"x":507.20826017856604,"y":250.1718647480011},{"x":493.0139665305615,"y":271.0716395378113},{"x":530.7517347931862,"y":270.4143014550209}],[{"x":606.397784024477,"y":105.43332290649414},{"x":645.2468676567078,"y":111.50095802545547},{"x":625.1735819578171,"y":133.40740483999252},{"x":598.8033188581467,"y":141.26284581422806},{"x":637.2144679427147,"y":147.32198816537857}]] -------------------------------------------------------------------------------- /test/env.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | import { fetchImage, fetchJson, fetchNetWeights, NeuralNetwork } from '../src'; 4 | import { TestEnv } from './Environment'; 5 | 6 | 
jasmine.DEFAULT_TIMEOUT_INTERVAL = 60000 7 | 8 | if (typeof window !== 'undefined' && window['__karma__'] && (window['__karma__'].config.jasmine.args as string[]).some(arg => arg === 'backend_cpu')) { 9 | tf.setBackend('cpu') 10 | } 11 | 12 | async function loadImageBrowser(uri: string): Promise { 13 | return fetchImage(`base${uri.startsWith('/') ? '' : '/'}${uri}`) 14 | } 15 | 16 | async function loadJsonBrowser(uri: string): Promise { 17 | return fetchJson(`base${uri.startsWith('/') ? '' : '/'}${uri}`) 18 | } 19 | 20 | async function initNetBrowser>( 21 | net: TNet, 22 | uncompressedFilename: string | boolean, 23 | isUnusedModel: boolean = false 24 | ) { 25 | const url = uncompressedFilename 26 | ? await fetchNetWeights(`base/weights_uncompressed/${uncompressedFilename}`) 27 | : (isUnusedModel ? 'base/weights_unused' : 'base/weights') 28 | await net.load(url) 29 | } 30 | 31 | const browserTestEnv: TestEnv = { 32 | loadImage: loadImageBrowser, 33 | loadJson: loadJsonBrowser, 34 | initNet: initNetBrowser 35 | } 36 | 37 | export function getTestEnv(): TestEnv { 38 | return global['nodeTestEnv'] || browserTestEnv 39 | } 40 | 41 | -------------------------------------------------------------------------------- /test/tests/classes/LabeledFaceDescriptors.test.ts: -------------------------------------------------------------------------------- 1 | import { LabeledFaceDescriptors } from '../../../src'; 2 | 3 | describe('globalApi', () => { 4 | 5 | describe('LabeledFaceDescriptors', () => { 6 | 7 | const json = '{"label":"foo","descriptors":[[1,2,3],[4,5,6]]}'; 8 | const l1 = 'foo'; 9 | const f1 = new Float32Array([1, 2, 3]); 10 | const f2 = new Float32Array([4, 5, 6]); 11 | 12 | it('JSON.stringify()', () => { 13 | expect(JSON.stringify(new LabeledFaceDescriptors(l1, [f1,f2]))).toBe(json); 14 | expect(JSON.stringify({ ld: new LabeledFaceDescriptors(l1, [f1,f2]) })).toBe(`{"ld":${json}}`); 15 | expect(JSON.stringify([ new LabeledFaceDescriptors(l1, [f1,f2]) 
])).toBe(`[${json}]`); 16 | }); 17 | 18 | it('fromJSON()', () => { 19 | const ld = LabeledFaceDescriptors.fromJSON(JSON.parse(json)); 20 | 21 | expect(ld.label).toBe(l1); 22 | expect(ld.descriptors.length).toBe(2); 23 | expect(ld.descriptors[0]).toEqual(f1); 24 | expect(ld.descriptors[1]).toEqual(f2); 25 | }); 26 | 27 | it('toJSON() => fromJSON()', () => { 28 | const ld = LabeledFaceDescriptors.fromJSON(new LabeledFaceDescriptors(l1, [f1,f2]).toJSON()); 29 | 30 | expect(ld.label).toBe(l1); 31 | expect(ld.descriptors.length).toBe(2); 32 | expect(ld.descriptors[0]).toEqual(f1); 33 | expect(ld.descriptors[1]).toEqual(f2); 34 | }); 35 | 36 | }); 37 | 38 | }); -------------------------------------------------------------------------------- /test/tests-legacy/mtcnn/expectMtcnnResults.ts: -------------------------------------------------------------------------------- 1 | import { IPoint, IRect } from '../../../src'; 2 | import { FaceLandmarks5 } from '../../../src/classes/FaceLandmarks5'; 3 | import { WithFaceDetection } from '../../../src/factories/WithFaceDetection'; 4 | import { WithFaceLandmarks } from '../../../src/factories/WithFaceLandmarks'; 5 | import { BoxAndLandmarksDeltas, expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWithLandmarks'; 6 | import { sortBoxes, sortByDistanceToOrigin } from '../../utils'; 7 | 8 | export const expectedMtcnnBoxes: IRect[] = sortBoxes([ 9 | { x: 70, y: 21, width: 112, height: 112 }, 10 | { x: 36, y: 250, width: 133, height: 132 }, 11 | { x: 221, y: 43, width: 112, height: 111 }, 12 | { x: 247, y: 231, width: 106, height: 107 }, 13 | { x: 566, y: 67, width: 104, height: 104 }, 14 | { x: 451, y: 176, width: 122, height: 122 } 15 | ]) 16 | 17 | export function expectMtcnnResults( 18 | results: WithFaceLandmarks, FaceLandmarks5>[], 19 | expectedMtcnnFaceLandmarks: IPoint[][], 20 | expectedScores: number[], 21 | deltas: BoxAndLandmarksDeltas 22 | ) { 23 | 24 | const expectedMtcnnFaceLandmarksSorted = 
sortByDistanceToOrigin(expectedMtcnnFaceLandmarks, obj => obj[0]) 25 | const expectedResults = expectedMtcnnBoxes 26 | .map((detection, i) => ({ detection, landmarks: expectedMtcnnFaceLandmarksSorted[i] })) 27 | 28 | return expectFaceDetectionsWithLandmarks(results, expectedResults, expectedScores, deltas) 29 | } -------------------------------------------------------------------------------- /src/faceRecognitionNet/residualLayer.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | import { conv, convDown, convNoRelu } from './convLayer'; 4 | import { ResidualLayerParams } from './types'; 5 | 6 | export function residual(x: tf.Tensor4D, params: ResidualLayerParams): tf.Tensor4D { 7 | let out = conv(x, params.conv1) 8 | out = convNoRelu(out, params.conv2) 9 | out = tf.add(out, x) 10 | out = tf.relu(out) 11 | return out 12 | } 13 | 14 | export function residualDown(x: tf.Tensor4D, params: ResidualLayerParams): tf.Tensor4D { 15 | let out = convDown(x, params.conv1) 16 | out = convNoRelu(out, params.conv2) 17 | 18 | let pooled = tf.avgPool(x, 2, 2, 'valid') as tf.Tensor4D 19 | const zeros = tf.zeros(pooled.shape) 20 | const isPad = pooled.shape[3] !== out.shape[3] 21 | const isAdjustShape = pooled.shape[1] !== out.shape[1] || pooled.shape[2] !== out.shape[2] 22 | 23 | if (isAdjustShape) { 24 | const padShapeX = [...out.shape] as [number, number, number, number] 25 | padShapeX[1] = 1 26 | const zerosW = tf.zeros(padShapeX) 27 | out = tf.concat([out, zerosW], 1) 28 | 29 | const padShapeY = [...out.shape] as [number, number, number, number] 30 | padShapeY[2] = 1 31 | const zerosH = tf.zeros(padShapeY) 32 | out = tf.concat([out, zerosH], 2) 33 | } 34 | 35 | pooled = isPad ? 
tf.concat([pooled, zeros], 3) : pooled 36 | out = tf.add(pooled, out) as tf.Tensor4D 37 | 38 | out = tf.relu(out) 39 | return out 40 | } -------------------------------------------------------------------------------- /src/faceFeatureExtractor/types.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | import { NetInput, TNetInput } from '..'; 4 | import { ConvParams, SeparableConvParams } from '../common'; 5 | import { NeuralNetwork } from '../NeuralNetwork'; 6 | 7 | export type ConvWithBatchNormParams = BatchNormParams & { 8 | filter: tf.Tensor4D 9 | } 10 | 11 | export type BatchNormParams = { 12 | mean: tf.Tensor1D 13 | variance: tf.Tensor1D 14 | scale: tf.Tensor1D 15 | offset: tf.Tensor1D 16 | } 17 | 18 | export type SeparableConvWithBatchNormParams = { 19 | depthwise: ConvWithBatchNormParams 20 | pointwise: ConvWithBatchNormParams 21 | } 22 | 23 | export type DenseBlock3Params = { 24 | conv0: SeparableConvParams | ConvParams 25 | conv1: SeparableConvParams 26 | conv2: SeparableConvParams 27 | } 28 | 29 | export type DenseBlock4Params = DenseBlock3Params & { 30 | conv3: SeparableConvParams 31 | } 32 | 33 | export type TinyFaceFeatureExtractorParams = { 34 | dense0: DenseBlock3Params 35 | dense1: DenseBlock3Params 36 | dense2: DenseBlock3Params 37 | } 38 | 39 | export type FaceFeatureExtractorParams = { 40 | dense0: DenseBlock4Params 41 | dense1: DenseBlock4Params 42 | dense2: DenseBlock4Params 43 | dense3: DenseBlock4Params 44 | } 45 | 46 | export interface IFaceFeatureExtractor extends NeuralNetwork { 47 | forwardInput(input: NetInput): tf.Tensor4D 48 | forward(input: TNetInput): Promise 49 | } -------------------------------------------------------------------------------- /src/resizeResults.ts: -------------------------------------------------------------------------------- 1 | import { Dimensions, IDimensions } from './classes'; 2 | import { FaceDetection } from 
'./classes/FaceDetection'; 3 | import { FaceLandmarks } from './classes/FaceLandmarks'; 4 | import { extendWithFaceDetection, isWithFaceDetection } from './factories/WithFaceDetection'; 5 | import { extendWithFaceLandmarks, isWithFaceLandmarks } from './factories/WithFaceLandmarks'; 6 | 7 | export function resizeResults(results: T, dimensions: IDimensions): T { 8 | 9 | const { width, height } = new Dimensions(dimensions.width, dimensions.height) 10 | 11 | if (width <= 0 || height <= 0) { 12 | throw new Error(`resizeResults - invalid dimensions: ${JSON.stringify({ width, height })}`) 13 | } 14 | 15 | if (Array.isArray(results)) { 16 | return results.map(obj => resizeResults(obj, { width, height })) as any as T 17 | } 18 | 19 | if (isWithFaceLandmarks(results)) { 20 | const resizedDetection = results.detection.forSize(width, height) 21 | const resizedLandmarks = results.unshiftedLandmarks.forSize(resizedDetection.box.width, resizedDetection.box.height) 22 | 23 | return extendWithFaceLandmarks(extendWithFaceDetection(results, resizedDetection), resizedLandmarks) 24 | } 25 | 26 | if (isWithFaceDetection(results)) { 27 | return extendWithFaceDetection(results, results.detection.forSize(width, height)) 28 | } 29 | 30 | if (results instanceof FaceLandmarks || results instanceof FaceDetection) { 31 | return (results as any).forSize(width, height) 32 | } 33 | 34 | return results 35 | } -------------------------------------------------------------------------------- /src/classes/ObjectDetection.ts: -------------------------------------------------------------------------------- 1 | import { Box } from './Box'; 2 | import { Dimensions, IDimensions } from './Dimensions'; 3 | import { IRect, Rect } from './Rect'; 4 | 5 | export class ObjectDetection { 6 | private _score: number 7 | private _classScore: number 8 | private _className: string 9 | private _box: Rect 10 | private _imageDims: Dimensions 11 | 12 | constructor( 13 | score: number, 14 | classScore: number, 15 | 
className: string, 16 | relativeBox: IRect, 17 | imageDims: IDimensions 18 | ) { 19 | this._imageDims = new Dimensions(imageDims.width, imageDims.height) 20 | this._score = score 21 | this._classScore = classScore 22 | this._className = className 23 | this._box = new Box(relativeBox).rescale(this._imageDims) 24 | } 25 | 26 | public get score(): number { return this._score } 27 | public get classScore(): number { return this._classScore } 28 | public get className(): string { return this._className } 29 | public get box(): Box { return this._box } 30 | public get imageDims(): Dimensions { return this._imageDims } 31 | public get imageWidth(): number { return this.imageDims.width } 32 | public get imageHeight(): number { return this.imageDims.height } 33 | public get relativeBox(): Box { return new Box(this._box).rescale(this.imageDims.reverse()) } 34 | 35 | public forSize(width: number, height: number): ObjectDetection { 36 | return new ObjectDetection( 37 | this.score, 38 | this.classScore, 39 | this.className, 40 | this.relativeBox, 41 | { width, height} 42 | ) 43 | } 44 | } -------------------------------------------------------------------------------- /examples/examples-browser/public/styles.css: -------------------------------------------------------------------------------- 1 | .page-container { 2 | left: 0; 3 | right: 0; 4 | margin: auto; 5 | margin-top: 20px; 6 | padding-left: 280px; 7 | display: inline-flex !important; 8 | } 9 | 10 | @media only screen and (max-width : 992px) { 11 | .page-container { 12 | padding-left: 0; 13 | display: flex !important; 14 | } 15 | } 16 | 17 | #navbar { 18 | position: absolute; 19 | top: 20px; 20 | left: 20px; 21 | } 22 | 23 | .center-content { 24 | display: flex; 25 | flex-direction: column; 26 | justify-content: center; 27 | align-items: center; 28 | flex-wrap: wrap; 29 | } 30 | 31 | .side-by-side { 32 | display: flex; 33 | justify-content: center; 34 | align-items: center; 35 | } 36 | .side-by-side >* { 37 | margin: 0 
5px; 38 | } 39 | 40 | .bold { 41 | font-weight: bold; 42 | } 43 | 44 | .margin-sm { 45 | margin: 5px; 46 | } 47 | 48 | .margin { 49 | margin: 20px; 50 | } 51 | 52 | .button-sm { 53 | padding: 0 10px !important; 54 | } 55 | 56 | .pad-sides-sm { 57 | padding: 0 8px !important; 58 | } 59 | 60 | #github-link { 61 | display: flex !important; 62 | justify-content: center; 63 | align-items: center; 64 | border-bottom: 1px solid; 65 | margin-bottom: 10px; 66 | } 67 | 68 | #overlay, .overlay { 69 | position: absolute; 70 | top: 0; 71 | left: 0; 72 | } 73 | 74 | #facesContainer canvas { 75 | margin: 10px; 76 | } 77 | 78 | input[type="file"]::-webkit-file-upload-button { 79 | background: #26a69a; 80 | border: 1px solid gray; 81 | cursor: pointer; 82 | color: #fff; 83 | border-radius: .2em; 84 | } -------------------------------------------------------------------------------- /test/expectFaceDetectionsWithLandmarks.ts: -------------------------------------------------------------------------------- 1 | import { FaceLandmarks } from '../src/classes/FaceLandmarks'; 2 | import { FaceLandmarks68 } from '../src/classes/FaceLandmarks68'; 3 | import { WithFaceDetection } from '../src/factories/WithFaceDetection'; 4 | import { WithFaceLandmarks } from '../src/factories/WithFaceLandmarks'; 5 | import { ExpectedFaceDetectionWithLandmarks, expectPointsClose, expectRectClose, sortByFaceDetection } from './utils'; 6 | 7 | export type BoxAndLandmarksDeltas = { 8 | maxScoreDelta: number 9 | maxBoxDelta: number 10 | maxLandmarksDelta: number 11 | } 12 | 13 | export function expectFaceDetectionsWithLandmarks( 14 | results: WithFaceLandmarks, TFaceLandmarks>[], 15 | allExpectedFullFaceDescriptions: ExpectedFaceDetectionWithLandmarks[], 16 | expectedScores: number[], 17 | deltas: BoxAndLandmarksDeltas 18 | ) { 19 | 20 | const expectedFullFaceDescriptions = expectedScores 21 | .map((score, i) => ({ 22 | score, 23 | ...allExpectedFullFaceDescriptions[i] 24 | })) 25 | .filter(expected => 
expected.score !== -1) 26 | 27 | const sortedResults = sortByFaceDetection(results) 28 | 29 | expectedFullFaceDescriptions.forEach((expected, i) => { 30 | const { detection, landmarks } = sortedResults[i] 31 | expect(Math.abs(detection.score - expected.score)).toBeLessThan(deltas.maxScoreDelta) 32 | expectRectClose(detection.box, expected.detection, deltas.maxBoxDelta) 33 | expectPointsClose(landmarks.positions, expected.landmarks, deltas.maxLandmarksDelta) 34 | }) 35 | } -------------------------------------------------------------------------------- /src/tinyFaceDetector/TinyFaceDetector.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | import { FaceDetection, Point } from '../classes'; 4 | import { ParamMapping } from '../common'; 5 | import { TNetInput } from '../dom'; 6 | import { ITinyYolov2Options } from '../tinyYolov2'; 7 | import { TinyYolov2Base } from '../tinyYolov2/TinyYolov2Base'; 8 | import { TinyYolov2NetParams } from '../tinyYolov2/types'; 9 | import { BOX_ANCHORS, IOU_THRESHOLD, MEAN_RGB } from './const'; 10 | 11 | export class TinyFaceDetector extends TinyYolov2Base { 12 | 13 | constructor() { 14 | const config = { 15 | withSeparableConvs: true, 16 | iouThreshold: IOU_THRESHOLD, 17 | classes: ['face'], 18 | anchors: BOX_ANCHORS, 19 | meanRgb: MEAN_RGB, 20 | isFirstLayerConv2d: true, 21 | filterSizes: [3, 16, 32, 64, 128, 256, 512] 22 | } 23 | 24 | super(config) 25 | } 26 | 27 | public get anchors(): Point[] { 28 | return this.config.anchors 29 | } 30 | 31 | public async locateFaces(input: TNetInput, forwardParams: ITinyYolov2Options): Promise { 32 | const objectDetections = await this.detect(input, forwardParams) 33 | return objectDetections.map(det => new FaceDetection(det.score, det.relativeBox, { width: det.imageWidth, height: det.imageHeight })) 34 | } 35 | 36 | protected getDefaultModelName(): string { 37 | return 'tiny_face_detector_model' 38 | } 39 | 
40 | protected extractParamsFromWeigthMap(weightMap: tf.NamedTensorMap): { params: TinyYolov2NetParams, paramMappings: ParamMapping[] } { 41 | return super.extractParamsFromWeigthMap(weightMap) 42 | } 43 | } -------------------------------------------------------------------------------- /test/tests/ssdMobilenetv1/ssdMobilenetv1.locateFaces.test.ts: -------------------------------------------------------------------------------- 1 | import * as faceapi from '../../../src'; 2 | import { getTestEnv } from '../../env'; 3 | import { expectFaceDetections } from '../../expectFaceDetections'; 4 | import { describeWithBackend, describeWithNets } from '../../utils'; 5 | import { expectedSsdBoxes } from './expectedBoxes'; 6 | 7 | describeWithBackend('ssdMobilenetv1.locateFaces', () => { 8 | 9 | let imgEl: HTMLImageElement 10 | 11 | beforeAll(async () => { 12 | imgEl = await getTestEnv().loadImage('test/images/faces.jpg') 13 | }) 14 | 15 | describeWithNets('quantized weights', { withSsdMobilenetv1: { quantized: true } }, ({ ssdMobilenetv1 }) => { 16 | 17 | it('scores > 0.7', async () => { 18 | const detections = await ssdMobilenetv1.locateFaces(imgEl, { minConfidence: 0.7 }) as faceapi.FaceDetection[] 19 | 20 | expect(detections.length).toEqual(4) 21 | 22 | const expectedScores = [-1, 0.81, 0.97, 0.88, 0.84, -1] 23 | const maxScoreDelta = 0.05 24 | const maxBoxDelta = 4 25 | 26 | expectFaceDetections(detections, expectedSsdBoxes, expectedScores, maxScoreDelta, maxBoxDelta) 27 | }) 28 | 29 | it('scores > 0.5', async () => { 30 | const detections = await ssdMobilenetv1.locateFaces(imgEl, { minConfidence: 0.5 }) as faceapi.FaceDetection[] 31 | 32 | expect(detections.length).toEqual(6) 33 | 34 | const expectedScores = [0.54, 0.81, 0.97, 0.88, 0.84, 0.61] 35 | const maxScoreDelta = 0.05 36 | const maxBoxDelta = 5 37 | 38 | expectFaceDetections(detections, expectedSsdBoxes, expectedScores, maxScoreDelta, maxBoxDelta) 39 | }) 40 | 41 | }) 42 | 43 | }) 
--------------------------------------------------------------------------------
/src/draw/drawFaceExpressions.ts:
--------------------------------------------------------------------------------
import { IPoint, Point } from '../classes';
import { FaceExpressions } from '../faceExpressionNet';
import { isWithFaceDetection } from '../factories/WithFaceDetection';
import { isWithFaceExpressions, WithFaceExpressions } from '../factories/WithFaceExpressions';
import { round } from '../utils';
import { DrawTextField } from './DrawTextField';

export type DrawFaceExpressionsInput = FaceExpressions | WithFaceExpressions<{}>

/**
 * Render the expressions exceeding `minConfidence` as a text field, anchored
 * either at the detection box (when available) or at `textFieldAnchor`.
 * NOTE(review): the Array element type was stripped during extraction;
 * restored as Array<DrawFaceExpressionsInput>.
 */
export function drawFaceExpressions(
  canvasArg: string | HTMLCanvasElement,
  faceExpressions: DrawFaceExpressionsInput | Array<DrawFaceExpressionsInput>,
  minConfidence = 0.1,
  textFieldAnchor?: IPoint
) {
  const faceExpressionsArray = Array.isArray(faceExpressions) ? faceExpressions : [faceExpressions]

  faceExpressionsArray.forEach(e => {
    const expr = e instanceof FaceExpressions
      ? e
      : (isWithFaceExpressions(e) ? e.expressions : undefined)
    if (!expr) {
      throw new Error('drawFaceExpressions - expected faceExpressions to be FaceExpressions | WithFaceExpressions<{}> or array thereof')
    }

    const sorted = expr.asSortedArray()
    const resultsToDisplay = sorted.filter(expr => expr.probability > minConfidence)

    const anchor = isWithFaceDetection(e)
      ? e.detection.box.bottomLeft
      : (textFieldAnchor || new Point(0, 0))

    const drawTextField = new DrawTextField(
      resultsToDisplay.map(expr => `${expr.expression} (${round(expr.probability)})`),
      anchor
    )
    drawTextField.draw(canvasArg)
  })
}
--------------------------------------------------------------------------------
/test/tests-legacy/faceRecognitionNet.uncompressed.test.ts:
--------------------------------------------------------------------------------
import { createCanvasFromMedia, euclideanDistance } from '../../src';
import { getTestEnv } from '../env';
import { describeWithBackend, describeWithNets } from '../utils';

describeWithBackend('faceRecognitionNet, uncompressed', () => {

  let imgEl1: HTMLCanvasElement
  let imgElRect: HTMLCanvasElement
  let faceDescriptor1: number[]
  let faceDescriptorRect: number[]

  beforeAll(async () => {
    imgEl1 = createCanvasFromMedia(await getTestEnv().loadImage('test/images/face1.png'))
    imgElRect = createCanvasFromMedia(await getTestEnv().loadImage('test/images/face_rectangular.png'))
    faceDescriptor1 = await getTestEnv().loadJson('test/data/faceDescriptor1.json')
    faceDescriptorRect = await getTestEnv().loadJson('test/data/faceDescriptorRect.json')
  })

  describeWithNets('uncompressed weights', { withFaceRecognitionNet: { quantized: false } }, ({ faceRecognitionNet }) => {

    it('computes face descriptor for squared input', async () => {
      const result = await faceRecognitionNet.computeFaceDescriptor(imgEl1) as Float32Array
      expect(result.length).toEqual(128)
      expect(euclideanDistance(result, faceDescriptor1)).toBeLessThan(0.1)
    })

    it('computes face descriptor for rectangular input', async () => {
      const result = await faceRecognitionNet.computeFaceDescriptor(imgElRect) as Float32Array
      expect(result.length).toEqual(128)
      expect(euclideanDistance(result,
faceDescriptorRect)).toBeLessThan(0.1) 31 | }) 32 | 33 | }) 34 | }) -------------------------------------------------------------------------------- /src/faceFeatureExtractor/loadParamsFactory.ts: -------------------------------------------------------------------------------- 1 | import { extractWeightEntryFactory, loadSeparableConvParamsFactory, ParamMapping } from '../common'; 2 | import { loadConvParamsFactory } from '../common/loadConvParamsFactory'; 3 | import { DenseBlock3Params, DenseBlock4Params } from './types'; 4 | 5 | export function loadParamsFactory(weightMap: any, paramMappings: ParamMapping[]) { 6 | 7 | const extractWeightEntry = extractWeightEntryFactory(weightMap, paramMappings) 8 | 9 | const extractConvParams = loadConvParamsFactory(extractWeightEntry) 10 | const extractSeparableConvParams = loadSeparableConvParamsFactory(extractWeightEntry) 11 | 12 | function extractDenseBlock3Params(prefix: string, isFirstLayer: boolean = false): DenseBlock3Params { 13 | const conv0 = isFirstLayer 14 | ? extractConvParams(`${prefix}/conv0`) 15 | : extractSeparableConvParams(`${prefix}/conv0`) 16 | const conv1 = extractSeparableConvParams(`${prefix}/conv1`) 17 | const conv2 = extractSeparableConvParams(`${prefix}/conv2`) 18 | 19 | return { conv0, conv1, conv2 } 20 | } 21 | 22 | function extractDenseBlock4Params(prefix: string, isFirstLayer: boolean = false): DenseBlock4Params { 23 | const conv0 = isFirstLayer 24 | ? 
extractConvParams(`${prefix}/conv0`) 25 | : extractSeparableConvParams(`${prefix}/conv0`) 26 | const conv1 = extractSeparableConvParams(`${prefix}/conv1`) 27 | const conv2 = extractSeparableConvParams(`${prefix}/conv2`) 28 | const conv3 = extractSeparableConvParams(`${prefix}/conv3`) 29 | 30 | return { conv0, conv1, conv2, conv3 } 31 | } 32 | 33 | return { 34 | extractDenseBlock3Params, 35 | extractDenseBlock4Params 36 | } 37 | } -------------------------------------------------------------------------------- /test/tests-legacy/ssdMobilenetv1.locateFaces.uncompressed.test.ts: -------------------------------------------------------------------------------- 1 | import * as faceapi from '../../src'; 2 | import { getTestEnv } from '../env'; 3 | import { expectFaceDetections } from '../expectFaceDetections'; 4 | import { describeWithBackend, describeWithNets } from '../utils'; 5 | import { expectedSsdBoxes } from '../tests/ssdMobilenetv1/expectedBoxes'; 6 | 7 | describeWithBackend('ssdMobilenetv1.locateFaces, uncompressed', () => { 8 | 9 | let imgEl: HTMLImageElement 10 | 11 | beforeAll(async () => { 12 | imgEl = await getTestEnv().loadImage('test/images/faces.jpg') 13 | }) 14 | 15 | describeWithNets('uncompressed weights', { withSsdMobilenetv1: { quantized: false } }, ({ ssdMobilenetv1 }) => { 16 | 17 | it('scores > 0.8', async () => { 18 | const detections = await ssdMobilenetv1.locateFaces(imgEl, { minConfidence: 0.8 }) as faceapi.FaceDetection[] 19 | 20 | expect(detections.length).toEqual(3) 21 | 22 | const expectedScores = [-1, -1, 0.98, 0.88, 0.81, -1] 23 | const maxScoreDelta = 0.05 24 | const maxBoxDelta = 5 25 | 26 | expectFaceDetections(detections, expectedSsdBoxes, expectedScores, maxScoreDelta, maxBoxDelta) 27 | }) 28 | 29 | it('scores > 0.5', async () => { 30 | const detections = await ssdMobilenetv1.locateFaces(imgEl, { minConfidence: 0.5 }) as faceapi.FaceDetection[] 31 | 32 | expect(detections.length).toEqual(6) 33 | 34 | const expectedScores = [0.57, 
0.76, 0.98, 0.88, 0.81, 0.58] 35 | const maxScoreDelta = 0.05 36 | const maxBoxDelta = 5 37 | 38 | expectFaceDetections(detections, expectedSsdBoxes, expectedScores, maxScoreDelta, maxBoxDelta) 39 | }) 40 | 41 | }) 42 | 43 | }) -------------------------------------------------------------------------------- /src/factories/WithFaceLandmarks.ts: -------------------------------------------------------------------------------- 1 | import { FaceDetection } from '../classes/FaceDetection'; 2 | import { FaceLandmarks } from '../classes/FaceLandmarks'; 3 | import { FaceLandmarks68 } from '../classes/FaceLandmarks68'; 4 | import { isWithFaceDetection, WithFaceDetection } from './WithFaceDetection'; 5 | 6 | export type WithFaceLandmarks< 7 | TSource extends WithFaceDetection<{}>, 8 | TFaceLandmarks extends FaceLandmarks = FaceLandmarks68 9 | > = TSource & { 10 | landmarks: TFaceLandmarks 11 | unshiftedLandmarks: TFaceLandmarks 12 | alignedRect: FaceDetection 13 | } 14 | 15 | export function isWithFaceLandmarks(obj: any): obj is WithFaceLandmarks, FaceLandmarks> { 16 | return isWithFaceDetection(obj) 17 | && obj['landmarks'] instanceof FaceLandmarks 18 | && obj['unshiftedLandmarks'] instanceof FaceLandmarks 19 | && obj['alignedRect'] instanceof FaceDetection 20 | } 21 | 22 | export function extendWithFaceLandmarks< 23 | TSource extends WithFaceDetection<{}>, 24 | TFaceLandmarks extends FaceLandmarks = FaceLandmarks68 25 | > ( 26 | sourceObj: TSource, 27 | unshiftedLandmarks: TFaceLandmarks 28 | ): WithFaceLandmarks { 29 | 30 | const { box: shift } = sourceObj.detection 31 | const landmarks = unshiftedLandmarks.shiftBy(shift.x, shift.y) 32 | 33 | const rect = landmarks.align() 34 | const { imageDims } = sourceObj.detection 35 | const alignedRect = new FaceDetection(sourceObj.detection.score, rect.rescale(imageDims.reverse()), imageDims) 36 | 37 | const extension = { 38 | landmarks, 39 | unshiftedLandmarks, 40 | alignedRect 41 | } 42 | 43 | return Object.assign({}, sourceObj, 
extension) 44 | } -------------------------------------------------------------------------------- /src/common/extractSeparableConvParamsFactory.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | import { ExtractWeightsFunction, ParamMapping, SeparableConvParams } from './types'; 4 | 5 | export function extractSeparableConvParamsFactory( 6 | extractWeights: ExtractWeightsFunction, 7 | paramMappings: ParamMapping[] 8 | ) { 9 | 10 | return function(channelsIn: number, channelsOut: number, mappedPrefix: string): SeparableConvParams { 11 | const depthwise_filter = tf.tensor4d(extractWeights(3 * 3 * channelsIn), [3, 3, channelsIn, 1]) 12 | const pointwise_filter = tf.tensor4d(extractWeights(channelsIn * channelsOut), [1, 1, channelsIn, channelsOut]) 13 | const bias = tf.tensor1d(extractWeights(channelsOut)) 14 | 15 | paramMappings.push( 16 | { paramPath: `${mappedPrefix}/depthwise_filter` }, 17 | { paramPath: `${mappedPrefix}/pointwise_filter` }, 18 | { paramPath: `${mappedPrefix}/bias` } 19 | ) 20 | 21 | return new SeparableConvParams( 22 | depthwise_filter, 23 | pointwise_filter, 24 | bias 25 | ) 26 | } 27 | 28 | } 29 | 30 | export function loadSeparableConvParamsFactory( 31 | extractWeightEntry: (originalPath: string, paramRank: number) => T 32 | ) { 33 | 34 | return function (prefix: string): SeparableConvParams { 35 | const depthwise_filter = extractWeightEntry(`${prefix}/depthwise_filter`, 4) 36 | const pointwise_filter = extractWeightEntry(`${prefix}/pointwise_filter`, 4) 37 | const bias = extractWeightEntry(`${prefix}/bias`, 1) 38 | 39 | return new SeparableConvParams( 40 | depthwise_filter, 41 | pointwise_filter, 42 | bias 43 | ) 44 | } 45 | 46 | } 47 | -------------------------------------------------------------------------------- /test/expectFullFaceDescriptions.ts: -------------------------------------------------------------------------------- 1 | import { 
euclideanDistance } from '../src/euclideanDistance'; 2 | import { WithFaceDescriptor } from '../src/factories/WithFaceDescriptor'; 3 | import { WithFaceDetection } from '../src/factories/WithFaceDetection'; 4 | import { WithFaceLandmarks } from '../src/factories/WithFaceLandmarks'; 5 | import { BoxAndLandmarksDeltas } from './expectFaceDetectionsWithLandmarks'; 6 | import { ExpectedFullFaceDescription, expectPointClose, expectRectClose, sortByFaceDetection } from './utils'; 7 | 8 | export type FullFaceDescriptionDeltas = BoxAndLandmarksDeltas & { 9 | maxDescriptorDelta: number 10 | } 11 | 12 | export function expectFullFaceDescriptions( 13 | results: WithFaceDescriptor>>[], 14 | allExpectedFullFaceDescriptions: ExpectedFullFaceDescription[], 15 | expectedScores: number[], 16 | deltas: FullFaceDescriptionDeltas 17 | ) { 18 | 19 | const expectedFullFaceDescriptions = expectedScores 20 | .map((score, i) => ({ 21 | score, 22 | ...allExpectedFullFaceDescriptions[i] 23 | })) 24 | .filter(expected => expected.score !== -1) 25 | 26 | const sortedResults = sortByFaceDetection(results) 27 | 28 | expectedFullFaceDescriptions.forEach((expected, i) => { 29 | const { detection, landmarks, descriptor } = sortedResults[i] 30 | expect(Math.abs(detection.score - expected.score)).toBeLessThan(deltas.maxScoreDelta) 31 | expectRectClose(detection.box, expected.detection, deltas.maxBoxDelta) 32 | landmarks.positions.forEach((pt, j) => expectPointClose(pt, expected.landmarks[j], deltas.maxLandmarksDelta)) 33 | expect(euclideanDistance(descriptor, expected.descriptor)).toBeLessThan(deltas.maxDescriptorDelta) 34 | }) 35 | } -------------------------------------------------------------------------------- /src/faceFeatureExtractor/extractorsFactory.ts: -------------------------------------------------------------------------------- 1 | import { 2 | extractConvParamsFactory, 3 | extractSeparableConvParamsFactory, 4 | ExtractWeightsFunction, 5 | ParamMapping, 6 | } from '../common'; 7 | import 
{ DenseBlock3Params, DenseBlock4Params } from './types'; 8 | 9 | export function extractorsFactory(extractWeights: ExtractWeightsFunction, paramMappings: ParamMapping[]) { 10 | 11 | const extractConvParams = extractConvParamsFactory(extractWeights, paramMappings) 12 | const extractSeparableConvParams = extractSeparableConvParamsFactory(extractWeights, paramMappings) 13 | 14 | function extractDenseBlock3Params(channelsIn: number, channelsOut: number, mappedPrefix: string, isFirstLayer: boolean = false): DenseBlock3Params { 15 | 16 | const conv0 = isFirstLayer 17 | ? extractConvParams(channelsIn, channelsOut, 3, `${mappedPrefix}/conv0`) 18 | : extractSeparableConvParams(channelsIn, channelsOut, `${mappedPrefix}/conv0`) 19 | const conv1 = extractSeparableConvParams(channelsOut, channelsOut, `${mappedPrefix}/conv1`) 20 | const conv2 = extractSeparableConvParams(channelsOut, channelsOut, `${mappedPrefix}/conv2`) 21 | 22 | return { conv0, conv1, conv2 } 23 | } 24 | 25 | function extractDenseBlock4Params(channelsIn: number, channelsOut: number, mappedPrefix: string, isFirstLayer: boolean = false): DenseBlock4Params { 26 | 27 | const { conv0, conv1, conv2 } = extractDenseBlock3Params(channelsIn, channelsOut, mappedPrefix, isFirstLayer) 28 | const conv3 = extractSeparableConvParams(channelsOut, channelsOut, `${mappedPrefix}/conv3`) 29 | 30 | return { conv0, conv1, conv2, conv3 } 31 | } 32 | 33 | return { 34 | extractDenseBlock3Params, 35 | extractDenseBlock4Params 36 | } 37 | 38 | } -------------------------------------------------------------------------------- /karma.conf.js: -------------------------------------------------------------------------------- 1 | const dataFiles = [ 2 | 'test/images/*.jpg', 3 | 'test/images/*.png', 4 | 'test/data/*.json', 5 | 'test/data/*.weights', 6 | 'test/media/*.mp4', 7 | 'weights/**/*', 8 | 'weights_uncompressed/**/*', 9 | 'weights_unused/**/*' 10 | ].map(pattern => ({ 11 | pattern, 12 | watched: false, 13 | included: false, 14 | served: 
true, 15 | nocache: false 16 | })) 17 | 18 | let exclude = ( 19 | process.env.UUT 20 | ? [ 21 | 'dom', 22 | 'faceLandmarkNet', 23 | 'faceRecognitionNet', 24 | 'ssdMobilenetv1', 25 | 'tinyFaceDetector' 26 | ] 27 | : [] 28 | ) 29 | .filter(ex => ex !== process.env.UUT) 30 | .map(ex => `test/tests/${ex}/*.ts`) 31 | 32 | // exclude nodejs tests 33 | exclude = exclude.concat(['**/*.node.test.ts']) 34 | exclude = exclude.concat(['test/env.node.ts']) 35 | exclude = exclude.concat(['test/tests-legacy/**/*.ts']) 36 | 37 | 38 | module.exports = function(config) { 39 | const args = [] 40 | if (process.env.BACKEND_CPU) { 41 | args.push('backend_cpu') 42 | } 43 | 44 | config.set({ 45 | frameworks: ['jasmine', 'karma-typescript'], 46 | files: [ 47 | 'src/**/*.ts', 48 | 'test/**/*.ts' 49 | ].concat(dataFiles), 50 | exclude, 51 | preprocessors: { 52 | '**/*.ts': ['karma-typescript'] 53 | }, 54 | karmaTypescriptConfig: { 55 | tsconfig: 'tsconfig.test.json' 56 | }, 57 | browsers: ['Chrome'], 58 | browserNoActivityTimeout: 120000, 59 | browserDisconnectTolerance: 3, 60 | browserDisconnectTimeout : 120000, 61 | captureTimeout: 60000, 62 | client: { 63 | jasmine: { 64 | timeoutInterval: 60000, 65 | args 66 | } 67 | } 68 | }) 69 | } 70 | -------------------------------------------------------------------------------- /test/tests/tinyFaceDetector/tinyFaceDetector.locateFaces.test.ts: -------------------------------------------------------------------------------- 1 | import * as faceapi from '../../../src'; 2 | import { getTestEnv } from '../../env'; 3 | import { expectedTinyFaceDetectorBoxes } from '../../expectedTinyFaceDetectorBoxes'; 4 | import { expectFaceDetections } from '../../expectFaceDetections'; 5 | import { describeWithBackend, describeWithNets } from '../../utils'; 6 | 7 | describeWithBackend('tinyFaceDetector.locateFaces', () => { 8 | 9 | let imgEl: HTMLImageElement 10 | 11 | beforeAll(async () => { 12 | imgEl = await getTestEnv().loadImage('test/images/faces.jpg') 13 | }) 
14 | 15 | describeWithNets('quantized weights', { withTinyFaceDetector: { quantized: true } }, ({ tinyFaceDetector }) => { 16 | 17 | it('inputSize 320, finds all faces', async () => { 18 | const detections = await tinyFaceDetector.locateFaces(imgEl, { inputSize: 320 }) as faceapi.FaceDetection[] 19 | 20 | expect(detections.length).toEqual(6) 21 | 22 | const expectedScores = [0.77, 0.75, 0.88, 0.77, 0.83, 0.85] 23 | const maxScoreDelta = 0.05 24 | const maxBoxDelta = 40 25 | 26 | expectFaceDetections(detections, expectedTinyFaceDetectorBoxes, expectedScores, maxScoreDelta, maxBoxDelta) 27 | }) 28 | 29 | it('inputSize 416, finds all faces', async () => { 30 | const detections = await tinyFaceDetector.locateFaces(imgEl, { inputSize: 416 }) as faceapi.FaceDetection[] 31 | 32 | expect(detections.length).toEqual(6) 33 | 34 | const expectedScores = [0.7, 0.82, 0.93, 0.86, 0.79, 0.84] 35 | const maxScoreDelta = 0.05 36 | const maxBoxDelta = 5 37 | 38 | expectFaceDetections(detections, expectedTinyFaceDetectorBoxes, expectedScores, maxScoreDelta, maxBoxDelta) 39 | }) 40 | 41 | }) 42 | 43 | }) -------------------------------------------------------------------------------- /src/ops/padToSquare.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | /** 4 | * Pads the smaller dimension of an image tensor with zeros, such that width === height. 5 | * 6 | * @param imgTensor The image tensor. 7 | * @param isCenterImage (optional, default: false) If true, add an equal amount of padding on 8 | * both sides of the minor dimension oof the image. 9 | * @returns The padded tensor with width === height. 
10 | */ 11 | export function padToSquare( 12 | imgTensor: tf.Tensor4D, 13 | isCenterImage: boolean = false 14 | ): tf.Tensor4D { 15 | return tf.tidy(() => { 16 | 17 | const [height, width] = imgTensor.shape.slice(1) 18 | if (height === width) { 19 | return imgTensor 20 | } 21 | 22 | const dimDiff = Math.abs(height - width) 23 | const paddingAmount = Math.round(dimDiff * (isCenterImage ? 0.5 : 1)) 24 | const paddingAxis = height > width ? 2 : 1 25 | 26 | const createPaddingTensor = (paddingAmount: number): tf.Tensor => { 27 | const paddingTensorShape = imgTensor.shape.slice() 28 | paddingTensorShape[paddingAxis] = paddingAmount 29 | return tf.fill(paddingTensorShape, 0) 30 | } 31 | 32 | const paddingTensorAppend = createPaddingTensor(paddingAmount) 33 | const remainingPaddingAmount = dimDiff - (paddingTensorAppend.shape[paddingAxis] as number) 34 | 35 | const paddingTensorPrepend = isCenterImage && remainingPaddingAmount 36 | ? createPaddingTensor(remainingPaddingAmount) 37 | : null 38 | 39 | const tensorsToStack = [ 40 | paddingTensorPrepend, 41 | imgTensor, 42 | paddingTensorAppend 43 | ] 44 | .filter(t => !!t) 45 | .map((t: tf.Tensor) => t.toFloat()) as tf.Tensor4D[] 46 | return tf.concat(tensorsToStack, paddingAxis) 47 | }) 48 | } -------------------------------------------------------------------------------- /examples/examples-browser/public/js/imageSelectionControls.js: -------------------------------------------------------------------------------- 1 | async function onSelectedImageChanged(uri) { 2 | const img = await faceapi.fetchImage(uri) 3 | $(`#inputImg`).get(0).src = img.src 4 | updateResults() 5 | } 6 | 7 | async function loadImageFromUrl(url) { 8 | const img = await requestExternalImage($('#imgUrlInput').val()) 9 | $('#inputImg').get(0).src = img.src 10 | updateResults() 11 | } 12 | 13 | async function loadImageFromUpload() { 14 | const imgFile = $('#queryImgUploadInput').get(0).files[0] 15 | const img = await faceapi.bufferToImage(imgFile) 16 | 
const classes = ['amy', 'bernadette', 'howard', 'leonard', 'penny', 'raj', 'sheldon', 'stuart']

// image uri convention: "<class>/<class><idx>.png"
function getFaceImageUri(className, idx) {
  return `${className}/${className}${idx}.png`
}

// renders a select list with one optgroup per character, five face images each
function renderFaceImageSelectList(selectListId, onChange, initialValue) {
  const indices = [1, 2, 3, 4, 5]

  function renderChildren(select) {
    for (const className of classes) {
      const optgroup = document.createElement('optgroup')
      optgroup.label = className
      select.appendChild(optgroup)
      for (const imageIdx of indices) {
        renderOption(
          optgroup,
          `${className} ${imageIdx}`,
          getFaceImageUri(className, imageIdx)
        )
      }
    }
  }

  renderSelectList(
    selectListId,
    onChange,
    getFaceImageUri(initialValue.className, initialValue.imageIdx),
    renderChildren
  )
}

// fetch the first n images of each class and compute their descriptors
async function createBbtFaceMatcher(numImagesForTraining = 1) {
  const maxAvailableImagesPerClass = 5
  numImagesForTraining = Math.min(numImagesForTraining, maxAvailableImagesPerClass)

  const labeledFaceDescriptors = await Promise.all(classes.map(
    async className => {
      const descriptors = []
      for (let i = 1; i <= numImagesForTraining; i++) {
        const img = await faceapi.fetchImage(getFaceImageUri(className, i))
        descriptors.push(await faceapi.computeFaceDescriptor(img))
      }

      return new faceapi.LabeledFaceDescriptors(
        className,
        descriptors
      )
    }
  ))

  return new faceapi.FaceMatcher(labeledFaceDescriptors)
}
expect(withFaceLandmarks.detection).toEqual(detection) 17 | expect(withFaceLandmarks.unshiftedLandmarks).toEqual(unshiftedLandmarks) 18 | expect(withFaceLandmarks.alignedRect instanceof FaceDetection).toBe(true) 19 | expect(withFaceLandmarks.landmarks instanceof FaceLandmarks68).toBe(true) 20 | 21 | }) 22 | 23 | it('extends source object', () => { 24 | 25 | const srcObj = { srcProp: { foo: true } } 26 | const srcObjWithFaceDetection = makeSrcObjectWithFaceDetection(srcObj) 27 | const withFaceLandmarks = extendWithFaceLandmarks(srcObjWithFaceDetection, unshiftedLandmarks) 28 | 29 | expect(withFaceLandmarks.srcProp).toEqual(srcObj.srcProp) 30 | 31 | expect(withFaceLandmarks.detection).toEqual(detection) 32 | expect(withFaceLandmarks.unshiftedLandmarks).toEqual(unshiftedLandmarks) 33 | expect(withFaceLandmarks.alignedRect instanceof FaceDetection).toBe(true) 34 | expect(withFaceLandmarks.landmarks instanceof FaceLandmarks68).toBe(true) 35 | 36 | }) 37 | 38 | }) 39 | -------------------------------------------------------------------------------- /src/faceExpressionNet/FaceExpressionNet.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | import { NetInput, TNetInput, toNetInput } from '../dom'; 4 | import { FaceFeatureExtractor } from '../faceFeatureExtractor/FaceFeatureExtractor'; 5 | import { FaceFeatureExtractorParams } from '../faceFeatureExtractor/types'; 6 | import { FaceProcessor } from '../faceProcessor/FaceProcessor'; 7 | import { FaceExpressions } from './FaceExpressions'; 8 | 9 | export class FaceExpressionNet extends FaceProcessor { 10 | 11 | constructor(faceFeatureExtractor: FaceFeatureExtractor = new FaceFeatureExtractor()) { 12 | super('FaceExpressionNet', faceFeatureExtractor) 13 | } 14 | 15 | public forwardInput(input: NetInput | tf.Tensor4D): tf.Tensor2D { 16 | return tf.tidy(() => tf.softmax(this.runNet(input))) 17 | } 18 | 19 | public async forward(input: 
TNetInput): Promise { 20 | return this.forwardInput(await toNetInput(input)) 21 | } 22 | 23 | public async predictExpressions(input: TNetInput) { 24 | const netInput = await toNetInput(input) 25 | const out = await this.forwardInput(netInput) 26 | const probabilitesByBatch = await Promise.all(tf.unstack(out).map(async t => { 27 | const data = await t.data() 28 | t.dispose() 29 | return data 30 | })) 31 | out.dispose() 32 | 33 | const predictionsByBatch = probabilitesByBatch 34 | .map(probabilites => new FaceExpressions(probabilites as Float32Array)) 35 | 36 | return netInput.isBatchInput 37 | ? predictionsByBatch 38 | : predictionsByBatch[0] 39 | } 40 | 41 | protected getDefaultModelName(): string { 42 | return 'face_expression_model' 43 | } 44 | 45 | protected getClassifierChannelsIn(): number { 46 | return 256 47 | } 48 | 49 | protected getClassifierChannelsOut(): number { 50 | return 7 51 | } 52 | } -------------------------------------------------------------------------------- /src/mtcnn/extractImagePatches.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | import { Box, IDimensions } from '../classes'; 4 | import { createCanvas, createCanvasFromMedia, getContext2dOrThrow } from '../dom'; 5 | import { env } from '../env'; 6 | import { normalize } from './normalize'; 7 | 8 | export async function extractImagePatches( 9 | img: HTMLCanvasElement, 10 | boxes: Box[], 11 | { width, height }: IDimensions 12 | ): Promise { 13 | 14 | 15 | const imgCtx = getContext2dOrThrow(img) 16 | 17 | const bitmaps = await Promise.all(boxes.map(async box => { 18 | // TODO: correct padding 19 | const { y, ey, x, ex } = box.padAtBorders(img.height, img.width) 20 | 21 | const fromX = x - 1 22 | const fromY = y - 1 23 | const imgData = imgCtx.getImageData(fromX, fromY, (ex - fromX), (ey - fromY)) 24 | 25 | return env.isNodejs() ? 
createCanvasFromMedia(imgData) : createImageBitmap(imgData) 26 | })) 27 | 28 | const imagePatchesDatas: number[][] = [] 29 | 30 | bitmaps.forEach(bmp => { 31 | const patch = createCanvas({ width, height }) 32 | const patchCtx = getContext2dOrThrow(patch) 33 | patchCtx.drawImage(bmp, 0, 0, width, height) 34 | const { data } = patchCtx.getImageData(0, 0, width, height) 35 | 36 | const currData = [] 37 | // RGBA -> BGR 38 | for(let i = 0; i < data.length; i+=4) { 39 | currData.push(data[i + 2]) 40 | currData.push(data[i + 1]) 41 | currData.push(data[i]) 42 | } 43 | imagePatchesDatas.push(currData) 44 | }) 45 | 46 | 47 | return imagePatchesDatas.map(data => { 48 | const t = tf.tidy(() => { 49 | const imagePatchTensor = tf.transpose( 50 | tf.tensor4d(data, [1, width, height, 3]), 51 | [0, 2, 1, 3] 52 | ).toFloat() as tf.Tensor4D 53 | 54 | return normalize(imagePatchTensor) 55 | }) 56 | return t 57 | }) 58 | } -------------------------------------------------------------------------------- /src/globalApi/allFaces.ts: -------------------------------------------------------------------------------- 1 | import { TNetInput } from '../dom'; 2 | import { WithFaceDescriptor, WithFaceDetection, WithFaceLandmarks } from '../factories'; 3 | import { IMtcnnOptions, MtcnnOptions } from '../mtcnn/MtcnnOptions'; 4 | import { SsdMobilenetv1Options } from '../ssdMobilenetv1'; 5 | import { ITinyYolov2Options, TinyYolov2Options } from '../tinyYolov2'; 6 | import { detectAllFaces } from './detectFaces'; 7 | 8 | // export allFaces API for backward compatibility 9 | 10 | export async function allFacesSsdMobilenetv1( 11 | input: TNetInput, 12 | minConfidence?: number 13 | ): Promise>>[]> { 14 | console.warn('allFacesSsdMobilenetv1 is deprecated and will be removed soon, use the high level api instead') 15 | return await detectAllFaces(input, new SsdMobilenetv1Options(minConfidence ? 
{ minConfidence } : {})) 16 | .withFaceLandmarks() 17 | .withFaceDescriptors() 18 | } 19 | 20 | export async function allFacesTinyYolov2( 21 | input: TNetInput, 22 | forwardParams: ITinyYolov2Options = {} 23 | ): Promise>>[]> { 24 | console.warn('allFacesTinyYolov2 is deprecated and will be removed soon, use the high level api instead') 25 | return await detectAllFaces(input, new TinyYolov2Options(forwardParams)) 26 | .withFaceLandmarks() 27 | .withFaceDescriptors() 28 | } 29 | 30 | export async function allFacesMtcnn( 31 | input: TNetInput, 32 | forwardParams: IMtcnnOptions = {} 33 | ): Promise>>[]> { 34 | console.warn('allFacesMtcnn is deprecated and will be removed soon, use the high level api instead') 35 | return await detectAllFaces(input, new MtcnnOptions(forwardParams)) 36 | .withFaceLandmarks() 37 | .withFaceDescriptors() 38 | } 39 | 40 | export const allFaces = allFacesSsdMobilenetv1 41 | -------------------------------------------------------------------------------- /src/dom/extractFaceTensors.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | import { Rect } from '../classes'; 4 | import { FaceDetection } from '../classes/FaceDetection'; 5 | import { isTensor3D, isTensor4D } from '../utils'; 6 | 7 | /** 8 | * Extracts the tensors of the image regions containing the detected faces. 9 | * Useful if you want to compute the face descriptors for the face images. 10 | * Using this method is faster then extracting a canvas for each face and 11 | * converting them to tensors individually. 12 | * 13 | * @param imageTensor The image tensor that face detection has been performed on. 14 | * @param detections The face detection results or face bounding boxes for that image. 15 | * @returns Tensors of the corresponding image region for each detected face. 
16 | */ 17 | export async function extractFaceTensors( 18 | imageTensor: tf.Tensor3D | tf.Tensor4D, 19 | detections: Array 20 | ): Promise { 21 | 22 | if (!isTensor3D(imageTensor) && !isTensor4D(imageTensor)) { 23 | throw new Error('extractFaceTensors - expected image tensor to be 3D or 4D') 24 | } 25 | 26 | if (isTensor4D(imageTensor) && imageTensor.shape[0] > 1) { 27 | throw new Error('extractFaceTensors - batchSize > 1 not supported') 28 | } 29 | 30 | return tf.tidy(() => { 31 | const [imgHeight, imgWidth, numChannels] = imageTensor.shape.slice(isTensor4D(imageTensor) ? 1 : 0) 32 | 33 | const boxes = detections.map( 34 | det => det instanceof FaceDetection 35 | ? det.forSize(imgWidth, imgHeight).box 36 | : det 37 | ) 38 | .map(box => box.clipAtImageBorders(imgWidth, imgHeight)) 39 | 40 | const faceTensors = boxes.map(({ x, y, width, height }) => 41 | tf.slice3d(imageTensor.as3D(imgHeight, imgWidth, numChannels), [y, x, 0], [height, width, numChannels]) 42 | ) 43 | 44 | return faceTensors 45 | }) 46 | } -------------------------------------------------------------------------------- /src/tinyYolov2/config.ts: -------------------------------------------------------------------------------- 1 | import { Point } from '../classes/Point'; 2 | 3 | export type TinyYolov2Config = { 4 | withSeparableConvs: boolean 5 | iouThreshold: number 6 | anchors: Point[] 7 | classes: string[] 8 | meanRgb?: [number, number, number] 9 | withClassScores?: boolean, 10 | filterSizes?: number[] 11 | isFirstLayerConv2d?: boolean 12 | } 13 | 14 | const isNumber = (arg: any) => typeof arg === 'number' 15 | 16 | export function validateConfig(config: any) { 17 | if (!config) { 18 | throw new Error(`invalid config: ${config}`) 19 | } 20 | 21 | if (typeof config.withSeparableConvs !== 'boolean') { 22 | throw new Error(`config.withSeparableConvs has to be a boolean, have: ${config.withSeparableConvs}`) 23 | } 24 | 25 | if (!isNumber(config.iouThreshold) || config.iouThreshold < 0 || 
config.iouThreshold > 1.0) { 26 | throw new Error(`config.iouThreshold has to be a number between [0, 1], have: ${config.iouThreshold}`) 27 | } 28 | 29 | if ( 30 | !Array.isArray(config.classes) 31 | || !config.classes.length 32 | || !config.classes.every((c: any) => typeof c === 'string') 33 | ) { 34 | 35 | throw new Error(`config.classes has to be an array class names: string[], have: ${JSON.stringify(config.classes)}`) 36 | } 37 | 38 | if ( 39 | !Array.isArray(config.anchors) 40 | || !config.anchors.length 41 | || !config.anchors.map((a: any) => a || {}).every((a: any) => isNumber(a.x) && isNumber(a.y)) 42 | ) { 43 | 44 | throw new Error(`config.anchors has to be an array of { x: number, y: number }, have: ${JSON.stringify(config.anchors)}`) 45 | } 46 | 47 | if (config.meanRgb && ( 48 | !Array.isArray(config.meanRgb) 49 | || config.meanRgb.length !== 3 50 | || !config.meanRgb.every(isNumber) 51 | )) { 52 | 53 | throw new Error(`config.meanRgb has to be an array of shape [number, number, number], have: ${JSON.stringify(config.meanRgb)}`) 54 | } 55 | } -------------------------------------------------------------------------------- /src/dom/extractFaces.ts: -------------------------------------------------------------------------------- 1 | import { FaceDetection } from '../classes/FaceDetection'; 2 | import { Rect } from '../classes/Rect'; 3 | import { env } from '../env'; 4 | import { createCanvas } from './createCanvas'; 5 | import { getContext2dOrThrow } from './getContext2dOrThrow'; 6 | import { imageTensorToCanvas } from './imageTensorToCanvas'; 7 | import { toNetInput } from './toNetInput'; 8 | import { TNetInput } from './types'; 9 | 10 | /** 11 | * Extracts the image regions containing the detected faces. 12 | * 13 | * @param input The image that face detection has been performed on. 14 | * @param detections The face detection results or face bounding boxes for that image. 
15 | * @returns The Canvases of the corresponding image region for each detected face. 16 | */ 17 | export async function extractFaces( 18 | input: TNetInput, 19 | detections: Array 20 | ): Promise { 21 | 22 | const { Canvas } = env.getEnv() 23 | 24 | let canvas = input as HTMLCanvasElement 25 | 26 | if (!(input instanceof Canvas)) { 27 | const netInput = await toNetInput(input) 28 | 29 | if (netInput.batchSize > 1) { 30 | throw new Error('extractFaces - batchSize > 1 not supported') 31 | } 32 | 33 | const tensorOrCanvas = netInput.getInput(0) 34 | canvas = tensorOrCanvas instanceof Canvas 35 | ? tensorOrCanvas 36 | : await imageTensorToCanvas(tensorOrCanvas) 37 | } 38 | 39 | const ctx = getContext2dOrThrow(canvas) 40 | const boxes = detections.map( 41 | det => det instanceof FaceDetection 42 | ? det.forSize(canvas.width, canvas.height).box.floor() 43 | : det 44 | ) 45 | .map(box => box.clipAtImageBorders(canvas.width, canvas.height)) 46 | 47 | return boxes.map(({ x, y, width, height }) => { 48 | const faceImg = createCanvas({ width, height }) 49 | getContext2dOrThrow(faceImg) 50 | .putImageData(ctx.getImageData(x, y, width, height), 0, 0) 51 | return faceImg 52 | }) 53 | } -------------------------------------------------------------------------------- /examples/examples-nodejs/faceRecognition.ts: -------------------------------------------------------------------------------- 1 | import * as faceapi from 'face-api.js'; 2 | 3 | import { canvas, faceDetectionNet, faceDetectionOptions, saveFile } from './commons'; 4 | 5 | const REFERENCE_IMAGE = '../images/bbt1.jpg' 6 | const QUERY_IMAGE = '../images/bbt4.jpg' 7 | 8 | async function run() { 9 | 10 | await faceDetectionNet.loadFromDisk('../../weights') 11 | await faceapi.nets.faceLandmark68Net.loadFromDisk('../../weights') 12 | await faceapi.nets.faceRecognitionNet.loadFromDisk('../../weights') 13 | 14 | const referenceImage = await canvas.loadImage(REFERENCE_IMAGE) 15 | const queryImage = await 
/**
 * Options accepted by DrawBox / DrawBoxOptions.
 */
export interface IDrawBoxOptions {
  // stroke color of the box outline (any CSS color string)
  boxColor?: string
  // stroke width of the box outline in pixels
  lineWidth?: number
  // options forwarded to the DrawTextField used to render the label
  drawLabelOptions?: IDrawTextFieldOptions
  // optional text label drawn alongside the box
  label?: string
}
public lineWidth: number 15 | public drawLabelOptions: DrawTextFieldOptions 16 | public label?: string 17 | 18 | constructor(options: IDrawBoxOptions = {}) { 19 | const { boxColor, lineWidth, label, drawLabelOptions } = options 20 | this.boxColor = boxColor || 'rgba(0, 0, 255, 1)' 21 | this.lineWidth = lineWidth || 2 22 | this.label = label 23 | 24 | const defaultDrawLabelOptions = { 25 | anchorPosition: AnchorPosition.BOTTOM_LEFT, 26 | backgroundColor: this.boxColor 27 | } 28 | this.drawLabelOptions = new DrawTextFieldOptions(Object.assign({}, defaultDrawLabelOptions, drawLabelOptions)) 29 | } 30 | } 31 | 32 | export class DrawBox { 33 | public box: Box 34 | public options: DrawBoxOptions 35 | 36 | constructor( 37 | box: IBoundingBox | IRect, 38 | options: IDrawBoxOptions = {} 39 | ) { 40 | this.box = new Box(box) 41 | this.options = new DrawBoxOptions(options) 42 | } 43 | 44 | draw(canvasArg: string | HTMLCanvasElement | CanvasRenderingContext2D) { 45 | const ctx = getContext2dOrThrow(canvasArg) 46 | 47 | const { boxColor, lineWidth } = this.options 48 | 49 | const { x, y, width, height } = this.box 50 | ctx.strokeStyle = boxColor 51 | ctx.lineWidth = lineWidth 52 | ctx.strokeRect(x, y, width, height) 53 | 54 | const { label } = this.options 55 | if (label) { 56 | new DrawTextField([label], { x: x - (lineWidth / 2), y }, this.options.drawLabelOptions).draw(canvasArg) 57 | } 58 | } 59 | } -------------------------------------------------------------------------------- /src/faceFeatureExtractor/FaceFeatureExtractor.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | import { NetInput, TNetInput, toNetInput } from '../dom'; 4 | import { NeuralNetwork } from '../NeuralNetwork'; 5 | import { normalize } from '../ops'; 6 | import { denseBlock4 } from './denseBlock'; 7 | import { extractParams } from './extractParams'; 8 | import { extractParamsFromWeigthMap } from 
/**
 * CNN feature extraction backbone: four stacked dense blocks followed by an
 * average pooling layer, producing a 4D feature map from the input batch.
 */
export class FaceFeatureExtractor extends NeuralNetwork<FaceFeatureExtractorParams> implements IFaceFeatureExtractor<FaceFeatureExtractorParams> {

  constructor() {
    super('FaceFeatureExtractor')
  }

  /**
   * Runs the forward pass on an already resolved NetInput.
   *
   * @param input The batched network input.
   * @returns The extracted 4D feature map.
   * @throws If the model params have not been loaded yet.
   */
  public forwardInput(input: NetInput): tf.Tensor4D {

    const { params } = this

    if (!params) {
      throw new Error('FaceFeatureExtractor - load model before inference')
    }

    return tf.tidy(() => {
      // batch the input at 112x112 (second arg presumably toggles center
      // cropping/padding inside toBatchTensor — confirm against NetInput)
      const batchTensor = input.toBatchTensor(112, true)
      // subtract the per-channel rgb means, then divide by 255
      const meanRgb = [122.782, 117.001, 104.298]
      const normalized = normalize(batchTensor, meanRgb).div(tf.scalar(255)) as tf.Tensor4D

      // dense0 is the entry block (plain conv first layer, see denseBlock4)
      let out = denseBlock4(normalized, params.dense0, true)
      out = denseBlock4(out, params.dense1)
      out = denseBlock4(out, params.dense2)
      out = denseBlock4(out, params.dense3)
      out = tf.avgPool(out, [7, 7], [2, 2], 'valid')

      return out
    })
  }

  /**
   * Resolves any valid TNetInput and runs the forward pass on it.
   */
  public async forward(input: TNetInput): Promise<tf.Tensor4D> {
    return this.forwardInput(await toNetInput(input))
  }

  // default weight file name used by NeuralNetwork.load*
  protected getDefaultModelName(): string {
    return 'face_feature_extractor_model'
  }

  protected extractParamsFromWeigthMap(weightMap: tf.NamedTensorMap) {
    return extractParamsFromWeigthMap(weightMap)
  }

  protected extractParams(weights: Float32Array) {
    return extractParams(weights)
  }
}
/**
 * Reduced-size feature extraction backbone: three stacked dense blocks
 * followed by an average pooling layer (compare FaceFeatureExtractor,
 * which uses four denseBlock4 blocks).
 */
export class TinyFaceFeatureExtractor extends NeuralNetwork<TinyFaceFeatureExtractorParams> implements IFaceFeatureExtractor<TinyFaceFeatureExtractorParams> {

  constructor() {
    super('TinyFaceFeatureExtractor')
  }

  /**
   * Runs the forward pass on an already resolved NetInput.
   *
   * @param input The batched network input.
   * @returns The extracted 4D feature map.
   * @throws If the model params have not been loaded yet.
   */
  public forwardInput(input: NetInput): tf.Tensor4D {

    const { params } = this

    if (!params) {
      throw new Error('TinyFaceFeatureExtractor - load model before inference')
    }

    return tf.tidy(() => {
      // batch the input at 112x112 (same preprocessing as FaceFeatureExtractor)
      const batchTensor = input.toBatchTensor(112, true)
      // subtract the per-channel rgb means, then divide by 255
      const meanRgb = [122.782, 117.001, 104.298]
      const normalized = normalize(batchTensor, meanRgb).div(tf.scalar(255)) as tf.Tensor4D

      // dense0 is the entry block (plain conv first layer, see denseBlock3)
      let out = denseBlock3(normalized, params.dense0, true)
      out = denseBlock3(out, params.dense1)
      out = denseBlock3(out, params.dense2)
      out = tf.avgPool(out, [14, 14], [2, 2], 'valid')

      return out
    })
  }

  /**
   * Resolves any valid TNetInput and runs the forward pass on it.
   */
  public async forward(input: TNetInput): Promise<tf.Tensor4D> {
    return this.forwardInput(await toNetInput(input))
  }

  // default weight file name used by NeuralNetwork.load*
  protected getDefaultModelName(): string {
    return 'face_feature_extractor_tiny_model'
  }

  protected extractParamsFromWeigthMap(weightMap: tf.NamedTensorMap) {
    return extractParamsFromWeigthMapTiny(weightMap)
  }

  protected extractParams(weights: Float32Array) {
    return extractParamsTiny(weights)
  }
}
9 | 10 | export function isTensor1D(tensor: any): tensor is tf.Tensor1D { 11 | return isTensor(tensor, 1) 12 | } 13 | 14 | export function isTensor2D(tensor: any): tensor is tf.Tensor2D { 15 | return isTensor(tensor, 2) 16 | } 17 | 18 | export function isTensor3D(tensor: any): tensor is tf.Tensor3D { 19 | return isTensor(tensor, 3) 20 | } 21 | 22 | export function isTensor4D(tensor: any): tensor is tf.Tensor4D { 23 | return isTensor(tensor, 4) 24 | } 25 | 26 | export function isFloat(num: number) { 27 | return num % 1 !== 0 28 | } 29 | 30 | export function isEven(num: number) { 31 | return num % 2 === 0 32 | } 33 | 34 | export function round(num: number, prec: number = 2) { 35 | const f = Math.pow(10, prec) 36 | return Math.floor(num * f) / f 37 | } 38 | 39 | export function isDimensions(obj: any): boolean { 40 | return obj && obj.width && obj.height 41 | } 42 | 43 | export function computeReshapedDimensions({ width, height }: IDimensions, inputSize: number) { 44 | const scale = inputSize / Math.max(height, width) 45 | return new Dimensions(Math.round(width * scale), Math.round(height * scale)) 46 | } 47 | 48 | export function getCenterPoint(pts: Point[]): Point { 49 | return pts.reduce((sum, pt) => sum.add(pt), new Point(0, 0)) 50 | .div(new Point(pts.length, pts.length)) 51 | } 52 | 53 | export function range(num: number, start: number, step: number): number[] { 54 | return Array(num).fill(0).map((_, i) => start + (i * step)) 55 | } 56 | 57 | export function isValidNumber(num: any) { 58 | return !!num && num !== Infinity && num !== -Infinity && !isNaN(num) || num === 0 59 | } 60 | 61 | export function isValidProbablitiy(num: any) { 62 | return isValidNumber(num) && 0 <= num && num <= 1.0 63 | } -------------------------------------------------------------------------------- /src/tinyYolov2/TinyYolov2.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | import { FaceDetection, 
/**
 * Tiny YOLOv2 based single-class ('face') detector. Configures the base net
 * either with separable convolutions (smaller default model) or with plain
 * convolutions and class scores enabled.
 */
export class TinyYolov2 extends TinyYolov2Base {

  constructor(withSeparableConvs: boolean = true) {
    // shared base config, extended with the variant-specific anchors / options
    const config = Object.assign({}, {
      withSeparableConvs,
      iouThreshold: IOU_THRESHOLD,
      classes: ['face']
    },
    withSeparableConvs
      ? {
        anchors: BOX_ANCHORS_SEPARABLE,
        meanRgb: MEAN_RGB_SEPARABLE
      }
      : {
        anchors: BOX_ANCHORS,
        withClassScores: true
      }
    )

    super(config)
  }

  public get withSeparableConvs(): boolean {
    return this.config.withSeparableConvs
  }

  public get anchors(): Point[] {
    return this.config.anchors
  }

  /**
   * Detects faces and wraps each raw object detection into a FaceDetection.
   *
   * @param input The input image.
   * @param forwardParams Detection options forwarded to detect().
   * @returns One FaceDetection per detected face.
   */
  public async locateFaces(input: TNetInput, forwardParams: ITinyYolov2Options): Promise<FaceDetection[]> {
    const objectDetections = await this.detect(input, forwardParams)
    return objectDetections.map(det => new FaceDetection(det.score, det.relativeBox, { width: det.imageWidth, height: det.imageHeight }))
  }

  // the two model variants are stored under different default file names
  protected getDefaultModelName(): string {
    return this.withSeparableConvs ? DEFAULT_MODEL_NAME_SEPARABLE_CONV : DEFAULT_MODEL_NAME
  }

  protected extractParamsFromWeigthMap(weightMap: tf.NamedTensorMap): { params: TinyYolov2NetParams, paramMappings: ParamMapping[] } {
    return super.extractParamsFromWeigthMap(weightMap)
  }
}
/**
 * Overrides parts of the current environment implementation with
 * user-provided members (e.g. to supply Canvas/Image implementations),
 * initializing the environment first if necessary.
 *
 * @param env The environment members to patch; members that are not
 *   provided keep their current implementation.
 * @throws If no environment could be initialized beforehand.
 */
function monkeyPatch(env: Partial<Environment>) {
  if (!environment) {
    initialize()
  }

  if (!environment) {
    throw new Error('monkeyPatch - environment is not defined, check isNodejs() and isBrowser()')
  }

  // destructuring defaults fall back to the currently installed classes
  const { Canvas = environment.Canvas, Image = environment.Image } = env
  environment.Canvas = Canvas
  environment.Image = Image
  // default factories construct instances of the (possibly patched) classes
  environment.createCanvasElement = env.createCanvasElement || (() => new Canvas())
  environment.createImageElement = env.createImageElement || (() => new Image())

  environment.ImageData = env.ImageData || environment.ImageData
  environment.Video = env.Video || environment.Video
  environment.fetch = env.fetch || environment.fetch
  environment.readFile = env.readFile || environment.readFile
}
ConvPairParams 34 | conv_8: ConvPairParams 35 | conv_9: ConvPairParams 36 | conv_10: ConvPairParams 37 | conv_11: ConvPairParams 38 | conv_12: ConvPairParams 39 | conv_13: ConvPairParams 40 | } 41 | 42 | } 43 | 44 | export type BoxPredictionParams = { 45 | box_encoding_predictor: ConvParams 46 | class_predictor: ConvParams 47 | } 48 | 49 | export type PredictionLayerParams = { 50 | conv_0: PointwiseConvParams 51 | conv_1: PointwiseConvParams 52 | conv_2: PointwiseConvParams 53 | conv_3: PointwiseConvParams 54 | conv_4: PointwiseConvParams 55 | conv_5: PointwiseConvParams 56 | conv_6: PointwiseConvParams 57 | conv_7: PointwiseConvParams 58 | box_predictor_0: BoxPredictionParams 59 | box_predictor_1: BoxPredictionParams 60 | box_predictor_2: BoxPredictionParams 61 | box_predictor_3: BoxPredictionParams 62 | box_predictor_4: BoxPredictionParams 63 | box_predictor_5: BoxPredictionParams 64 | } 65 | 66 | export type OutputLayerParams = { 67 | extra_dim: tf.Tensor3D 68 | } 69 | 70 | export type NetParams = { 71 | mobilenetv1: MobileNetV1.Params, 72 | prediction_layer: PredictionLayerParams, 73 | output_layer: OutputLayerParams 74 | } -------------------------------------------------------------------------------- /test/tests/classes/Rect.test.ts: -------------------------------------------------------------------------------- 1 | import { Box } from '../../../src/classes/Box'; 2 | import { Rect } from '../../../src/classes/Rect'; 3 | 4 | describe('Rect', () => { 5 | 6 | describe('constructor', () => { 7 | 8 | it('can be created', () => { 9 | const rect = new Rect(0, 10, 20, 30) 10 | expect(rect instanceof Rect).toBe(true) 11 | expect(rect instanceof Box).toBe(true) 12 | expect(rect.x).toEqual(0) 13 | expect(rect.y).toEqual(10) 14 | expect(rect.width).toEqual(20) 15 | expect(rect.height).toEqual(30) 16 | }) 17 | 18 | it('throws if coordinates are invalid', () => { 19 | 20 | const expectConstructorToThrow = (x: any, y: any, width: any, height: any) => { 21 | expect(() => 
new Rect(x, y, width, height)).toThrowError(`Box.constructor - expected box to be IBoundingBox | IRect, instead have ${JSON.stringify({ x, y, width, height })}`) 22 | } 23 | 24 | expectConstructorToThrow(NaN, 10, 20, 30) 25 | expectConstructorToThrow(0, Infinity, 20, 30) 26 | expectConstructorToThrow(0, 10, -Infinity, 30) 27 | expectConstructorToThrow(0, 10, 20, null) 28 | expectConstructorToThrow(NaN, Infinity, undefined, null) 29 | expectConstructorToThrow(undefined, undefined, undefined, undefined) 30 | }) 31 | 32 | it('throws if height or width invalid', () => { 33 | expect(() => new Rect(0, 10, -20, 30, false)).toThrowError('Box.constructor - width (-20) and height (30) must be positive numbers') 34 | expect(() => new Rect(0, 10, 20, -30, false)).toThrowError('Box.constructor - width (20) and height (-30) must be positive numbers') 35 | }) 36 | 37 | it('properties', () => { 38 | const rect = new Rect(5, 10, 15, 20) 39 | expect(rect.left).toEqual(5) 40 | expect(rect.x).toEqual(5) 41 | expect(rect.top).toEqual(10) 42 | expect(rect.y).toEqual(10) 43 | expect(rect.right).toEqual(20) 44 | expect(rect.bottom).toEqual(30) 45 | expect(rect.width).toEqual(15) 46 | expect(rect.height).toEqual(20) 47 | expect(rect.area).toEqual(300) 48 | }) 49 | 50 | }) 51 | 52 | }) -------------------------------------------------------------------------------- /src/ssdMobilenetv1/mobileNetV1.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | import { pointwiseConvLayer } from './pointwiseConvLayer'; 4 | import { MobileNetV1 } from './types'; 5 | 6 | const epsilon = 0.0010000000474974513 7 | 8 | function depthwiseConvLayer( 9 | x: tf.Tensor4D, 10 | params: MobileNetV1.DepthwiseConvParams, 11 | strides: [number, number] 12 | ) { 13 | return tf.tidy(() => { 14 | 15 | let out = tf.depthwiseConv2d(x, params.filters, strides, 'same') 16 | out = tf.batchNorm( 17 | out, 18 | params.batch_norm_mean, 19 | 
params.batch_norm_variance, 20 | params.batch_norm_offset, 21 | params.batch_norm_scale, 22 | epsilon 23 | ) 24 | return tf.clipByValue(out, 0, 6) 25 | 26 | }) 27 | } 28 | 29 | function getStridesForLayerIdx(layerIdx: number): [number, number] { 30 | return [2, 4, 6, 12].some(idx => idx === layerIdx) ? [2, 2] : [1, 1] 31 | } 32 | 33 | export function mobileNetV1(x: tf.Tensor4D, params: MobileNetV1.Params) { 34 | return tf.tidy(() => { 35 | 36 | let conv11 = null 37 | let out = pointwiseConvLayer(x, params.conv_0, [2, 2]) 38 | 39 | const convPairParams = [ 40 | params.conv_1, 41 | params.conv_2, 42 | params.conv_3, 43 | params.conv_4, 44 | params.conv_5, 45 | params.conv_6, 46 | params.conv_7, 47 | params.conv_8, 48 | params.conv_9, 49 | params.conv_10, 50 | params.conv_11, 51 | params.conv_12, 52 | params.conv_13 53 | ] 54 | 55 | convPairParams.forEach((param, i) => { 56 | const layerIdx = i + 1 57 | const depthwiseConvStrides = getStridesForLayerIdx(layerIdx) 58 | out = depthwiseConvLayer(out, param.depthwise_conv, depthwiseConvStrides) 59 | out = pointwiseConvLayer(out, param.pointwise_conv, [1, 1]) 60 | if (layerIdx === 11) { 61 | conv11 = out 62 | } 63 | }) 64 | 65 | if (conv11 === null) { 66 | throw new Error('mobileNetV1 - output of conv layer 11 is null') 67 | } 68 | 69 | return { 70 | out, 71 | conv11: conv11 as any 72 | } 73 | 74 | }) 75 | } -------------------------------------------------------------------------------- /test/tests-legacy/faceLandmark68Net.uncompressed.test.ts: -------------------------------------------------------------------------------- 1 | import { FaceLandmarks68, Point } from '../../src'; 2 | import { getTestEnv } from '../env'; 3 | import { describeWithBackend, describeWithNets, expectPointClose } from '../utils'; 4 | 5 | describeWithBackend('faceLandmark68Net, uncompressed', () => { 6 | 7 | let imgEl1: HTMLImageElement 8 | let imgElRect: HTMLImageElement 9 | let faceLandmarkPositions1: Point[] 10 | let faceLandmarkPositionsRect: 
Point[] 11 | 12 | beforeAll(async () => { 13 | imgEl1 = await getTestEnv().loadImage('test/images/face1.png') 14 | imgElRect = await getTestEnv().loadImage('test/images/face_rectangular.png') 15 | faceLandmarkPositions1 = await getTestEnv().loadJson('test/data/faceLandmarkPositions1.json') 16 | faceLandmarkPositionsRect = await getTestEnv().loadJson('test/data/faceLandmarkPositionsRect.json') 17 | }) 18 | 19 | describeWithNets('uncompressed weights', { withFaceLandmark68Net: { quantized: false } }, ({ faceLandmark68Net }) => { 20 | 21 | it('computes face landmarks for squared input', async () => { 22 | const { width, height } = imgEl1 23 | 24 | const result = await faceLandmark68Net.detectLandmarks(imgEl1) as FaceLandmarks68 25 | expect(result.imageWidth).toEqual(width) 26 | expect(result.imageHeight).toEqual(height) 27 | expect(result.shift.x).toEqual(0) 28 | expect(result.shift.y).toEqual(0) 29 | result.positions.forEach((pt, i) => { 30 | const { x, y } = faceLandmarkPositions1[i] 31 | expectPointClose(pt, { x, y }, 1) 32 | }) 33 | }) 34 | 35 | it('computes face landmarks for rectangular input', async () => { 36 | const { width, height } = imgElRect 37 | 38 | const result = await faceLandmark68Net.detectLandmarks(imgElRect) as FaceLandmarks68 39 | expect(result.imageWidth).toEqual(width) 40 | expect(result.imageHeight).toEqual(height) 41 | expect(result.shift.x).toEqual(0) 42 | expect(result.shift.y).toEqual(0) 43 | result.positions.forEach((pt, i) => { 44 | const { x, y } = faceLandmarkPositionsRect[i] 45 | expectPointClose(pt, { x, y }, 5) 46 | }) 47 | }) 48 | 49 | }) 50 | 51 | }) 52 | 53 | -------------------------------------------------------------------------------- /src/faceFeatureExtractor/denseBlock.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | import { ConvParams, SeparableConvParams } from '../common'; 4 | import { depthwiseSeparableConv } from 
/**
 * Three layer densely connected conv block: every conv consumes the relu'd
 * sum of all previous conv outputs, and the block returns the relu'd sum of
 * all three outputs.
 *
 * @param x The input feature map.
 * @param denseBlockParams The weights for the three convs of this block.
 * @param isFirstLayer If true, conv0 is a plain conv2d (with bias) instead
 *   of a depthwise separable conv — used for the entry block of the net.
 * @returns The output feature map (spatially downscaled by stride 2).
 */
export function denseBlock3(
  x: tf.Tensor4D,
  denseBlockParams: DenseBlock3Params,
  isFirstLayer: boolean = false
): tf.Tensor4D {
  return tf.tidy(() => {
    // conv0 downscales with stride 2; all following convs keep the resolution
    const out1 = tf.relu(
      isFirstLayer
        ? tf.add(
          tf.conv2d(x, (denseBlockParams.conv0 as ConvParams).filters, [2, 2], 'same'),
          denseBlockParams.conv0.bias
        )
        : depthwiseSeparableConv(x, denseBlockParams.conv0 as SeparableConvParams, [2, 2])
    ) as tf.Tensor4D
    const out2 = depthwiseSeparableConv(out1, denseBlockParams.conv1, [1, 1])

    // dense connectivity: conv2 sees the sum of both previous outputs
    const in3 = tf.relu(tf.add(out1, out2)) as tf.Tensor4D
    const out3 = depthwiseSeparableConv(in3, denseBlockParams.conv2, [1, 1])

    return tf.relu(tf.add(out1, tf.add(out2, out3))) as tf.Tensor4D
  })
}
/**
 * SSD prediction head: builds a pyramid of progressively downscaled feature
 * maps on top of the MobileNetV1 output and runs a box/class predictor on
 * six of them (including the backbone's conv11 output).
 *
 * @param x The final MobileNetV1 feature map.
 * @param conv11 The intermediate conv layer 11 output of the backbone.
 * @param params The prediction layer weights.
 * @returns The box encodings and class predictions, each concatenated over
 *   all six predictor outputs along axis 1.
 */
export function predictionLayer(
  x: tf.Tensor4D,
  conv11: tf.Tensor4D,
  params: PredictionLayerParams
) {
  return tf.tidy(() => {

    // alternating stride-1 / stride-2 pointwise convs build the feature pyramid
    const conv0 = pointwiseConvLayer(x, params.conv_0, [1, 1])
    const conv1 = pointwiseConvLayer(conv0, params.conv_1, [2, 2])
    const conv2 = pointwiseConvLayer(conv1, params.conv_2, [1, 1])
    const conv3 = pointwiseConvLayer(conv2, params.conv_3, [2, 2])
    const conv4 = pointwiseConvLayer(conv3, params.conv_4, [1, 1])
    const conv5 = pointwiseConvLayer(conv4, params.conv_5, [2, 2])
    const conv6 = pointwiseConvLayer(conv5, params.conv_6, [1, 1])
    const conv7 = pointwiseConvLayer(conv6, params.conv_7, [2, 2])

    // predictors run on the stride-2 pyramid levels plus conv11 and x
    const boxPrediction0 = boxPredictionLayer(conv11, params.box_predictor_0)
    const boxPrediction1 = boxPredictionLayer(x, params.box_predictor_1)
    const boxPrediction2 = boxPredictionLayer(conv1, params.box_predictor_2)
    const boxPrediction3 = boxPredictionLayer(conv3, params.box_predictor_3)
    const boxPrediction4 = boxPredictionLayer(conv5, params.box_predictor_4)
    const boxPrediction5 = boxPredictionLayer(conv7, params.box_predictor_5)

    const boxPredictions = tf.concat([
      boxPrediction0.boxPredictionEncoding,
      boxPrediction1.boxPredictionEncoding,
      boxPrediction2.boxPredictionEncoding,
      boxPrediction3.boxPredictionEncoding,
      boxPrediction4.boxPredictionEncoding,
      boxPrediction5.boxPredictionEncoding
    ], 1) as tf.Tensor4D

    const classPredictions = tf.concat([
      boxPrediction0.classPrediction,
      boxPrediction1.classPrediction,
      boxPrediction2.classPrediction,
      boxPrediction3.classPrediction,
      boxPrediction4.classPrediction,
      boxPrediction5.classPrediction
    ], 1) as tf.Tensor4D

    return {
      boxPredictions,
      classPredictions
    }
  })
}
weights', { withFaceLandmark68TinyNet: { quantized: false } }, ({ faceLandmark68TinyNet }) => { 21 | 22 | it('computes face landmarks for squared input', async () => { 23 | const { width, height } = imgEl1 24 | 25 | const result = await faceLandmark68TinyNet.detectLandmarks(imgEl1) as FaceLandmarks68 26 | expect(result.imageWidth).toEqual(width) 27 | expect(result.imageHeight).toEqual(height) 28 | expect(result.shift.x).toEqual(0) 29 | expect(result.shift.y).toEqual(0) 30 | result.positions.forEach((pt, i) => { 31 | const { x, y } = faceLandmarkPositions1[i] 32 | expectPointClose(pt, { x, y }, 5) 33 | }) 34 | }) 35 | 36 | it('computes face landmarks for rectangular input', async () => { 37 | const { width, height } = imgElRect 38 | 39 | const result = await faceLandmark68TinyNet.detectLandmarks(imgElRect) as FaceLandmarks68 40 | expect(result.imageWidth).toEqual(width) 41 | expect(result.imageHeight).toEqual(height) 42 | expect(result.shift.x).toEqual(0) 43 | expect(result.shift.y).toEqual(0) 44 | result.positions.forEach((pt, i) => { 45 | const { x, y } = faceLandmarkPositionsRect[i] 46 | expectPointClose(pt, { x, y }, 5) 47 | }) 48 | }) 49 | 50 | }) 51 | 52 | }) 53 | 54 | -------------------------------------------------------------------------------- /src/dom/toNetInput.ts: -------------------------------------------------------------------------------- 1 | import { isTensor3D, isTensor4D } from '../utils'; 2 | import { awaitMediaLoaded } from './awaitMediaLoaded'; 3 | import { isMediaElement } from './isMediaElement'; 4 | import { NetInput } from './NetInput'; 5 | import { resolveInput } from './resolveInput'; 6 | import { TNetInput } from './types'; 7 | 8 | /** 9 | * Validates the input to make sure, they are valid net inputs and awaits all media elements 10 | * to be finished loading. 11 | * 12 | * @param input The input, which can be a media element or an array of different media elements. 
13 | * @returns A NetInput instance, which can be passed into one of the neural networks. 14 | */ 15 | export async function toNetInput(inputs: TNetInput): Promise { 16 | if (inputs instanceof NetInput) { 17 | return inputs 18 | } 19 | 20 | let inputArgArray = Array.isArray(inputs) 21 | ? inputs 22 | : [inputs] 23 | 24 | if (!inputArgArray.length) { 25 | throw new Error('toNetInput - empty array passed as input') 26 | } 27 | 28 | const getIdxHint = (idx: number) => Array.isArray(inputs) ? ` at input index ${idx}:` : '' 29 | 30 | const inputArray = inputArgArray.map(resolveInput) 31 | 32 | inputArray.forEach((input, i) => { 33 | if (!isMediaElement(input) && !isTensor3D(input) && !isTensor4D(input)) { 34 | 35 | if (typeof inputArgArray[i] === 'string') { 36 | throw new Error(`toNetInput -${getIdxHint(i)} string passed, but could not resolve HTMLElement for element id ${inputArgArray[i]}`) 37 | } 38 | 39 | throw new Error(`toNetInput -${getIdxHint(i)} expected media to be of type HTMLImageElement | HTMLVideoElement | HTMLCanvasElement | tf.Tensor3D, or to be an element id`) 40 | } 41 | 42 | if (isTensor4D(input)) { 43 | // if tf.Tensor4D is passed in the input array, the batch size has to be 1 44 | const batchSize = input.shape[0] 45 | if (batchSize !== 1) { 46 | throw new Error(`toNetInput -${getIdxHint(i)} tf.Tensor4D with batchSize ${batchSize} passed, but not supported in input array`) 47 | } 48 | } 49 | }) 50 | 51 | // wait for all media elements being loaded 52 | await Promise.all( 53 | inputArray.map(input => isMediaElement(input) && awaitMediaLoaded(input)) 54 | ) 55 | 56 | return new NetInput(inputArray, Array.isArray(inputs)) 57 | } --------------------------------------------------------------------------------