├── .gitignore
├── .idea
│   ├── codeStyles
│   │   ├── Project.xml
│   │   └── codeStyleConfig.xml
│   ├── misc.xml
│   ├── modules.xml
│   ├── runConfigurations.xml
│   └── vcs.xml
├── README.md
├── app
│   ├── .gitignore
│   ├── build.gradle
│   ├── proguard-rules.pro
│   └── src
│       └── main
│           ├── AndroidManifest.xml
│           ├── java
│           │   └── devnibbles
│           │       └── android
│           │           └── facialrecognition
│           │               ├── AbstractActivity.kt
│           │               ├── GoogleVisionActivity.kt
│           │               ├── MLKitActivity.kt
│           │               ├── classify
│           │               │   ├── CloudAutoMLModel.kt
│           │               │   ├── CloudAutoMLService.kt
│           │               │   ├── CloudAutoMLViewModel.kt
│           │               │   └── common
│           │               │       ├── AbstractViewModel.kt
│           │               │       ├── Extensions.kt
│           │               │       ├── FaceClassification.kt
│           │               │       └── Resource.kt
│           │               └── detect
│           │                   ├── common
│           │                   │   ├── AbstractFaceGraphic.kt
│           │                   │   ├── CameraSourcePreview.kt
│           │                   │   ├── GraphicOverlay.kt
│           │                   │   └── ICameraSource.kt
│           │                   ├── googlevision
│           │                   │   ├── FaceGraphic.kt
│           │                   │   ├── GVCameraSource.kt
│           │                   │   └── SaveFrameFaceDetector.kt
│           │                   └── mlkit
│           │                       ├── FaceDetector.kt
│           │                       ├── FaceGraphic.kt
│           │                       ├── FrameMetadata.kt
│           │                       ├── IFrameProcessor.kt
│           │                       └── MLCameraSource.kt
│           └── res
│               ├── layout
│               │   └── activity_main.xml
│               ├── mipmap-hdpi
│               │   └── ic_launcher.png
│               ├── mipmap-mdpi
│               │   └── ic_launcher.png
│               ├── mipmap-xhdpi
│               │   └── ic_launcher.png
│               ├── mipmap-xxhdpi
│               │   └── ic_launcher.png
│               ├── mipmap-xxxhdpi
│               │   └── ic_launcher.png
│               └── values
│                   ├── colors.xml
│                   ├── strings.xml
│                   └── styles.xml
├── build.gradle
├── gradle.properties
├── gradle
│   └── wrapper
│       ├── gradle-wrapper.jar
│       └── gradle-wrapper.properties
├── gradlew
├── gradlew.bat
└── settings.gradle
/.gitignore:
--------------------------------------------------------------------------------
1 | # Built application files
2 | *.apk
3 | *.ap_
4 |
5 | # Files for the ART/Dalvik VM
6 | *.dex
7 |
8 | # Java class files
9 | *.class
10 |
11 | # Generated files
12 | bin/
13 | gen/
14 | out/
15 |
16 | # Gradle files
17 | .gradle/
18 | build/
19 |
20 | # Local configuration file (sdk path, etc)
21 | local.properties
22 |
23 | # Proguard folder generated by Eclipse
24 | proguard/
25 |
26 | # Log Files
27 | *.log
28 |
29 | # Android Studio Navigation editor temp files
30 | .navigation/
31 |
32 | # Android Studio captures folder
33 | captures/
34 |
35 | # IntelliJ
36 | *.iml
37 | .idea/workspace.xml
38 | .idea/tasks.xml
39 | .idea/gradle.xml
40 | .idea/assetWizardSettings.xml
41 | .idea/dictionaries
42 | .idea/libraries
43 | .idea/caches
44 |
45 | # Keystore files
46 | # Uncomment the following line if you do not want to check your keystore files in.
47 | #*.jks
48 |
49 | # External native build folder generated in Android Studio 2.2 and later
50 | .externalNativeBuild
51 |
52 | # Google Services (e.g. APIs or Firebase)
53 | google-services.json
54 |
55 | # Freeline
56 | freeline.py
57 | freeline/
58 | freeline_project_description.json
59 |
60 | # fastlane
61 | fastlane/report.xml
62 | fastlane/Preview.html
63 | fastlane/screenshots
64 | fastlane/test_output
65 | fastlane/readme.md
66 |
--------------------------------------------------------------------------------
/.idea/codeStyles/Project.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/codeStyles/codeStyleConfig.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/misc.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/runConfigurations.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | Source code for the DevNibbles article series - Facial Recognition with Android
2 |
3 | Part 1 - https://medium.com/devnibbles/facial-recognition-with-android-1-4-5e043c264edc
4 | Part 2 - https://medium.com/devnibbles/facial-recognition-with-android-2-4-d02f03f2a11e
5 |
--------------------------------------------------------------------------------
/app/.gitignore:
--------------------------------------------------------------------------------
1 | /build
2 |
--------------------------------------------------------------------------------
/app/build.gradle:
--------------------------------------------------------------------------------
1 | apply plugin: 'com.android.application'
2 | apply plugin: 'kotlin-android'
3 | apply plugin: 'kotlin-android-extensions'
4 | apply plugin: 'kotlin-kapt'
5 |
6 | android {
7 | compileSdkVersion 28
8 | defaultConfig {
9 | applicationId "devnibbles.android.facialrecognition"
10 | minSdkVersion 21
11 | targetSdkVersion 28
12 | versionCode 1
13 | versionName "1.0"
14 | }
15 | buildTypes {
16 | release {
17 | minifyEnabled false
18 | proguardFiles getDefaultProguardFile('proguard-android-optimize.txt'), 'proguard-rules.pro'
19 | }
20 | }
21 |
22 | packagingOptions {
23 | exclude 'META-INF/INDEX.LIST'
24 | }
25 |
26 | compileOptions {
27 | sourceCompatibility JavaVersion.VERSION_1_8
28 | targetCompatibility JavaVersion.VERSION_1_8
29 | }
30 | }
31 |
32 | dependencies {
33 | implementation fileTree(dir: 'libs', include: ['*.jar'])
34 | implementation "org.jetbrains.kotlin:kotlin-stdlib-jdk8:$kotlin_version"
35 | implementation 'org.jetbrains.kotlinx:kotlinx-coroutines-android:1.1.1'
36 | implementation 'androidx.appcompat:appcompat:1.0.2'
37 | implementation 'com.google.android.material:material:1.0.0'
38 |
39 | // ViewModel dependencies
40 | implementation 'androidx.lifecycle:lifecycle-extensions:2.0.0'
41 | kapt 'androidx.lifecycle:lifecycle-compiler:2.0.0'
42 |
43 | // Google Vision dependencies
44 | implementation 'com.google.android.gms:play-services-vision:17.0.2'
45 |
46 | // ML Kit dependencies
47 | implementation 'com.google.firebase:firebase-ml-vision:19.0.2'
48 | implementation 'com.google.firebase:firebase-ml-vision-face-model:17.0.2'
49 |
50 | // Cloud AutoML SDK
51 | implementation 'com.google.cloud:google-cloud-automl:0.55.1-beta'
52 | implementation 'io.grpc:grpc-okhttp:1.13.1' // specific version needed for cloud-automl
53 |
54 | // Retrofit
55 | implementation 'com.google.code.gson:gson:2.8.5'
56 | implementation 'com.squareup.retrofit2:retrofit:2.4.0'
57 | implementation 'com.squareup.retrofit2:converter-gson:2.4.0'
58 | implementation 'com.squareup.retrofit2:adapter-rxjava:2.4.0'
59 | implementation 'com.jakewharton.retrofit:retrofit2-kotlin-coroutines-adapter:0.9.2'
60 | implementation 'com.squareup.okhttp3:logging-interceptor:3.11.0'
61 |
62 | }
63 | apply plugin: 'com.google.gms.google-services' // expects app/google-services.json from the Firebase console (the root .gitignore deliberately excludes it)
64 |
--------------------------------------------------------------------------------
/app/proguard-rules.pro:
--------------------------------------------------------------------------------
1 | # Add project specific ProGuard rules here.
2 | # You can control the set of applied configuration files using the
3 | # proguardFiles setting in build.gradle.
4 | #
5 | # For more details, see
6 | # http://developer.android.com/guide/developing/tools/proguard.html
7 |
8 | # If your project uses WebView with JS, uncomment the following
9 | # and specify the fully qualified class name to the JavaScript interface
10 | # class:
11 | #-keepclassmembers class fqcn.of.javascript.interface.for.webview {
12 | # public *;
13 | #}
14 |
15 | # Uncomment this to preserve the line number information for
16 | # debugging stack traces.
17 | #-keepattributes SourceFile,LineNumberTable
18 |
19 | # If you keep the line number information, uncomment this to
20 | # hide the original source file name.
21 | #-renamesourcefileattribute SourceFile
22 |
--------------------------------------------------------------------------------
/app/src/main/AndroidManifest.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/app/src/main/java/devnibbles/android/facialrecognition/AbstractActivity.kt:
--------------------------------------------------------------------------------
1 | package devnibbles.android.facialrecognition
2 |
3 | import android.Manifest
4 | import android.app.AlertDialog
5 | import android.content.DialogInterface
6 | import android.content.pm.PackageManager
7 | import androidx.appcompat.app.AppCompatActivity
8 | import android.os.Bundle
9 | import android.util.Log
10 | import android.view.View
11 | import androidx.core.app.ActivityCompat
12 | import com.google.android.gms.common.ConnectionResult
13 | import com.google.android.gms.common.GoogleApiAvailability
14 | import com.google.android.material.snackbar.Snackbar
15 | import devnibbles.android.facialrecognition.detect.common.CameraSourcePreview
16 | import devnibbles.android.facialrecognition.detect.common.GraphicOverlay
17 |
18 | abstract class AbstractActivity : AppCompatActivity() {
19 |
20 | companion object {
21 | private const val TAG = "AbstractActivity"
22 |
23 | private const val RC_HANDLE_GMS = 9001
24 | private const val RC_HANDLE_CAMERA_PERM = 2
25 | }
26 |
27 | protected lateinit var mCameraPreview: CameraSourcePreview
28 | protected lateinit var mGraphicOverlay: GraphicOverlay
29 |
30 | //==============================================================================================
31 | // Activity Methods
32 | //==============================================================================================
33 |
34 | /**
35 | * Initializes the UI and initiates the creation of a face detector.
36 | */
37 | public override fun onCreate(icicle: Bundle?) {
38 | super.onCreate(icicle)
39 | setContentView(R.layout.activity_main)
40 |
41 | mCameraPreview = findViewById(R.id.preview)
42 | mGraphicOverlay = findViewById(R.id.faceOverlay)
43 |
44 | // Check for the camera permission before accessing the camera. If the
45 | // permission is not granted yet, request permission.
46 | val rc = ActivityCompat.checkSelfPermission(this, Manifest.permission.CAMERA)
47 | if (rc == PackageManager.PERMISSION_GRANTED) {
48 | createCameraSource()
49 | } else {
50 | requestCameraPermission()
51 | }
52 |
53 | }
54 |
55 | /**
56 | * Handles the requesting of the camera permission. This includes
57 | * showing a "Snackbar" message of why the permission is needed then
58 | * sending the request.
59 | */
60 | private fun requestCameraPermission() {
61 | Log.w(TAG, "Camera permission is not granted. Requesting permission")
62 |
63 | val permissions = arrayOf(Manifest.permission.CAMERA)
64 |
65 | if (!ActivityCompat.shouldShowRequestPermissionRationale(this,
66 | Manifest.permission.CAMERA)) {
67 | ActivityCompat.requestPermissions(this, permissions,
68 | RC_HANDLE_CAMERA_PERM
69 | )
70 | return
71 | }
72 |
73 | val thisActivity = this
74 |
75 | val listener = View.OnClickListener {
76 | ActivityCompat.requestPermissions(thisActivity, permissions,
77 | RC_HANDLE_CAMERA_PERM
78 | )
79 | }
80 |
81 | Snackbar.make(mGraphicOverlay as View, R.string.permission_camera_rationale,
82 | Snackbar.LENGTH_INDEFINITE)
83 | .setAction(R.string.ok, listener)
84 | .show()
85 | }
86 |
87 | abstract fun createCameraSource()
88 |
89 | abstract fun startCameraSource()
90 |
91 | abstract fun releaseCameraSource()
92 |
93 |
94 | /**
95 | * Restarts the camera.
96 | */
97 | override fun onResume() {
98 | super.onResume()
99 |
100 | startCameraSource()
101 | }
102 |
103 | /**
104 | * Stops the camera.
105 | */
106 | override fun onPause() {
107 | super.onPause()
108 | mCameraPreview.stop()
109 | }
110 |
111 | override fun onDestroy() {
112 | super.onDestroy()
113 |
114 | releaseCameraSource()
115 | }
116 |
117 | protected fun checkGooglePlayServices() {
118 | // check that the device has play services available.
119 | val code = GoogleApiAvailability.getInstance().isGooglePlayServicesAvailable(applicationContext)
120 | if (code != ConnectionResult.SUCCESS) {
121 | GoogleApiAvailability.getInstance().getErrorDialog(this, code,
122 | RC_HANDLE_GMS
123 | ).show()
124 | }
125 | }
126 |
127 | /**
128 | * Callback for the result from requesting permissions. This method
129 | * is invoked for every call on [.requestPermissions].
130 | *
131 | *
132 | * **Note:** It is possible that the permissions request interaction
133 | * with the user is interrupted. In this case you will receive empty permissions
134 | * and results arrays which should be treated as a cancellation.
135 | *
136 | *
137 | * @param requestCode The request code passed in [.requestPermissions].
138 | * @param permissions The requested permissions. Never null.
139 | * @param grantResults The grant results for the corresponding permissions
140 | * which is either [PackageManager.PERMISSION_GRANTED]
141 | * or [PackageManager.PERMISSION_DENIED]. Never null.
142 | * @see .requestPermissions
143 | */
144 | override fun onRequestPermissionsResult(requestCode: Int, permissions: Array<String>, grantResults: IntArray) {
145 | if (requestCode != RC_HANDLE_CAMERA_PERM) {
146 | Log.d(TAG, "Got unexpected permission result: $requestCode")
147 | super.onRequestPermissionsResult(requestCode, permissions, grantResults)
148 | return
149 | }
150 |
151 | if (grantResults.isNotEmpty() && grantResults[0] == PackageManager.PERMISSION_GRANTED) {
152 | Log.d(TAG, "Camera permission granted - initialize the camera source")
153 | // we have permission, so create the camera source
154 | createCameraSource()
155 | return
156 | }
157 |
158 | Log.e(
159 | TAG, "Permission not granted: results len = " + grantResults.size +
160 | " Result code = " + if (grantResults.isNotEmpty()) grantResults[0] else "(empty)")
161 |
162 | val listener = DialogInterface.OnClickListener { _, _ -> finish() }
163 |
164 | val builder = AlertDialog.Builder(this)
165 | builder.setTitle("Face Tracker sample")
166 | .setMessage(R.string.no_camera_permission)
167 | .setPositiveButton(R.string.ok, listener)
168 | .show()
169 | }
170 |
171 | }
172 |
--------------------------------------------------------------------------------
/app/src/main/java/devnibbles/android/facialrecognition/GoogleVisionActivity.kt:
--------------------------------------------------------------------------------
1 | package devnibbles.android.facialrecognition
2 |
3 | import android.os.Bundle
4 | import android.util.Log
5 | import androidx.lifecycle.Observer
6 | import androidx.lifecycle.ViewModelProviders
7 | import com.google.android.gms.vision.Detector
8 | import com.google.android.gms.vision.MultiProcessor
9 | import com.google.android.gms.vision.Tracker
10 | import com.google.android.gms.vision.face.Face
11 | import com.google.android.gms.vision.face.FaceDetector
12 | import devnibbles.android.facialrecognition.classify.CloudAutoMLViewModel
13 | import devnibbles.android.facialrecognition.classify.common.*
14 | import devnibbles.android.facialrecognition.detect.googlevision.FaceGraphic
15 | import devnibbles.android.facialrecognition.detect.googlevision.GVCameraSource
16 | import devnibbles.android.facialrecognition.detect.googlevision.SaveFrameFaceDetector
17 | import java.io.IOException
18 |
19 | class GoogleVisionActivity : AbstractActivity() {
20 |
21 | companion object {
22 | private const val TAG = "GoogleVisionActivity"
23 | }
24 |
25 | private var mCameraSource: GVCameraSource? = null
26 | private lateinit var mDetector: SaveFrameFaceDetector
27 | private lateinit var mViewModel: CloudAutoMLViewModel
28 |
29 | override fun onCreate(icicle: Bundle?) {
30 | super.onCreate(icicle)
31 |
32 | mViewModel = ViewModelProviders.of(this).get(CloudAutoMLViewModel::class.java)
33 | mViewModel.subscribeClassifications()
34 | .observe(this, Observer<Resource<FaceClassification>> { resource ->
35 | when (resource) {
36 | is LoadingResource -> {
37 | System.out.println("Classifying...")
38 | }
39 | is SuccessResource -> {
40 | System.out.println("SuccessResource : " + resource.data)
41 | val faceId = resource.data.faceId
42 | val name = resource.data.name
43 | val score = resource.data.confidence
44 | (mGraphicOverlay.find(faceId) as? FaceGraphic)?.setName("$name ($score)")
45 | }
46 | is ErrorResource -> {
47 | System.out.println("ErrorResource : " + resource.data)
48 | resource.errorData?.printStackTrace()
49 | }
50 | }
51 | })
52 | }
53 |
54 | /**
55 | * Creates and starts the camera.
56 | */
57 | override fun createCameraSource() {
58 | val context = applicationContext
59 | val detector = FaceDetector.Builder(context)
60 | .setLandmarkType(FaceDetector.ALL_LANDMARKS)
61 | .build()
62 |
63 | mDetector = SaveFrameFaceDetector(detector)
64 | mDetector.setProcessor(
65 | MultiProcessor.Builder(GraphicFaceTrackerFactory())
66 | .build()
67 | )
68 |
69 | if (!mDetector.isOperational) {
70 | // Note: The first time that an app using face API is installed on a device, GMS will
71 | // download a native library to the device in order to do detection. Usually this
72 | // completes before the app is run for the first time. But if that download has not yet
73 | // completed, then the above call will not detect any faces.
74 | //
75 | // isOperational() can be used to check if the required native library is currently
76 | // available. The detector will automatically become operational once the library
77 | // download completes on device.
78 | Log.w(TAG, "Face detector dependencies are not yet available.")
79 | }
80 |
81 | mCameraSource = GVCameraSource(context, mDetector)
82 | }
83 |
84 | /**
85 | * Starts or restarts the camera source, if it exists.
86 | */
87 | override fun startCameraSource() {
88 | checkGooglePlayServices()
89 |
90 | mCameraSource?.let {
91 | try {
92 | mCameraPreview.start(it, mGraphicOverlay)
93 | } catch (e: IOException) {
94 | Log.e(TAG, "Unable to start camera source.", e)
95 | mCameraSource!!.release()
96 | mCameraSource = null
97 | }
98 | }
99 | }
100 |
101 | /**
102 | * Releases the resources associated with the camera source.
103 | */
104 | override fun releaseCameraSource() {
105 | if (mCameraSource != null) {
106 | mCameraSource!!.release()
107 | mCameraSource = null
108 | }
109 | }
110 |
111 | /**
112 | * Factory for creating a face tracker to be associated with a new face. The multiprocessor
113 | * uses this factory to create face trackers as needed -- one for each individual.
114 | */
115 | private inner class GraphicFaceTrackerFactory : MultiProcessor.Factory<Face> {
116 | override fun create(face: Face): Tracker<Face> {
117 | return GraphicFaceTracker()
118 | }
119 | }
120 |
121 | /**
122 | * Face tracker for each detected individual. This maintains a face graphic within the app's
123 | * associated face overlay.
124 | */
125 | private inner class GraphicFaceTracker internal constructor() : Tracker<Face>() {
126 | private var mFaceGraphic: FaceGraphic? = null
127 |
128 | /**
129 | * Start tracking the detected face instance within the face overlay.
130 | */
131 | override fun onNewItem(faceId: Int, item: Face) {
132 | mFaceGraphic = FaceGraphic(faceId, mGraphicOverlay)
133 | mDetector.lastFrame?.let { frame ->
134 | // Lets try and find out who this face belongs to
135 | mViewModel.classify(faceId, frame.convertToByteArray())
136 | }
137 | }
138 |
139 | /**
140 | * Update the position/characteristics of the face within the overlay.
141 | */
142 | override fun onUpdate(detectionResults: Detector.Detections<Face>, face: Face) {
143 | mFaceGraphic?.updateFace(face)
144 | mGraphicOverlay.add(mFaceGraphic)
145 | }
146 |
147 | /**
148 | * Hide the graphic when the corresponding face was not detected. This can happen for
149 | * intermediate frames temporarily (e.g., if the face was momentarily blocked from
150 | * view).
151 | */
152 | override fun onMissing(detectionResults: Detector.Detections<Face>) {
153 | mGraphicOverlay.remove(mFaceGraphic)
154 | }
155 |
156 | /**
157 | * Called when the face is assumed to be gone for good. Remove the graphic annotation from
158 | * the overlay.
159 | */
160 | override fun onDone() {
161 | mGraphicOverlay.remove(mFaceGraphic)
162 | }
163 | }
164 |
165 | }
166 |
--------------------------------------------------------------------------------
/app/src/main/java/devnibbles/android/facialrecognition/MLKitActivity.kt:
--------------------------------------------------------------------------------
1 | package devnibbles.android.facialrecognition
2 |
3 | import android.os.Bundle
4 | import android.util.Log
5 | import androidx.lifecycle.Observer
6 | import androidx.lifecycle.ViewModelProviders
7 | import com.google.firebase.ml.vision.face.FirebaseVisionFace
8 | import devnibbles.android.facialrecognition.classify.CloudAutoMLViewModel
9 | import devnibbles.android.facialrecognition.classify.common.*
10 | import devnibbles.android.facialrecognition.detect.mlkit.FaceDetector
11 | import devnibbles.android.facialrecognition.detect.mlkit.FaceGraphic
12 | import devnibbles.android.facialrecognition.detect.mlkit.FrameMetadata
13 | import devnibbles.android.facialrecognition.detect.mlkit.MLCameraSource
14 | import java.io.IOException
15 | import java.nio.ByteBuffer
16 |
17 |
18 | class MLKitActivity : AbstractActivity() {
19 |
20 | companion object {
21 | private const val TAG = "MLKitActivity"
22 | }
23 |
24 | private var mCameraSource: MLCameraSource? = null
25 | private lateinit var mViewModel: CloudAutoMLViewModel
26 |
27 | override fun onCreate(icicle: Bundle?) {
28 | super.onCreate(icicle)
29 |
30 | mViewModel = ViewModelProviders.of(this).get(CloudAutoMLViewModel::class.java)
31 | mViewModel.subscribeClassifications()
32 | .observe(this, Observer<Resource<FaceClassification>> { resource ->
33 | when (resource) {
34 | is LoadingResource -> {
35 | System.out.println("Classifying...")
36 | }
37 | is SuccessResource -> {
38 | System.out.println("SuccessResource : " + resource.data)
39 | val faceId = resource.data.faceId
40 | val name = resource.data.name
41 | val score = resource.data.confidence
42 | (mGraphicOverlay.find(faceId) as? FaceGraphic)?.setName("$name ($score)")
43 | }
44 | is ErrorResource -> {
45 | System.out.println("ErrorResource : " + resource.data)
46 | resource.errorData?.printStackTrace()
47 | }
48 | }
49 | })
50 | }
51 |
52 | /**
53 | * Creates and starts the camera.
54 | */
55 | override fun createCameraSource() {
56 | mCameraSource = MLCameraSource(this, mGraphicOverlay)
57 | mCameraSource!!.setMachineLearningFrameProcessor(FaceDetector(object : FaceDetector.DetectorCallback {
58 | override fun onSuccess(
59 | frameData: ByteBuffer,
60 | results: List<FirebaseVisionFace>,
61 | frameMetadata: FrameMetadata
62 | ) {
63 | if (results.isEmpty()) {
64 | // No faces in frame, so clear frame of any previous faces.
65 | mGraphicOverlay.clear()
66 | } else {
67 | // We have faces
68 | results.forEach { face ->
69 | val existingFace = mGraphicOverlay.find(face.trackingId) as FaceGraphic?
70 | if (existingFace == null) {
71 | // A new face has been detected.
72 | val faceGraphic = FaceGraphic(face.trackingId, mGraphicOverlay)
73 | mGraphicOverlay.add(faceGraphic)
74 |
75 | // Lets try and find out who this face belongs to
76 | mViewModel.classify(face.trackingId, frameData.convertToByteArray(frameMetadata))
77 | } else {
78 | // We have an existing face, update its position in the frame.
79 | existingFace.updateFace(face)
80 | }
81 | }
82 |
83 | mGraphicOverlay.postInvalidate()
84 |
85 | }
86 | }
87 |
88 | override fun onFailure(exception: Exception) {
89 | exception.printStackTrace()
90 | }
91 | }))
92 | }
93 |
94 | /**
95 | * Starts or restarts the camera source, if it exists.
96 | */
97 | override fun startCameraSource() {
98 | checkGooglePlayServices()
99 |
100 | if (mCameraSource != null) {
101 | try {
102 | mCameraPreview.start(mCameraSource!!, mGraphicOverlay)
103 | } catch (e: IOException) {
104 | Log.e(TAG, "Unable to start camera source.", e)
105 | mCameraSource!!.release()
106 | mCameraSource = null
107 | }
108 | }
109 | }
110 |
111 | /**
112 | * Releases the resources associated with the camera source.
113 | */
114 | override fun releaseCameraSource() {
115 | if (mCameraSource != null) {
116 | mCameraSource!!.release()
117 | mCameraSource = null
118 | }
119 | }
120 | }
121 |
--------------------------------------------------------------------------------
/app/src/main/java/devnibbles/android/facialrecognition/classify/CloudAutoMLModel.kt:
--------------------------------------------------------------------------------
1 | package devnibbles.android.facialrecognition.classify
2 |
3 | // Expected json payload for webservice.
4 | // {
5 | // "payload": {
6 | // "image": {
7 | // "imageBytes": "YOUR_IMAGE_BYTE"
8 | // }
9 | // }
10 | // }
11 |
12 | data class CloudAutoMLModel(val payload: Payload)
13 |
14 | data class Payload(val image: MlImage)
15 |
16 | data class MlImage(val imageBytes: String)
17 |
18 |
--------------------------------------------------------------------------------
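
The three data classes above nest to produce exactly the JSON payload shown in the comment. As a quick sanity check, a minimal sketch using the Gson dependency already declared in app/build.gradle:

    import com.google.gson.Gson

    fun main() {
        val body = CloudAutoMLModel(Payload(MlImage("YOUR_IMAGE_BYTE")))
        // Prints: {"payload":{"image":{"imageBytes":"YOUR_IMAGE_BYTE"}}}
        println(Gson().toJson(body))
    }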
/app/src/main/java/devnibbles/android/facialrecognition/classify/CloudAutoMLService.kt:
--------------------------------------------------------------------------------
1 | package devnibbles.android.facialrecognition.classify
2 |
3 | import kotlinx.coroutines.Deferred
4 | import retrofit2.http.Body
5 | import retrofit2.http.Header
6 | import retrofit2.http.POST
7 | import retrofit2.http.Path
8 |
9 | // curl -X POST -H "Content-Type: application/json" \
10 | // -H "Authorization: Bearer <YOUR_ACCESS_TOKEN>" \
11 | // https://automl.googleapis.com/v1beta1/projects/devnibbles/locations/us-central1/models/ICN3704829353327390855:predict -d @request.json
12 |
13 | // Expected json response from webservice
14 | //{
15 | // "payload": [
16 | // {
17 | // "classification": {
18 | // "score": 0.87991875
19 | // },
20 | // "displayName": "Andy"
21 | // }
22 | // ]
23 | //}
24 |
25 | interface CloudAutoMLService {
26 |
27 | @POST("/v1beta1/projects/{project}/locations/{location}/models/{model}:predict")
28 | fun classify(
29 | @Header("Authorization") authorization: String,
30 | @Path("project") project: String,
31 | @Path("location") location: String,
32 | @Path("model") model: String,
33 | @Body body: CloudAutoMLModel
34 | ): Deferred<CloudResponse>
35 |
36 | data class Score(val score: Double)
37 | data class Classification(val classification: Score?, val displayName: String?)
38 | data class CloudResponse(val payload: List<Classification>?)
39 |
40 | }
--------------------------------------------------------------------------------
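
Going the other way, the sample response in the comment above deserializes directly into these data classes; a minimal sketch:

    import com.google.gson.Gson

    fun main() {
        val json = """{"payload":[{"classification":{"score":0.87991875},"displayName":"Andy"}]}"""
        val best = Gson().fromJson(json, CloudAutoMLService.CloudResponse::class.java)
            .payload?.firstOrNull()
        // Prints: Andy (0.87991875)
        println("${best?.displayName} (${best?.classification?.score})")
    }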
/app/src/main/java/devnibbles/android/facialrecognition/classify/CloudAutoMLViewModel.kt:
--------------------------------------------------------------------------------
1 | package devnibbles.android.facialrecognition.classify
2 |
3 | import androidx.lifecycle.LiveData
4 | import androidx.lifecycle.MutableLiveData
5 | import com.google.api.client.util.Base64
6 | import com.google.api.gax.core.FixedCredentialsProvider
7 | import com.google.auth.oauth2.AccessToken
8 | import com.google.auth.oauth2.ServiceAccountCredentials
9 | import com.google.cloud.automl.v1beta1.*
10 | import com.google.gson.GsonBuilder
11 | import com.google.protobuf.ByteString
12 | import com.jakewharton.retrofit2.adapter.kotlin.coroutines.CoroutineCallAdapterFactory
13 | import devnibbles.android.facialrecognition.classify.common.*
14 | import kotlinx.coroutines.CoroutineExceptionHandler
15 | import kotlinx.coroutines.Dispatchers
16 | import kotlinx.coroutines.launch
17 | import kotlinx.coroutines.withContext
18 | import okhttp3.OkHttpClient
19 | import okhttp3.logging.HttpLoggingInterceptor
20 | import retrofit2.Retrofit
21 | import retrofit2.converter.gson.GsonConverterFactory
22 | import java.io.ByteArrayInputStream
23 | import java.nio.charset.Charset
24 |
25 |
26 | class CloudAutoMLViewModel : AbstractViewModel() {
27 |
28 | companion object {
29 | private const val REST_CLASSIFIER =
30 | false // flag to decide if we should use REST (true) or SDK (false) classifier.
31 |
32 | private const val PROJECT = "devnibbles"
33 | private const val LOCATION = "us-central1"
34 | private const val MODEL = "ICN3704829353327390855"
35 | private const val SERVICE_ACCOUNT_JSON = "" // paste the service-account JSON key here; fine for a demo, but never ship real credentials inside an APK
36 | }
37 |
38 | private val mServiceCredentials = ServiceAccountCredentials
39 | .fromStream(ByteArrayInputStream(SERVICE_ACCOUNT_JSON.toByteArray(Charset.defaultCharset())))
40 | .createScoped(mutableListOf("https://www.googleapis.com/auth/cloud-platform"))
41 |
42 | private val errorHandler = CoroutineExceptionHandler { _, throwable ->
43 | mResult.postValue(ErrorResource(throwable))
44 | }
45 |
46 | private val mResult = MutableLiveData<Resource<FaceClassification>>()
47 |
48 | fun subscribeClassifications(): LiveData<Resource<FaceClassification>> {
49 | return mResult
50 | }
51 |
52 | private var accessToken: AccessToken? = null
53 |
54 | init {
55 | Thread {
56 | accessToken = mServiceCredentials.accessToken
57 | if (accessToken == null) {
58 | accessToken = mServiceCredentials.refreshAccessToken()
59 | }
60 | }.start()
61 | }
62 |
63 | fun classify(faceId: Int, imageBytes: ByteArray) {
64 | if (REST_CLASSIFIER) {
65 | classifyUsingRetrofit(faceId, imageBytes)
66 |
67 | } else {
68 | classifyUsingCloudSDK(faceId, imageBytes)
69 |
70 | }
71 | }
72 |
73 | private fun classifyUsingRetrofit(faceId: Int, imageBytes: ByteArray) {
74 | launch(errorHandler) {
75 | // Show loading indicator while we wait for the request.
76 | mResult.value = LoadingResource(null)
77 |
78 | // Build the body of our request, essentially the image to be classified.
79 | val body = CloudAutoMLModel(
80 | Payload(
81 | MlImage(
82 | String(
83 | Base64.encodeBase64(imageBytes)
84 | )
85 | )
86 | )
87 | )
88 |
89 | // Define the authentication credentials and make the API request
90 | val response = getRESTService().classify(
91 | "Bearer ${accessToken?.tokenValue}",
92 | PROJECT, LOCATION, MODEL, body
93 | ).await()
94 |
95 | System.out.println("Response : " + response.payload?.size + " : " + response.payload?.firstOrNull()?.displayName)
96 |
97 | if (response.payload?.isNotEmpty() == true) {
98 | // We have a prediction!
99 | var predictedName: String? = null
100 | var predictedConfidence: Double? = null
101 |
102 | response.payload.forEach { entry ->
103 | if (entry.displayName != null) {
104 | predictedName = entry.displayName
105 | predictedConfidence = entry.classification?.score
106 | }
107 | }
108 |
109 | if (predictedName != null && predictedConfidence != null) {
110 | // We had an actual name returned
111 | mResult.postValue(
112 | SuccessResource(
113 | FaceClassification(
114 | faceId,
115 | predictedName!!,
116 | predictedConfidence!!
117 | )
118 | )
119 | )
120 | } else {
121 | // No name was returned, this is an unknown face.
122 | mResult.postValue(ErrorResource(null))
123 | }
124 | } else {
125 | // There were no payloads returned, possible error or unknown face.
126 | mResult.postValue(ErrorResource(null))
127 | }
128 | }
129 | }
130 |
131 | private fun classifyUsingCloudSDK(faceId: Int, imageBytes: ByteArray) {
132 | launch(errorHandler) {
133 | // Show loading indicator while we wait for the request.
134 | mResult.value = LoadingResource(null)
135 |
136 | withContext(Dispatchers.IO) {
137 | // Define the authentication credentials
138 | val settings = PredictionServiceSettings.newBuilder()
139 | .setCredentialsProvider(FixedCredentialsProvider.create(mServiceCredentials)).build()
140 |
141 | val predictionServiceClient = PredictionServiceClient.create(settings)
142 | predictionServiceClient.use { client ->
143 | // Build the body of our request, essentially the image to be classified.
144 | val name = ModelName.of(PROJECT, LOCATION, MODEL)
145 | val image = Image.newBuilder().setImageBytes(ByteString.copyFrom(imageBytes)).build()
146 | val payload = ExamplePayload.newBuilder().setImage(image).build()
147 | val params = HashMap<String, String>()
148 |
149 | // Make the API request.
150 | val response = client.predict(name, payload, params)
151 |
152 | System.out.println("response : $response")
153 |
154 | if (response.payloadCount > 0) {
155 | // We have a prediction!
156 | var predictedName: String? = null
157 | var predictedConfidence: Double? = null
158 |
159 | response.getPayload(0).allFields.entries.forEach { entry ->
160 | System.out.println("Entry : ${entry.key.jsonName} = ${entry.value}")
161 |
162 | if (entry.key.jsonName == "displayName") {
163 | predictedName = entry.value as String
164 | } else if (entry.key.jsonName == "classification") {
165 | val classification = entry.value as ClassificationProto.ClassificationAnnotation
166 | predictedConfidence = classification.score.toDouble()
167 | }
168 | }
169 |
170 | if (predictedName != null && predictedConfidence != null) {
171 | // We had an actual name returned
172 | mResult.postValue(
173 | SuccessResource(
174 | FaceClassification(
175 | faceId,
176 | predictedName!!,
177 | predictedConfidence!!
178 | )
179 | )
180 | )
181 | } else {
182 | // No name was returned, this is an unknown face.
183 | mResult.postValue(ErrorResource(null))
184 | }
185 | } else {
186 | // There were no payloads returned, possible error or unknown face.
187 | mResult.postValue(ErrorResource(null))
188 | }
189 | }
190 | }
191 | }
192 | }
193 |
194 | private fun getRESTService(): CloudAutoMLService {
195 | val gsonFactory = GsonConverterFactory
196 | .create(GsonBuilder().create())
197 |
198 | val networkClient = OkHttpClient.Builder()
199 | .addInterceptor(HttpLoggingInterceptor().apply {
200 | level = HttpLoggingInterceptor.Level.BODY
201 | })
202 | .build()
203 |
204 | return Retrofit.Builder()
205 | .baseUrl("https://automl.googleapis.com/")
206 | .addCallAdapterFactory(CoroutineCallAdapterFactory())
207 | .addConverterFactory(gsonFactory)
208 | .client(networkClient)
209 | .build()
210 | .create(CloudAutoMLService::class.java)
211 | }
212 |
213 | }
214 |
215 |
216 |
217 |
--------------------------------------------------------------------------------
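
For orientation: the REST_CLASSIFIER flag in the companion object switches between two equivalent paths, classifyUsingRetrofit (plain HTTPS via Retrofit and Gson) and classifyUsingCloudSDK (gRPC via the google-cloud-automl client). Both send the same JPEG bytes and publish the same Loading/Success/Error Resource states to mResult.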
/app/src/main/java/devnibbles/android/facialrecognition/classify/common/AbstractViewModel.kt:
--------------------------------------------------------------------------------
1 | package devnibbles.android.facialrecognition.classify.common
2 |
3 | import androidx.lifecycle.ViewModel
4 | import kotlinx.coroutines.CoroutineScope
5 | import kotlinx.coroutines.Dispatchers
6 | import kotlinx.coroutines.Job
7 | import kotlin.coroutines.CoroutineContext
8 |
9 | abstract class AbstractViewModel : ViewModel(), CoroutineScope {
10 |
11 | private val coroutineJob = Job()
12 |
13 | override val coroutineContext: CoroutineContext
14 | get() = Dispatchers.Main + coroutineJob
15 |
16 | override fun onCleared() {
17 | super.onCleared()
18 |
19 | coroutineJob.cancel()
20 | }
21 | }
--------------------------------------------------------------------------------
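
Every subclass inherits a main-thread CoroutineScope whose jobs are cancelled together with the ViewModel; a minimal sketch (DemoViewModel is hypothetical):

    import kotlinx.coroutines.delay
    import kotlinx.coroutines.launch

    class DemoViewModel : AbstractViewModel() {
        fun load() {
            // Runs on Dispatchers.Main via coroutineContext and is cancelled
            // automatically when onCleared() cancels coroutineJob.
            launch {
                delay(1000) // stand-in for a suspending repository call
                println("done")
            }
        }
    }

CloudAutoMLViewModel above relies on exactly this: its launch(errorHandler) blocks stop as soon as the ViewModel is cleared.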
/app/src/main/java/devnibbles/android/facialrecognition/classify/common/Extensions.kt:
--------------------------------------------------------------------------------
1 | package devnibbles.android.facialrecognition.classify.common
2 |
3 | import android.graphics.Bitmap
4 | import android.graphics.ImageFormat
5 | import android.graphics.Rect
6 | import android.graphics.YuvImage
7 | import com.google.android.gms.vision.Frame
8 | import devnibbles.android.facialrecognition.detect.mlkit.FrameMetadata
9 | import java.io.ByteArrayOutputStream
10 | import java.nio.ByteBuffer
11 |
12 |
13 | fun Bitmap.convertToByteArray(): ByteArray {
14 | //minimum number of bytes that can be used to store this bitmap's pixels
15 | val size = this.byteCount
16 |
17 | //allocate new instances which will hold bitmap
18 | val buffer = ByteBuffer.allocate(size)
19 | val bytes = ByteArray(size)
20 |
21 | //copy the bitmap's pixels into the specified buffer
22 | this.copyPixelsToBuffer(buffer)
23 |
24 | //rewinds buffer (buffer position is set to zero and the mark is discarded)
25 | buffer.rewind()
26 |
27 | //transfer bytes from buffer into the given destination array
28 | buffer.get(bytes)
29 |
30 | //return bitmap's pixels
31 | return bytes
32 | }
33 |
34 | fun Frame.convertToByteArray(quality: Int = 100): ByteArray {
35 | val bytes = this.bitmap?.convertToByteArray() ?: this.grayscaleImageData.array()
36 |
37 | val yuvImage = YuvImage(bytes, this.metadata.format, this.metadata.width, this.metadata.height, null)
38 | val byteArrayOutputStream = ByteArrayOutputStream()
39 | yuvImage.compressToJpeg(Rect(0, 0, this.metadata.width, this.metadata.height), quality, byteArrayOutputStream)
40 | return byteArrayOutputStream.toByteArray()
41 | }
42 |
43 | fun ByteBuffer.convertToByteArray(metadata: FrameMetadata): ByteArray {
44 | this.rewind()
45 | val imageInBuffer = ByteArray(this.limit())
46 | this.get(imageInBuffer, 0, imageInBuffer.size)
47 |
48 | val baos = ByteArrayOutputStream()
49 | baos.use { stream ->
50 | val image = YuvImage(imageInBuffer, ImageFormat.NV21, metadata.width, metadata.height, null)
51 | image.compressToJpeg(Rect(0, 0, metadata.width, metadata.height), 80, stream)
52 | return stream.toByteArray()
53 | }
54 | }
--------------------------------------------------------------------------------
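
Note the format assumptions baked into these helpers: Frame.convertToByteArray trusts the frame's own metadata, while ByteBuffer.convertToByteArray hardcodes NV21 (the default preview format of the legacy Camera API) and JPEG quality 80. Both produce JPEG bytes, which is what ends up in the classifier's imageBytes field.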
/app/src/main/java/devnibbles/android/facialrecognition/classify/common/FaceClassification.kt:
--------------------------------------------------------------------------------
1 | package devnibbles.android.facialrecognition.classify.common
2 |
3 | data class FaceClassification(
4 | val faceId: Int,
5 | val name: String,
6 | val confidence: Double
7 | )
--------------------------------------------------------------------------------
/app/src/main/java/devnibbles/android/facialrecognition/classify/common/Resource.kt:
--------------------------------------------------------------------------------
1 | package devnibbles.android.facialrecognition.classify.common
2 |
3 | sealed class Resource<T>(open val data: T?)
4 |
5 | data class LoadingResource<T> constructor(override val data: T? = null) : Resource<T>(data)
6 | data class ErrorResource<E : Throwable, T> constructor(val errorData: E?, override val data: T? = null) : Resource<T>(data)
7 | data class SuccessResource<T> constructor(override val data: T) : Resource<T>(data)
--------------------------------------------------------------------------------
/app/src/main/java/devnibbles/android/facialrecognition/detect/common/AbstractFaceGraphic.kt:
--------------------------------------------------------------------------------
1 | package devnibbles.android.facialrecognition.detect.common
2 |
3 | import android.graphics.Canvas
4 | import android.graphics.Color
5 | import android.graphics.Paint
6 | import android.graphics.PointF
7 |
8 |
9 | abstract class AbstractFaceGraphic(faceId: Int, graphicOverlay: GraphicOverlay) :
10 | GraphicOverlay.Graphic(faceId, graphicOverlay) {
11 |
12 | companion object {
13 | private const val DOT_RADIUS = 10.0f
14 | private const val TEXT_SIZE = 40.0f
15 | }
16 |
17 | private val mFacePositionPaint = Paint()
18 |
19 | abstract fun leftEyePosition(): PointF?
20 | abstract fun rightEyePosition(): PointF?
21 | abstract fun namePosition(): PointF?
22 |
23 | private var name: String? = null
24 |
25 | init {
26 | mFacePositionPaint.color = Color.WHITE
27 | mFacePositionPaint.textSize = TEXT_SIZE
28 | mFacePositionPaint.textAlign = Paint.Align.CENTER
29 | }
30 |
31 | fun setName(name: String) {
32 | this.name = name
33 | postInvalidate()
34 | }
35 |
36 | override fun draw(canvas: Canvas) {
37 | leftEyePosition()?.let { position ->
38 | canvas.drawCircle(
39 | translateX(position.x),
40 | translateY(position.y),
41 | DOT_RADIUS,
42 | mFacePositionPaint
43 | )
44 | }
45 |
46 | rightEyePosition()?.let { position ->
47 | canvas.drawCircle(
48 | translateX(position.x),
49 | translateY(position.y),
50 | DOT_RADIUS,
51 | mFacePositionPaint
52 | )
53 | }
54 |
55 | namePosition()?.let { position ->
56 | if (name != null) {
57 | canvas.drawText(
58 | name!!,
59 | translateX(position.x),
60 | translateY(position.y),
61 | mFacePositionPaint
62 | )
63 | }
64 | }
65 | }
66 | }
67 |
--------------------------------------------------------------------------------
/app/src/main/java/devnibbles/android/facialrecognition/detect/common/CameraSourcePreview.kt:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (C) The Android Open Source Project
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package devnibbles.android.facialrecognition.detect.common
17 |
18 | import android.content.Context
19 | import android.content.res.Configuration
20 | import android.util.AttributeSet
21 | import android.util.Log
22 | import android.view.SurfaceHolder
23 | import android.view.SurfaceView
24 | import android.view.ViewGroup
25 |
26 | import java.io.IOException
27 |
28 | class CameraSourcePreview(private val mContext: Context, attrs: AttributeSet) : ViewGroup(mContext, attrs) {
29 |
30 | companion object {
31 | private const val TAG = "CameraSourcePreview"
32 | }
33 |
34 | private val mSurfaceView: SurfaceView
35 | private var mStartRequested: Boolean = false
36 | private var mSurfaceAvailable: Boolean = false
37 | private var mCameraSource: ICameraSource? = null
38 | private var mOverlay: GraphicOverlay? = null
39 |
40 | private val isPortraitMode: Boolean
41 | get() {
42 | val orientation = mContext.resources.configuration.orientation
43 | if (orientation == Configuration.ORIENTATION_LANDSCAPE) {
44 | return false
45 | }
46 | if (orientation == Configuration.ORIENTATION_PORTRAIT) {
47 | return true
48 | }
49 |
50 | Log.d(TAG, "isPortraitMode returning false by default")
51 | return false
52 | }
53 |
54 | init {
55 | mStartRequested = false
56 | mSurfaceAvailable = false
57 |
58 | mSurfaceView = SurfaceView(mContext)
59 | mSurfaceView.holder.addCallback(SurfaceCallback())
60 | addView(mSurfaceView)
61 | }
62 |
63 | @Throws(IOException::class)
64 | fun start(cameraSource: ICameraSource?) {
65 | if (cameraSource == null) {
66 | stop()
67 | }
68 |
69 | mCameraSource = cameraSource
70 |
71 | if (mCameraSource != null) {
72 | mStartRequested = true
73 | startIfReady()
74 | }
75 | }
76 |
77 | @Throws(IOException::class)
78 | fun start(cameraSource: ICameraSource, overlay: GraphicOverlay) {
79 | mOverlay = overlay
80 | start(cameraSource)
81 | }
82 |
83 | fun stop() {
84 | if (mCameraSource != null) {
85 | mCameraSource!!.stop()
86 | }
87 | }
88 |
89 | fun release() {
90 | if (mCameraSource != null) {
91 | mCameraSource!!.release()
92 | mCameraSource = null
93 | }
94 | }
95 |
96 | @Throws(IOException::class)
97 | private fun startIfReady() {
98 | if (mStartRequested && mSurfaceAvailable) {
99 | mCameraSource!!.start(mSurfaceView.holder)
100 | if (mOverlay != null) {
101 | val size = mCameraSource!!.previewSize()
102 | size?.let {
103 | val min = Math.min(it.width, it.height)
104 | val max = Math.max(it.width, it.height)
105 | if (isPortraitMode) {
106 | // Swap width and height sizes when in portrait, since it will be rotated by
107 | // 90 degrees
108 | mOverlay!!.setCameraInfo(min, max, mCameraSource!!.cameraFacing())
109 | } else {
110 | mOverlay!!.setCameraInfo(max, min, mCameraSource!!.cameraFacing())
111 | }
112 | mOverlay!!.clear()
113 | }
114 | }
115 | mStartRequested = false
116 | }
117 | }
118 |
119 | private inner class SurfaceCallback : SurfaceHolder.Callback {
120 | override fun surfaceCreated(surface: SurfaceHolder) {
121 | mSurfaceAvailable = true
122 | try {
123 | startIfReady()
124 | } catch (e: IOException) {
125 | Log.e(TAG, "Could not start camera source.", e)
126 | }
127 |
128 | }
129 |
130 | override fun surfaceDestroyed(surface: SurfaceHolder) {
131 | mSurfaceAvailable = false
132 | }
133 |
134 | override fun surfaceChanged(holder: SurfaceHolder, format: Int, width: Int, height: Int) {}
135 | }
136 |
137 | override fun onLayout(changed: Boolean, left: Int, top: Int, right: Int, bottom: Int) {
138 | var width = 640
139 | var height = 480
140 | if (mCameraSource != null) {
141 | val size = mCameraSource!!.previewSize()
142 | if (size != null) {
143 | width = size.width
144 | height = size.height
145 | }
146 | }
147 |
148 | // Swap width and height sizes when in portrait, since it will be rotated 90 degrees
149 | if (isPortraitMode) {
150 | val tmp = width
151 | width = height
152 | height = tmp
153 | }
154 |
155 | val layoutWidth = right - left
156 | val layoutHeight = bottom - top
157 |
158 | // Computes height and width for potentially doing fit width.
159 | var childWidth = layoutWidth
160 | var childHeight = (layoutWidth.toFloat() / width.toFloat() * height).toInt()
161 |
162 | // If height is too tall using fit width, does fit height instead.
163 | if (childHeight > layoutHeight) {
164 | childHeight = layoutHeight
165 | childWidth = (layoutHeight.toFloat() / height.toFloat() * width).toInt()
166 | }
167 |
168 | for (i in 0 until childCount) {
169 | getChildAt(i).layout(0, 0, childWidth, childHeight)
170 | }
171 |
172 | try {
173 | startIfReady()
174 | } catch (e: IOException) {
175 | Log.e(TAG, "Could not start camera source.", e)
176 | }
177 | }
178 |
179 | }
180 |
--------------------------------------------------------------------------------
/app/src/main/java/devnibbles/android/facialrecognition/detect/common/GraphicOverlay.kt:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (C) The Android Open Source Project
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package devnibbles.android.facialrecognition.detect.common
17 |
18 | import android.content.Context
19 | import android.graphics.Canvas
20 | import android.util.AttributeSet
21 | import android.view.View
22 |
23 | /**
24 | * A view which renders a series of custom graphics to be overlayed on top of an associated preview
25 | * (i.e., the camera preview). The creator can add graphics objects, update the objects, and remove
26 | * them, triggering the appropriate drawing and invalidation within the view.
27 | *
28 | *
29 | *
30 | * Supports scaling and mirroring of the graphics relative the camera's preview properties. The
31 | * idea is that detection items are expressed in terms of a preview size, but need to be scaled up
32 | * to the full view size, and also mirrored in the case of the front-facing camera.
33 | *
34 | *
35 | *
36 | * Associated [Graphic] items should use the following methods to convert to view coordinates
37 | * for the graphics that are drawn:
38 | *
39 | * 1. [Graphic.scaleX] and [Graphic.scaleY] adjust the size of the
40 | * supplied value from the preview scale to the view scale.
41 | * 1. [Graphic.translateX] and [Graphic.translateY] adjust the coordinate
42 | * from the preview's coordinate system to the view coordinate system.
43 | *
44 | */
45 | class GraphicOverlay(context: Context, attrs: AttributeSet) : View(context, attrs) {
46 | companion object {
47 | // Copied values from Google Vision library to remove dependency.
48 | @JvmStatic
49 | val CAMERA_FACING_BACK = 0
50 |
51 | @JvmStatic
52 | val CAMERA_FACING_FRONT = 1
53 | }
54 |
55 | private val mLock = Object()
56 | private var mPreviewWidth: Int = 0
57 | private var mWidthScaleFactor = 1.0f
58 | private var mPreviewHeight: Int = 0
59 | private var mHeightScaleFactor = 1.0f
60 | private var mFacing = CAMERA_FACING_BACK
61 | private val mGraphics = HashSet<Graphic>()
62 |
63 | /**
64 | * Base class for a custom graphics object to be rendered within the graphic overlay. Subclass
65 | * this and implement the [Graphic.draw] method to define the
66 | * graphics element. Add instances to the overlay using [GraphicOverlay.add].
67 | */
68 | abstract class Graphic(val id: Int, private val mOverlay: GraphicOverlay) {
69 |
70 | /**
71 | * Draw the graphic on the supplied canvas. Drawing should use the following methods to
72 | * convert to view coordinates for the graphics that are drawn:
73 | *
74 | * 1. [Graphic.scaleX] and [Graphic.scaleY] adjust the size of
75 | * the supplied value from the preview scale to the view scale.
76 | * 1. [Graphic.translateX] and [Graphic.translateY] adjust the
77 | * coordinate from the preview's coordinate system to the view coordinate system.
78 | *
79 | *
80 | * @param canvas drawing canvas
81 | */
82 | abstract fun draw(canvas: Canvas)
83 |
84 | /**
85 | * Adjusts a horizontal value of the supplied value from the preview scale to the view
86 | * scale.
87 | */
88 | fun scaleX(horizontal: Float): Float {
89 | return horizontal * mOverlay.mWidthScaleFactor
90 | }
91 |
92 | /**
93 | * Adjusts a vertical value of the supplied value from the preview scale to the view scale.
94 | */
95 | fun scaleY(vertical: Float): Float {
96 | return vertical * mOverlay.mHeightScaleFactor
97 | }
98 |
99 | /**
100 | * Adjusts the x coordinate from the preview's coordinate system to the view coordinate
101 | * system.
102 | */
103 | fun translateX(x: Float): Float {
104 | return if (mOverlay.mFacing == CAMERA_FACING_FRONT) {
105 | mOverlay.width - scaleX(x)
106 | } else {
107 | scaleX(x)
108 | }
109 | }
110 |
111 | /**
112 | * Adjusts the y coordinate from the preview's coordinate system to the view coordinate
113 | * system.
114 | */
115 | fun translateY(y: Float): Float {
116 | return scaleY(y)
117 | }
118 |
119 | fun postInvalidate() {
120 | mOverlay.postInvalidate()
121 | }
122 | }
123 |
124 | /**
125 | * Removes all graphics from the overlay.
126 | */
127 | fun clear() {
128 | synchronized(mLock) {
129 | mGraphics.clear()
130 | }
131 | postInvalidate()
132 | }
133 |
134 | /**
135 | * Adds a graphic to the overlay.
136 | */
137 | fun add(graphic: Graphic?) {
138 | graphic?.let {
139 | synchronized(mLock) {
140 | mGraphics.add(it)
141 | }
142 | postInvalidate()
143 | }
144 | }
145 |
146 | fun find(id: Int): Graphic? {
147 | return mGraphics.find { it.id == id }
148 | }
149 |
150 | /**
151 | * Removes a graphic from the overlay.
152 | */
153 | fun remove(graphic: Graphic?) {
154 | synchronized(mLock) {
155 | mGraphics.remove(graphic)
156 | }
157 | postInvalidate()
158 | }
159 |
160 | /**
161 | * Sets the camera attributes for size and facing direction, which informs how to transform
162 | * image coordinates later.
163 | */
164 | fun setCameraInfo(previewWidth: Int, previewHeight: Int, facing: Int) {
165 | synchronized(mLock) {
166 | mPreviewWidth = previewWidth
167 | mPreviewHeight = previewHeight
168 | mFacing = facing
169 | }
170 | postInvalidate()
171 | }
172 |
173 | /**
174 | * Draws the overlay with its associated graphic objects.
175 | */
176 | override fun onDraw(canvas: Canvas) {
177 | super.onDraw(canvas)
178 |
179 | synchronized(mLock) {
180 | if (mPreviewWidth != 0 && mPreviewHeight != 0) {
181 | mWidthScaleFactor = canvas.width.toFloat() / mPreviewWidth.toFloat()
182 | mHeightScaleFactor = canvas.height.toFloat() / mPreviewHeight.toFloat()
183 | }
184 |
185 | for (graphic in mGraphics) {
186 | graphic.draw(canvas)
187 | }
188 | }
189 | }
190 | }
191 |
--------------------------------------------------------------------------------
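
To draw anything beyond the eye dots and name rendered by AbstractFaceGraphic, subclass Graphic and route every coordinate through the translate/scale helpers; a minimal sketch (BoxGraphic is hypothetical):

    import android.graphics.Canvas
    import android.graphics.Color
    import android.graphics.Paint
    import devnibbles.android.facialrecognition.detect.common.GraphicOverlay

    class BoxGraphic(id: Int, overlay: GraphicOverlay) : GraphicOverlay.Graphic(id, overlay) {
        private val paint = Paint().apply {
            color = Color.WHITE
            style = Paint.Style.STROKE
            strokeWidth = 4f
        }

        override fun draw(canvas: Canvas) {
            // translateX mirrors horizontally for the front-facing camera, which can
            // swap left/right, so normalise with minOf/maxOf before drawing.
            val x1 = translateX(100f)
            val x2 = translateX(300f)
            canvas.drawRect(minOf(x1, x2), translateY(100f), maxOf(x1, x2), translateY(300f), paint)
        }
    }

Register it with overlay.add(...); setCameraInfo() in CameraSourcePreview supplies the scale factors the helpers use.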
/app/src/main/java/devnibbles/android/facialrecognition/detect/common/ICameraSource.kt:
--------------------------------------------------------------------------------
1 | package devnibbles.android.facialrecognition.detect.common
2 |
3 | import android.view.SurfaceHolder
4 |
5 | import com.google.android.gms.common.images.Size
6 |
7 | import java.io.IOException
8 |
9 | interface ICameraSource {
10 |
11 | fun previewSize(): Size?
12 |
13 | fun cameraFacing(): Int
14 |
15 | fun release()
16 |
17 | @Throws(IOException::class)
18 | fun start(surfaceHolder: SurfaceHolder)
19 |
20 | fun stop()
21 | }
22 |
--------------------------------------------------------------------------------
/app/src/main/java/devnibbles/android/facialrecognition/detect/googlevision/FaceGraphic.kt:
--------------------------------------------------------------------------------
1 | package devnibbles.android.facialrecognition.detect.googlevision
2 |
3 | import android.graphics.PointF
4 |
5 | import com.google.android.gms.vision.face.Face
6 | import com.google.android.gms.vision.face.Landmark
7 | import devnibbles.android.facialrecognition.detect.common.AbstractFaceGraphic
8 | import devnibbles.android.facialrecognition.detect.common.GraphicOverlay
9 |
10 |
11 | /**
12 | * Graphic instance for rendering face position, orientation, and landmarks within an associated
13 | * graphic overlay view.
14 | */
15 | class FaceGraphic(faceId: Int, graphicOverlay: GraphicOverlay) : AbstractFaceGraphic(faceId, graphicOverlay) {
16 |
17 | private var face: Face? = null
18 |
19 | override fun rightEyePosition() : PointF? {
20 | return face?.landmarks?.firstOrNull { it.type == Landmark.RIGHT_EYE }?.position
21 | }
22 |
23 | override fun leftEyePosition() : PointF? {
24 | return face?.landmarks?.firstOrNull { it.type == Landmark.LEFT_EYE }?.position
25 | }
26 |
27 | override fun namePosition() : PointF? {
28 | return face?.landmarks?.firstOrNull { it.type == Landmark.NOSE_BASE }?.position
29 | }
30 |
31 | fun updateFace(face: Face) {
32 | this.face = face
33 | postInvalidate()
34 | }
35 | }
36 |
--------------------------------------------------------------------------------
/app/src/main/java/devnibbles/android/facialrecognition/detect/googlevision/GVCameraSource.kt:
--------------------------------------------------------------------------------
1 | package devnibbles.android.facialrecognition.detect.googlevision
2 |
3 | import android.content.Context
4 | import android.view.SurfaceHolder
5 |
6 | import com.google.android.gms.common.images.Size
7 | import com.google.android.gms.vision.CameraSource
8 | import com.google.android.gms.vision.Detector
9 | import devnibbles.android.facialrecognition.detect.common.GraphicOverlay
10 | import devnibbles.android.facialrecognition.detect.common.ICameraSource
11 |
12 | import java.io.IOException
13 |
14 | class GVCameraSource(context: Context, detector: Detector<*>) :
15 | ICameraSource {
16 |
17 | private val delegate = CameraSource.Builder(context, detector)
18 | .setRequestedPreviewSize(640, 480)
19 | .setFacing(GraphicOverlay.CAMERA_FACING_FRONT)
20 | .setRequestedFps(15.0f)
21 | .build()
22 |
23 | override fun previewSize(): Size? {
24 | return delegate.previewSize
25 | }
26 |
27 | override fun cameraFacing(): Int {
28 | return delegate.cameraFacing
29 | }
30 |
31 | override fun release() {
32 | delegate.release()
33 | }
34 |
35 | @Throws(IOException::class)
36 | override fun start(surfaceHolder: SurfaceHolder) {
37 | delegate.start(surfaceHolder)
38 | }
39 |
40 | override fun stop() {
41 | delegate.stop()
42 | }
43 |
44 | }
45 |
--------------------------------------------------------------------------------
/app/src/main/java/devnibbles/android/facialrecognition/detect/googlevision/SaveFrameFaceDetector.kt:
--------------------------------------------------------------------------------
1 | package devnibbles.android.facialrecognition.detect.googlevision
2 |
3 | import android.util.SparseArray
4 |
5 | import com.google.android.gms.vision.Detector
6 | import com.google.android.gms.vision.Frame
7 | import com.google.android.gms.vision.face.Face
8 |
9 | class SaveFrameFaceDetector(private val delegateDetector: Detector<Face>) : Detector<Face>() {
10 | var lastFrame: Frame? = null // cached so trackers, which only receive Face results, can still reach the frame pixels for classification
11 |
12 | override fun detect(frame: Frame): SparseArray<Face> {
13 | lastFrame = frame
14 |
15 | return delegateDetector.detect(frame)
16 | }
17 |
18 | override fun isOperational(): Boolean {
19 | return delegateDetector.isOperational
20 | }
21 |
22 | override fun setFocus(id: Int): Boolean {
23 | return delegateDetector.setFocus(id)
24 | }
25 | }
26 |
27 |
--------------------------------------------------------------------------------
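Note: SaveFrameFaceDetector is a decorator: detect() is forwarded to the wrapped detector, but the most recent Frame is retained so its pixels can be revisited after detection (for example, to crop a detected face for classification). A hedged usage sketch with illustrative names:

import android.content.Context
import com.google.android.gms.vision.face.FaceDetector

// Hypothetical setup: wrap the real detector so the last frame stays accessible.
fun buildFrameSavingDetector(context: Context): SaveFrameFaceDetector {
    val realDetector = FaceDetector.Builder(context)
        .setTrackingEnabled(true)
        .build()
    return SaveFrameFaceDetector(realDetector)
}

// After detection has run at least once, the most recent frame's image data
// can be read back, e.g.: savingDetector.lastFrame?.grayscaleImageData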
/app/src/main/java/devnibbles/android/facialrecognition/detect/mlkit/FaceDetector.kt:
--------------------------------------------------------------------------------
1 | package devnibbles.android.facialrecognition.detect.mlkit
2 |
3 | import android.util.Log
4 | import androidx.annotation.GuardedBy
5 | import com.google.firebase.ml.vision.FirebaseVision
6 | import com.google.firebase.ml.vision.common.FirebaseVisionImage
7 | import com.google.firebase.ml.vision.common.FirebaseVisionImageMetadata
8 | import com.google.firebase.ml.vision.face.FirebaseVisionFace
9 | import com.google.firebase.ml.vision.face.FirebaseVisionFaceDetectorOptions
10 | import java.io.IOException
11 |
12 | import java.nio.ByteBuffer
13 |
14 | /**
15 | * Face detector built on the ML Kit (Firebase Vision) face API. Frames are supplied
16 | * via [process]; while one frame is being detected, any newer frame replaces the
17 | * pending one, so at most a single frame is ever queued and stale frames are dropped.
18 | *
19 | * Results and failures are delivered asynchronously through [DetectorCallback].
20 | */
21 | class FaceDetector(private val callback: DetectorCallback?) : IFrameProcessor {
22 |
23 | interface DetectorCallback {
24 | fun onSuccess(frameData: ByteBuffer, results: List<FirebaseVisionFace>, frameMetadata: FrameMetadata)
25 | fun onFailure(exception: Exception)
26 | }
27 |
28 | companion object {
29 | private const val TAG = "FaceDetector"
30 | }
31 |
32 | private val delegateDetector = FirebaseVision.getInstance()
33 | .getVisionFaceDetector(
34 | FirebaseVisionFaceDetectorOptions.Builder()
35 | .enableTracking()
36 | .setLandmarkMode(FirebaseVisionFaceDetectorOptions.ALL_LANDMARKS)
37 | .build()
38 | )
39 |
40 | // To keep the latest images and its metadata.
41 | @GuardedBy("this")
42 | private var latestImage: ByteBuffer? = null
43 |
44 | @GuardedBy("this")
45 | private var latestImageMetaData: FrameMetadata? = null
46 |
47 | // To keep the images and metadata in process.
48 | @GuardedBy("this")
49 | private var processingImage: ByteBuffer? = null
50 |
51 | @GuardedBy("this")
52 | private var processingMetaData: FrameMetadata? = null
53 |
54 | @Synchronized
55 | override fun process(data: ByteBuffer, frameMetadata: FrameMetadata) {
56 | latestImage = data
57 | latestImageMetaData = frameMetadata
58 | if (processingImage == null && processingMetaData == null) {
59 | processLatestImage()
60 | }
61 | }
62 |
63 | @Synchronized
64 | private fun processLatestImage() {
65 | processingImage = latestImage
66 | processingMetaData = latestImageMetaData
67 | latestImage = null
68 | latestImageMetaData = null
69 | if (processingImage != null && processingMetaData != null) {
70 | processImage(processingImage!!, processingMetaData!!)
71 | }
72 | }
73 |
74 | private fun processImage(data: ByteBuffer, frameMetadata: FrameMetadata) {
75 | val metadata = FirebaseVisionImageMetadata.Builder()
76 | .setFormat(FirebaseVisionImageMetadata.IMAGE_FORMAT_NV21)
77 | .setWidth(frameMetadata.width)
78 | .setHeight(frameMetadata.height)
79 | .setRotation(frameMetadata.rotation)
80 | .build()
81 |
82 | val image = FirebaseVisionImage.fromByteBuffer(data, metadata)
83 |
84 | delegateDetector.detectInImage(image)
85 | .addOnSuccessListener { results ->
86 | callback?.onSuccess(data, results, frameMetadata)
87 | processLatestImage()
88 | }
89 | .addOnFailureListener { e ->
90 | callback?.onFailure(e)
91 | }
92 | }
93 |
94 | override fun stop() {
95 | try {
96 | delegateDetector.close()
97 | } catch (e: IOException) {
98 | Log.e(TAG, "Exception thrown while trying to close Face Detector: $e")
99 | }
100 | }
101 |
102 | }
103 |
--------------------------------------------------------------------------------
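Note: the process()/processLatestImage() pair above implements frame dropping: while one frame is in flight, newer frames simply overwrite latestImage, so at most one frame is ever pending. A hedged sketch of wiring the detector into MLCameraSource (the callback body and log tag are illustrative):

import android.util.Log
import com.google.firebase.ml.vision.face.FirebaseVisionFace
import java.nio.ByteBuffer

// Hypothetical wiring into an existing MLCameraSource instance.
fun attachFaceDetector(cameraSource: MLCameraSource) {
    val detector = FaceDetector(object : FaceDetector.DetectorCallback {
        override fun onSuccess(
            frameData: ByteBuffer,
            results: List<FirebaseVisionFace>,
            frameMetadata: FrameMetadata
        ) {
            // Success listeners are posted via the Tasks API (main thread by default).
            results.forEach { face -> Log.d("FaceDemo", "face id=${face.trackingId}") }
        }

        override fun onFailure(exception: Exception) {
            Log.e("FaceDemo", "detection failed", exception)
        }
    })
    cameraSource.setMachineLearningFrameProcessor(detector)
}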
/app/src/main/java/devnibbles/android/facialrecognition/detect/mlkit/FaceGraphic.kt:
--------------------------------------------------------------------------------
1 | package devnibbles.android.facialrecognition.detect.mlkit
2 |
3 | import android.graphics.PointF
4 |
5 | import com.google.firebase.ml.vision.face.FirebaseVisionFace
6 | import com.google.firebase.ml.vision.face.FirebaseVisionFaceLandmark
7 | import devnibbles.android.facialrecognition.detect.common.AbstractFaceGraphic
8 | import devnibbles.android.facialrecognition.detect.common.GraphicOverlay
9 |
10 |
11 | /**
12 | * Graphic instance for rendering face position, orientation, and landmarks within an associated
13 | * graphic overlay view.
14 | */
15 | class FaceGraphic(faceId: Int, graphicOverlay: GraphicOverlay) :
16 | AbstractFaceGraphic(faceId, graphicOverlay) {
17 |
18 | private var face: FirebaseVisionFace? = null
19 |
20 | // ML Kit landmark positions are FirebaseVisionPoints rather than PointFs, so convert
21 | // them here. Returning null when the face or landmark is missing (instead of a PointF
22 | // defaulting to 0f) keeps absent landmarks from being drawn at the origin.
23 | override fun leftEyePosition(): PointF? {
24 | return face?.getLandmark(FirebaseVisionFaceLandmark.LEFT_EYE)?.position?.let {
25 | PointF(it.x, it.y)
26 | }
27 | }
28 |
29 | override fun rightEyePosition(): PointF? {
30 | return face?.getLandmark(FirebaseVisionFaceLandmark.RIGHT_EYE)?.position?.let {
31 | PointF(it.x, it.y)
32 | }
33 | }
34 |
35 | override fun namePosition(): PointF? {
36 | return face?.getLandmark(FirebaseVisionFaceLandmark.NOSE_BASE)?.position?.let {
37 | PointF(it.x, it.y)
38 | }
39 | }
40 |
41 | fun updateFace(face: FirebaseVisionFace) {
42 | this.face = face
43 | postInvalidate()
44 | }
45 | }
46 |
47 |
--------------------------------------------------------------------------------
/app/src/main/java/devnibbles/android/facialrecognition/detect/mlkit/FrameMetadata.kt:
--------------------------------------------------------------------------------
1 | // Copyright 2018 Google LLC
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 | package devnibbles.android.facialrecognition.detect.mlkit
15 |
16 | /** Describes the metadata (size, rotation, facing) of a single camera frame. */
17 | class FrameMetadata private constructor(val width: Int, val height: Int, val rotation: Int, val cameraFacing: Int) {
18 |
19 | /** Builder of [FrameMetadata]. */
20 | class Builder {
21 |
22 | private var width: Int = 0
23 | private var height: Int = 0
24 | private var rotation: Int = 0
25 | private var cameraFacing: Int = 0
26 |
27 | fun setWidth(width: Int): Builder {
28 | this.width = width
29 | return this
30 | }
31 |
32 | fun setHeight(height: Int): Builder {
33 | this.height = height
34 | return this
35 | }
36 |
37 | fun setRotation(rotation: Int): Builder {
38 | this.rotation = rotation
39 | return this
40 | }
41 |
42 | fun setCameraFacing(facing: Int): Builder {
43 | cameraFacing = facing
44 | return this
45 | }
46 |
47 | fun build(): FrameMetadata {
48 | return FrameMetadata(width, height, rotation, cameraFacing)
49 | }
50 | }
51 | }
52 |
--------------------------------------------------------------------------------
/app/src/main/java/devnibbles/android/facialrecognition/detect/mlkit/IFrameProcessor.kt:
--------------------------------------------------------------------------------
1 | package devnibbles.android.facialrecognition.detect.mlkit
2 |
3 | import java.nio.ByteBuffer
4 |
5 | interface IFrameProcessor {
6 |
7 | fun process(data: ByteBuffer, frameMetadata: FrameMetadata)
8 |
9 | fun stop()
10 |
11 | }
--------------------------------------------------------------------------------
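Note: IFrameProcessor is the seam between MLCameraSource and any detector. process() runs on the camera source's frame-processing thread with raw NV21 bytes plus metadata; stop() is called when the source is released. A purely illustrative implementation:

import android.util.Log
import java.nio.ByteBuffer

// Hypothetical processor that only logs frame dimensions.
class LoggingFrameProcessor : IFrameProcessor {
    override fun process(data: ByteBuffer, frameMetadata: FrameMetadata) {
        // Called once per delivered preview frame; return quickly so the caller
        // can recycle the buffer back to the camera in its finally block.
        Log.d("LoggingProcessor", "frame ${frameMetadata.width}x${frameMetadata.height} rotation=${frameMetadata.rotation}")
    }

    override fun stop() {
        // Nothing to release in this sketch.
    }
}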
/app/src/main/java/devnibbles/android/facialrecognition/detect/mlkit/MLCameraSource.kt:
--------------------------------------------------------------------------------
1 | // Copyright 2018 Google LLC
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | package devnibbles.android.facialrecognition.detect.mlkit
16 |
17 | import android.Manifest
18 | import android.annotation.SuppressLint
19 | import android.app.Activity
20 | import android.content.Context
21 | import android.graphics.ImageFormat
22 | import android.hardware.Camera
23 | import android.hardware.Camera.CameraInfo
24 | import android.util.Log
25 | import android.view.Surface
26 | import android.view.SurfaceHolder
27 | import android.view.WindowManager
28 | import androidx.annotation.RequiresPermission
29 | import com.google.android.gms.common.images.Size
30 | import devnibbles.android.facialrecognition.detect.common.GraphicOverlay
31 | import devnibbles.android.facialrecognition.detect.common.ICameraSource
32 |
33 | import java.io.IOException
34 | import java.lang.Thread.State
35 | import java.nio.ByteBuffer
36 | import java.util.ArrayList
37 | import java.util.IdentityHashMap
38 |
39 | /**
40 | * Manages the camera and allows UI updates on top of it (e.g. overlaying extra Graphics or
41 | * displaying extra information). This receives preview frames from the camera at a specified rate,
42 | * sending those frames to the registered frame processor as fast as it is able to process them.
43 | */
44 | @SuppressLint("MissingPermission")
45 | class MLCameraSource(private var activity: Activity, private val graphicOverlay: GraphicOverlay) :
46 | ICameraSource {
47 |
48 | private var camera: Camera? = null
49 |
50 | private var cameraFacing = GraphicOverlay.CAMERA_FACING_FRONT
51 | private var previewSize: Size? = null
52 |
53 | /**
54 | * Rotation of the device, and thus the associated preview images captured from the device. See
55 | * Frame.Metadata#getRotation().
56 | */
57 | private var rotation: Int = 0
58 |
59 | // These values may be requested by the caller. Due to hardware limitations, we may need to
60 | // select close, but not exactly the same values for these.
61 | private val requestedFps = 20.0f
62 |
63 | override fun previewSize(): Size? {
64 | return previewSize
65 | }
66 |
67 | override fun cameraFacing(): Int {
68 | return cameraFacing
69 | }
70 |
71 | private val requestedPreviewWidth = 640
72 | private val requestedPreviewHeight = 480
73 | private val requestedAutoFocus = true
74 |
75 | // True if a SurfaceTexture is being used for the preview, false if a SurfaceHolder is being
76 | // used for the preview. We want to be compatible back to Gingerbread, but SurfaceTexture
77 | // wasn't introduced until Honeycomb. Since the interface cannot use a SurfaceTexture, if the
78 | // developer wants to display a preview we must use a SurfaceHolder. If the developer doesn't
79 | // want to display a preview we use a SurfaceTexture if we are running at least Honeycomb.
80 | private var usingSurfaceTexture: Boolean = false
81 |
82 | /**
83 | * Dedicated thread and associated runnable for calling into the detector with frames, as the
84 | * frames become available from the camera.
85 | */
86 | private var processingThread: Thread? = null
87 |
88 | private val processingRunnable: FrameProcessingRunnable
89 |
90 | private val processorLock = Any()
91 | // @GuardedBy("processorLock")
92 | private var frameProcessor: IFrameProcessor? = null
93 |
94 | /**
95 | * Map to convert between a byte array, received from the camera, and its associated byte buffer.
96 | * We use byte buffers internally because this is a more efficient way to call into native code
97 | * later (avoids a potential copy).
98 | *
99 | *
100 | * **Note:** uses IdentityHashMap here instead of HashMap because the behavior of an array's
101 | * equals, hashCode and toString methods is both useless and unexpected. IdentityHashMap enforces
102 | * identity ('==') check on the keys.
103 | */
104 | private val bytesToByteBuffer = IdentityHashMap<ByteArray, ByteBuffer>()
105 |
106 | init {
107 | graphicOverlay.clear()
108 | processingRunnable = FrameProcessingRunnable()
109 |
110 | if (Camera.getNumberOfCameras() == 1) {
111 | val cameraInfo = CameraInfo()
112 | Camera.getCameraInfo(0, cameraInfo)
113 | cameraFacing = cameraInfo.facing
114 | }
115 | }
116 |
117 | // ==============================================================================================
118 | // Public
119 | // ==============================================================================================
120 |
121 | /** Stops the camera and releases the resources of the camera and underlying detector. */
122 | override fun release() {
123 | synchronized(processorLock) {
124 | stop()
125 | processingRunnable.release()
126 | cleanScreen()
127 |
128 | if (frameProcessor != null) {
129 | frameProcessor!!.stop()
130 | }
131 | }
132 | }
133 |
134 | /**
135 | * Opens the camera and starts sending preview frames to the underlying detector. The supplied
136 | * surface holder is used for the preview so frames can be displayed to the user.
137 | *
138 | * @param surfaceHolder the surface holder to use for the preview frames
139 | * @throws IOException if the supplied surface holder could not be used as the preview display
140 | */
141 | @RequiresPermission(Manifest.permission.CAMERA)
142 | @Synchronized
143 | @Throws(IOException::class)
144 | override fun start(surfaceHolder: SurfaceHolder) {
145 | if (camera == null) {
146 | camera = createCamera()
147 | camera!!.setPreviewDisplay(surfaceHolder)
148 | camera!!.startPreview()
149 |
150 | processingThread = Thread(processingRunnable)
151 | processingRunnable.setActive(true)
152 | processingThread!!.start()
153 |
154 | usingSurfaceTexture = false
155 | }
156 | }
157 |
158 | /**
159 | * Closes the camera and stops sending frames to the underlying frame detector.
160 | *
161 | * This camera source may be restarted again by calling [start] with the
162 | * surface holder on which the preview should resume.
163 | *
164 | * Call [release] instead to completely shut down this camera source and
165 | * release the resources of the underlying detector in addition to
166 | * stopping the preview.
167 | */
168 | @Synchronized
169 | override fun stop() {
170 | processingRunnable.setActive(false)
171 | if (processingThread != null) {
172 | try {
173 | // Wait for the thread to complete to ensure that we can't have multiple threads
174 | // executing at the same time (i.e., which would happen if we called start too
175 | // quickly after stop).
176 | processingThread!!.join()
177 | } catch (e: InterruptedException) {
178 | Log.d(TAG, "Frame processing thread interrupted on release.")
179 | }
180 |
181 | processingThread = null
182 | }
183 |
184 | if (camera != null) {
185 | camera!!.stopPreview()
186 | camera!!.setPreviewCallbackWithBuffer(null)
187 | try {
188 | if (usingSurfaceTexture) {
189 | camera!!.setPreviewTexture(null)
190 | } else {
191 | camera!!.setPreviewDisplay(null)
192 | }
193 | } catch (e: Exception) {
194 | Log.e(TAG, "Failed to clear camera preview: $e")
195 | }
196 |
197 | camera!!.release()
198 | camera = null
199 | }
200 |
201 | // Release the reference to any image buffers, since these will no longer be in use.
202 | bytesToByteBuffer.clear()
203 | }
204 |
205 | /** Changes the facing of the camera. */
206 | @Synchronized
207 | fun setFacing(facing: Int) {
208 | if (facing != GraphicOverlay.CAMERA_FACING_BACK && facing != GraphicOverlay.CAMERA_FACING_FRONT) {
209 | throw IllegalArgumentException("Invalid camera: $facing")
210 | }
211 | this.cameraFacing = facing
212 | }
213 |
214 | /**
215 | * Opens the camera and applies the user settings.
216 | *
217 | * @throws IOException if camera cannot be found or preview cannot be processed
218 | */
219 | @SuppressLint("InlinedApi")
220 | @Throws(IOException::class)
221 | private fun createCamera(): Camera {
222 | val requestedCameraId = getIdForRequestedCamera(cameraFacing)
223 | if (requestedCameraId == -1) {
224 | throw IOException("Could not find requested camera.")
225 | }
226 | val camera = Camera.open(requestedCameraId)
227 |
228 | val sizePair = selectSizePair(camera, requestedPreviewWidth, requestedPreviewHeight)
229 | ?: throw IOException("Could not find suitable preview size.")
230 | val pictureSize = sizePair.pictureSize()
231 | previewSize = sizePair.previewSize()
232 |
233 | val previewFpsRange = selectPreviewFpsRange(camera, requestedFps)
234 | ?: throw IOException("Could not find suitable preview frames per second range.")
235 |
236 | val parameters = camera.parameters
237 |
238 | if (pictureSize != null) {
239 | parameters.setPictureSize(pictureSize.width, pictureSize.height)
240 | }
241 | parameters.setPreviewSize(previewSize!!.width, previewSize!!.height)
242 | parameters.setPreviewFpsRange(
243 | previewFpsRange[Camera.Parameters.PREVIEW_FPS_MIN_INDEX],
244 | previewFpsRange[Camera.Parameters.PREVIEW_FPS_MAX_INDEX]
245 | )
246 | parameters.previewFormat = ImageFormat.NV21
247 |
248 | setRotation(camera, parameters, requestedCameraId)
249 |
250 | if (requestedAutoFocus) {
251 | if (parameters
252 | .supportedFocusModes
253 | .contains(Camera.Parameters.FOCUS_MODE_CONTINUOUS_VIDEO)
254 | ) {
255 | parameters.focusMode = Camera.Parameters.FOCUS_MODE_CONTINUOUS_VIDEO
256 | } else {
257 | Log.i(TAG, "Camera auto focus is not supported on this device.")
258 | }
259 | }
260 |
261 | camera.parameters = parameters
262 |
263 | // Four frame buffers are needed for working with the camera:
264 | //
265 | // one for the frame that is currently being executed upon in doing detection
266 | // one for the next pending frame to process immediately upon completing detection
267 | // two for the frames that the camera uses to populate future preview images
268 | //
269 | // Through trial and error it appears that two free buffers, in addition to the two buffers
270 | // used in this code, are needed for the camera to work properly. Perhaps the camera has
271 | // one thread for acquiring images, and another thread for calling into user code. If only
272 | // three buffers are used, then the camera will spew thousands of warning messages when
273 | // detection takes a non-trivial amount of time.
274 | camera.setPreviewCallbackWithBuffer(CameraPreviewCallback())
275 | camera.addCallbackBuffer(createPreviewBuffer(previewSize!!))
276 | camera.addCallbackBuffer(createPreviewBuffer(previewSize!!))
277 | camera.addCallbackBuffer(createPreviewBuffer(previewSize!!))
278 | camera.addCallbackBuffer(createPreviewBuffer(previewSize!!))
279 |
280 | return camera
281 | }
282 |
283 | /**
284 | * Stores a preview size and a corresponding same-aspect-ratio picture size. To avoid distorted
285 | * preview images on some devices, the picture size must be set to a size that is the same aspect
286 | * ratio as the preview size or the preview may end up being distorted. If the picture size is
287 | * null, then there is no picture size with the same aspect ratio as the preview size.
288 | */
289 | private class SizePair internal constructor(
290 | previewSize: Camera.Size,
291 | pictureSize: Camera.Size?
292 | ) {
293 | private val preview: Size = Size(previewSize.width, previewSize.height)
294 | private var picture: Size? = null
295 |
296 | init {
297 | if (pictureSize != null) {
298 | picture = Size(pictureSize.width, pictureSize.height)
299 | }
300 | }
301 |
302 | internal fun previewSize(): Size {
303 | return preview
304 | }
305 |
306 | internal fun pictureSize(): Size? {
307 | return picture
308 | }
309 | }
310 |
311 | /**
312 | * Calculates the correct rotation for the given camera id and sets the rotation in the
313 | * parameters. It also sets the camera's display orientation and rotation.
314 | *
315 | * @param parameters the camera parameters for which to set the rotation
316 | * @param cameraId the camera id to set rotation based on
317 | */
318 | private fun setRotation(camera: Camera, parameters: Camera.Parameters, cameraId: Int) {
319 | val windowManager = activity.getSystemService(Context.WINDOW_SERVICE) as WindowManager
320 | var degrees = 0
321 | val rotation = windowManager.defaultDisplay.rotation
322 | when (rotation) {
323 | Surface.ROTATION_0 -> degrees = 0
324 | Surface.ROTATION_90 -> degrees = 90
325 | Surface.ROTATION_180 -> degrees = 180
326 | Surface.ROTATION_270 -> degrees = 270
327 | else -> Log.e(TAG, "Bad rotation value: $rotation")
328 | }
329 |
330 | val cameraInfo = CameraInfo()
331 | Camera.getCameraInfo(cameraId, cameraInfo)
332 |
333 | val angle: Int
334 | val displayAngle: Int
335 | if (cameraInfo.facing == CameraInfo.CAMERA_FACING_FRONT) {
336 | angle = (cameraInfo.orientation + degrees) % 360
337 | displayAngle = (360 - angle) % 360 // compensate for it being mirrored
338 | } else { // back-facing
339 | angle = (cameraInfo.orientation - degrees + 360) % 360
340 | displayAngle = angle
341 | }
342 |
343 | // This corresponds to the rotation constants.
344 | this.rotation = angle / 90
345 |
346 | camera.setDisplayOrientation(displayAngle)
347 | parameters.setRotation(angle)
348 | }
349 |
350 | /**
351 | * Creates one buffer for the camera preview callback. The size of the buffer is based off of the
352 | * camera preview size and the format of the camera image.
353 | *
354 | * @return a new preview buffer of the appropriate size for the current camera settings
355 | */
356 | @SuppressLint("InlinedApi")
357 | private fun createPreviewBuffer(previewSize: Size): ByteArray {
358 | val bitsPerPixel = ImageFormat.getBitsPerPixel(ImageFormat.NV21)
359 | val sizeInBits = previewSize.height.toLong() * previewSize.width.toLong() * bitsPerPixel.toLong()
360 | val bufferSize = Math.ceil(sizeInBits / 8.0).toInt() + 1
361 |
362 | // Creating the byte array this way and wrapping it, as opposed to using .allocate(),
363 | // should guarantee that there will be an array to work with.
364 | val byteArray = ByteArray(bufferSize)
365 | val buffer = ByteBuffer.wrap(byteArray)
366 | if (!buffer.hasArray() || buffer.array() != byteArray) {
367 | // I don't think that this will ever happen. But if it does, then we wouldn't be
368 | // passing the preview content to the underlying detector later.
369 | throw IllegalStateException("Failed to create valid buffer for camera source.")
370 | }
371 |
372 | bytesToByteBuffer[byteArray] = buffer
373 | return byteArray
374 | }
375 |
376 | // ==============================================================================================
377 | // Frame processing
378 | // ==============================================================================================
379 |
380 | /** Called when the camera has a new preview frame. */
381 | private inner class CameraPreviewCallback : Camera.PreviewCallback {
382 | override fun onPreviewFrame(data: ByteArray, camera: Camera) {
383 | processingRunnable.setNextFrame(data, camera)
384 | }
385 | }
386 |
387 | fun setMachineLearningFrameProcessor(processor: IFrameProcessor) {
388 | synchronized(processorLock) {
389 | cleanScreen()
390 | if (frameProcessor != null) {
391 | frameProcessor!!.stop()
392 | }
393 | frameProcessor = processor
394 | }
395 | }
396 |
397 | /**
398 | * This runnable controls access to the underlying receiver, calling it to process frames when
399 | * available from the camera. This is designed to run detection on frames as fast as possible
400 | * (i.e., without unnecessary context switching or waiting on the next frame).
401 | *
402 | *
403 | * While detection is running on a frame, new frames may be received from the camera. As these
404 | * frames come in, the most recent frame is held onto as pending. As soon as detection and its
405 | * associated processing is done for the previous frame, detection on the most recently received
406 | * frame will immediately start on the same thread.
407 | */
408 | private inner class FrameProcessingRunnable internal constructor() : Runnable {
409 |
410 | // This lock guards all of the member variables below.
411 | private val lock = Object()
412 | private var active = true
413 |
414 | // These pending variables hold the state associated with the new frame awaiting processing.
415 | private var pendingFrameData: ByteBuffer? = null
416 |
417 | /**
418 | * Releases the underlying receiver. This is only safe to do after the associated thread has
419 | * completed, which is managed in camera source's release method above.
420 | */
421 | @SuppressLint("Assert")
422 | internal fun release() {
423 | assert(processingThread?.state == State.TERMINATED)
424 | }
425 |
426 | /** Marks the runnable as active/not active. Signals any blocked threads to continue. */
427 | internal fun setActive(active: Boolean) {
428 | synchronized(lock) {
429 | this.active = active
430 | lock.notifyAll()
431 | }
432 | }
433 |
434 | /**
435 | * Sets the frame data received from the camera. This adds the previous unused frame buffer (if
436 | * present) back to the camera, and keeps a pending reference to the frame data for future use.
437 | */
438 | internal fun setNextFrame(data: ByteArray, camera: Camera) {
439 | synchronized(lock) {
440 | if (pendingFrameData != null) {
441 | camera.addCallbackBuffer(pendingFrameData!!.array())
442 | pendingFrameData = null
443 | }
444 |
445 | if (!bytesToByteBuffer.containsKey(data)) {
446 | Log.d(
447 | TAG,
448 | "Skipping frame. Could not find ByteBuffer associated with the image data from the camera."
449 | )
450 | return
451 | }
452 |
453 | pendingFrameData = bytesToByteBuffer[data]
454 |
455 | // Notify the processor thread if it is waiting on the next frame (see below).
456 | lock.notifyAll()
457 | }
458 | }
459 |
460 | /**
461 | * As long as the processing thread is active, this executes detection on frames continuously.
462 | * The next pending frame is either immediately available or hasn't been received yet. Once it
463 | * is available, we transfer the frame info to local variables and run detection on that frame.
464 | * It immediately loops back for the next frame without pausing.
465 | *
466 | *
467 | * If detection takes longer than the time in between new frames from the camera, this will
468 | * mean that this loop will run without ever waiting on a frame, avoiding any context switching
469 | * or frame acquisition time latency.
470 | *
471 | *
472 | * If you find that this is using more CPU than you'd like, you should probably decrease the
473 | * FPS setting above to allow for some idle time in between frames.
474 | */
475 | @SuppressLint("InlinedApi")
476 | override fun run() {
477 | var data: ByteBuffer
478 |
479 | while (true) {
480 | synchronized(lock) {
481 | while (active && pendingFrameData == null) {
482 | try {
483 | // Wait for the next frame to be received from the camera, since we
484 | // don't have it yet.
485 | lock.wait()
486 | } catch (e: InterruptedException) {
487 | Log.d(TAG, "Frame processing loop terminated.", e)
488 | return
489 | }
490 |
491 | }
492 |
493 | if (!active) {
494 | // Exit the loop once this camera source is stopped or released. We check
495 | // this here, immediately after the wait() above, to handle the case where
496 | // setActive(false) had been called, triggering the termination of this
497 | // loop.
498 | return
499 | }
500 |
501 | // Hold onto the frame data locally, so that we can use this for detection
502 | // below. We need to clear pendingFrameData to ensure that this buffer isn't
503 | // recycled back to the camera before we are done using that data.
504 | data = pendingFrameData!!
505 | pendingFrameData = null
506 | }
507 |
508 | // The code below needs to run outside of synchronization, because this will allow
509 | // the camera to add pending frame(s) while we are running detection on the current
510 | // frame.
511 |
512 | try {
513 | synchronized(processorLock) {
514 | Log.d(TAG, "Process an image")
515 | frameProcessor!!.process(
516 | data,
517 | FrameMetadata.Builder()
518 | .setWidth(previewSize!!.width)
519 | .setHeight(previewSize!!.height)
520 | .setRotation(rotation)
521 | .setCameraFacing(cameraFacing)
522 | .build()
523 | )
524 | }
525 | } catch (t: Throwable) {
526 | Log.e(TAG, "Exception thrown from receiver.", t)
527 | } finally {
528 | camera!!.addCallbackBuffer(data.array())
529 | }
530 | }
531 | }
532 | }
533 |
534 | /** Clears the graphic overlay; child classes can perform their own cleanup here as well. */
535 | private fun cleanScreen() {
536 | graphicOverlay.clear()
537 | }
538 |
539 | companion object {
540 | private const val TAG = "MLCameraSource"
541 |
542 | /**
543 | * If the absolute difference between a preview size aspect ratio and a picture size aspect ratio
544 | * is less than this tolerance, they are considered to be the same aspect ratio.
545 | */
546 | private const val ASPECT_RATIO_TOLERANCE = 0.01f
547 |
548 | /**
549 | * Gets the id for the camera specified by the direction it is facing. Returns -1 if no such
550 | * camera was found.
551 | *
552 | * @param facing the desired camera (front-facing or rear-facing)
553 | */
554 | private fun getIdForRequestedCamera(facing: Int): Int {
555 | val cameraInfo = CameraInfo()
556 | for (i in 0 until Camera.getNumberOfCameras()) {
557 | Camera.getCameraInfo(i, cameraInfo)
558 | if (cameraInfo.facing == facing) {
559 | return i
560 | }
561 | }
562 | return -1
563 | }
564 |
565 | /**
566 | * Selects the most suitable preview and picture size, given the desired width and height.
567 | *
568 | *
569 | * Even though we only need to find the preview size, it's necessary to find both the preview
570 | * size and the picture size of the camera together, because these need to have the same aspect
571 | * ratio. On some hardware, if you would only set the preview size, you will get a distorted
572 | * image.
573 | *
574 | * @param camera the camera to select a preview size from
575 | * @param desiredWidth the desired width of the camera preview frames
576 | * @param desiredHeight the desired height of the camera preview frames
577 | * @return the selected preview and picture size pair
578 | */
579 | private fun selectSizePair(camera: Camera, desiredWidth: Int, desiredHeight: Int): SizePair? {
580 | val validPreviewSizes = generateValidPreviewSizeList(camera)
581 |
582 | // The method for selecting the best size is to minimize the sum of the differences between
583 | // the desired values and the actual values for width and height. This is certainly not the
584 | // only way to select the best size, but it provides a decent tradeoff between using the
585 | // closest aspect ratio vs. using the closest pixel area.
586 | var selectedPair: SizePair? = null
587 | var minDiff = Integer.MAX_VALUE
588 | for (sizePair in validPreviewSizes) {
589 | val size = sizePair.previewSize()
590 | val diff = Math.abs(size.width - desiredWidth) + Math.abs(size.height - desiredHeight)
591 | if (diff < minDiff) {
592 | selectedPair = sizePair
593 | minDiff = diff
594 | }
595 | }
596 |
597 | return selectedPair
598 | }
599 |
600 | /**
601 | * Generates a list of acceptable preview sizes. Preview sizes are not acceptable if there is not
602 | * a corresponding picture size of the same aspect ratio. If there is a corresponding picture size
603 | * of the same aspect ratio, the picture size is paired up with the preview size.
604 | *
605 | *
606 | * This is necessary because even if we don't use still pictures, the still picture size must
607 | * be set to a size that is the same aspect ratio as the preview size we choose. Otherwise, the
608 | * preview images may be distorted on some devices.
609 | */
610 | private fun generateValidPreviewSizeList(camera: Camera): List<SizePair> {
611 | val parameters = camera.parameters
612 | val supportedPreviewSizes = parameters.supportedPreviewSizes
613 | val supportedPictureSizes = parameters.supportedPictureSizes
614 | val validPreviewSizes = ArrayList<SizePair>()
615 | for (previewSize in supportedPreviewSizes) {
616 | val previewAspectRatio = previewSize.width.toFloat() / previewSize.height.toFloat()
617 |
618 | // By looping through the picture sizes in order, we favor the higher resolutions.
619 | // We choose the highest resolution in order to support taking the full resolution
620 | // picture later.
621 | for (pictureSize in supportedPictureSizes) {
622 | val pictureAspectRatio = pictureSize.width.toFloat() / pictureSize.height.toFloat()
623 | if (Math.abs(previewAspectRatio - pictureAspectRatio) < ASPECT_RATIO_TOLERANCE) {
624 | validPreviewSizes.add(SizePair(previewSize, pictureSize))
625 | break
626 | }
627 | }
628 | }
629 |
630 | // If there are no picture sizes with the same aspect ratio as any preview sizes, allow all
631 | // of the preview sizes and hope that the camera can handle it. Probably unlikely, but we
632 | // still account for it.
633 | if (validPreviewSizes.size == 0) {
634 | Log.w(TAG, "No preview sizes have a corresponding same-aspect-ratio picture size")
635 | for (previewSize in supportedPreviewSizes) {
636 | // The null picture size will let us know that we shouldn't set a picture size.
637 | validPreviewSizes.add(SizePair(previewSize, null))
638 | }
639 | }
640 |
641 | return validPreviewSizes
642 | }
643 |
644 | /**
645 | * Selects the most suitable preview frames per second range, given the desired frames per second.
646 | *
647 | * @param camera the camera to select a frames per second range from
648 | * @param desiredPreviewFps the desired frames per second for the camera preview frames
649 | * @return the selected preview frames per second range
650 | */
651 | @SuppressLint("InlinedApi")
652 | private fun selectPreviewFpsRange(camera: Camera, desiredPreviewFps: Float): IntArray? {
653 | // The camera API uses integers scaled by a factor of 1000 instead of floating-point frame
654 | // rates.
655 | val desiredPreviewFpsScaled = (desiredPreviewFps * 1000.0f).toInt()
656 |
657 | // The method for selecting the best range is to minimize the sum of the differences between
658 | // the desired value and the upper and lower bounds of the range. This may select a range
659 | // that the desired value is outside of, but this is often preferred. For example, if the
660 | // desired frame rate is 29.97, the range (30, 30) is probably more desirable than the
661 | // range (15, 30).
662 | var selectedFpsRange: IntArray? = null
663 | var minDiff = Integer.MAX_VALUE
664 | val previewFpsRangeList = camera.parameters.supportedPreviewFpsRange
665 | for (range in previewFpsRangeList) {
666 | val deltaMin = desiredPreviewFpsScaled - range[Camera.Parameters.PREVIEW_FPS_MIN_INDEX]
667 | val deltaMax = desiredPreviewFpsScaled - range[Camera.Parameters.PREVIEW_FPS_MAX_INDEX]
668 | val diff = Math.abs(deltaMin) + Math.abs(deltaMax)
669 | if (diff < minDiff) {
670 | selectedFpsRange = range
671 | minDiff = diff
672 | }
673 | }
674 | return selectedFpsRange
675 | }
676 | }
677 | }
678 |
--------------------------------------------------------------------------------
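Note: two computations in this file are easy to check by hand. createPreviewBuffer() sizes each callback buffer from the NV21 bits-per-pixel figure, and selectPreviewFpsRange() scores each supported range by its summed distance from the desired rate. Worked numbers, assuming the requested 640x480 / 20 fps is what the hardware actually grants:

fun main() {
    // NV21 is 12 bits per pixel (ImageFormat.getBitsPerPixel(ImageFormat.NV21)).
    val sizeInBits = 640L * 480L * 12L                        // 3_686_400 bits
    val bufferSize = Math.ceil(sizeInBits / 8.0).toInt() + 1  // 460_800 + 1 = 460_801 bytes
    println("each of the four callback buffers: $bufferSize bytes")

    // selectPreviewFpsRange scales 20 fps to 20_000 and scores candidates by
    // |min - desired| + |max - desired|:
    //   (15_000, 30_000): 5_000 + 10_000 = 15_000   <- selected
    //   (30_000, 30_000): 10_000 + 10_000 = 20_000
}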
/app/src/main/res/layout/activity_main.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="utf-8"?>
2 | <devnibbles.android.facialrecognition.detect.common.CameraSourcePreview
3 |     xmlns:android="http://schemas.android.com/apk/res/android"
4 |     android:id="@+id/preview"
5 |     android:layout_width="match_parent"
6 |     android:layout_height="match_parent">
7 |
8 |     <devnibbles.android.facialrecognition.detect.common.GraphicOverlay
9 |         android:id="@+id/faceOverlay"
10 |         android:layout_width="match_parent"
11 |         android:layout_height="match_parent" />
12 |
13 | </devnibbles.android.facialrecognition.detect.common.CameraSourcePreview>
14 |
--------------------------------------------------------------------------------
/app/src/main/res/mipmap-hdpi/ic_launcher.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apkelly/devnibbles_facial_recognition_with_android/e0e34196e844f4a40769755f824fc86bc68fed53/app/src/main/res/mipmap-hdpi/ic_launcher.png
--------------------------------------------------------------------------------
/app/src/main/res/mipmap-mdpi/ic_launcher.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apkelly/devnibbles_facial_recognition_with_android/e0e34196e844f4a40769755f824fc86bc68fed53/app/src/main/res/mipmap-mdpi/ic_launcher.png
--------------------------------------------------------------------------------
/app/src/main/res/mipmap-xhdpi/ic_launcher.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apkelly/devnibbles_facial_recognition_with_android/e0e34196e844f4a40769755f824fc86bc68fed53/app/src/main/res/mipmap-xhdpi/ic_launcher.png
--------------------------------------------------------------------------------
/app/src/main/res/mipmap-xxhdpi/ic_launcher.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apkelly/devnibbles_facial_recognition_with_android/e0e34196e844f4a40769755f824fc86bc68fed53/app/src/main/res/mipmap-xxhdpi/ic_launcher.png
--------------------------------------------------------------------------------
/app/src/main/res/mipmap-xxxhdpi/ic_launcher.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apkelly/devnibbles_facial_recognition_with_android/e0e34196e844f4a40769755f824fc86bc68fed53/app/src/main/res/mipmap-xxxhdpi/ic_launcher.png
--------------------------------------------------------------------------------
/app/src/main/res/values/colors.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="utf-8"?>
2 | <resources>
3 |     <color name="colorPrimary">#008577</color>
4 |     <color name="colorPrimaryDark">#00574B</color>
5 |     <color name="colorAccent">#D81B60</color>
6 | </resources>
7 |
--------------------------------------------------------------------------------
/app/src/main/res/values/strings.xml:
--------------------------------------------------------------------------------
1 | <resources>
2 |     <string name="app_name">DevNibbles</string>
3 |     <string name="ok">OK</string>
4 |     <string name="permission_camera_rationale">Access to the camera is needed for detection</string>
5 |     <string name="no_camera_permission">This application cannot run because it does not have the camera permission. The application will now exit.</string>
6 |     <string name="low_storage_error">Face detector dependencies cannot be downloaded due to low device storage</string>
7 | </resources>
8 |
--------------------------------------------------------------------------------
/app/src/main/res/values/styles.xml:
--------------------------------------------------------------------------------
1 | <resources>
2 |
3 |     <!-- Base application theme. -->
4 |     <style name="AppTheme" parent="Theme.AppCompat.Light.DarkActionBar">
5 |         <!-- Customize your theme here. -->
6 |         <item name="colorPrimary">@color/colorPrimary</item>
7 |         <item name="colorPrimaryDark">@color/colorPrimaryDark</item>
8 |         <item name="colorAccent">@color/colorAccent</item>
9 |     </style>
10 |
11 | </resources>
12 |
--------------------------------------------------------------------------------
/build.gradle:
--------------------------------------------------------------------------------
1 | // Top-level build file where you can add configuration options common to all sub-projects/modules.
2 |
3 | buildscript {
4 | ext.kotlin_version = '1.3.21'
5 | repositories {
6 | google()
7 | jcenter()
8 |
9 | }
10 | dependencies {
11 | classpath 'com.android.tools.build:gradle:3.3.2'
12 | classpath 'com.google.gms:google-services:4.2.0'
13 | classpath "org.jetbrains.kotlin:kotlin-gradle-plugin:$kotlin_version"
14 | // NOTE: Do not place your application dependencies here; they belong
15 | // in the individual module build.gradle files
16 | }
17 | }
18 |
19 | allprojects {
20 | repositories {
21 | google()
22 | jcenter()
23 |
24 | }
25 | }
26 |
27 | task clean(type: Delete) {
28 | delete rootProject.buildDir
29 | }
30 |
--------------------------------------------------------------------------------
/gradle.properties:
--------------------------------------------------------------------------------
1 | # Project-wide Gradle settings.
2 | # IDE (e.g. Android Studio) users:
3 | # Gradle settings configured through the IDE *will override*
4 | # any settings specified in this file.
5 | # For more details on how to configure your build environment visit
6 | # http://www.gradle.org/docs/current/userguide/build_environment.html
7 | # Specifies the JVM arguments used for the daemon process.
8 | # The setting is particularly useful for tweaking memory settings.
9 | org.gradle.jvmargs=-Xmx1536m
10 | # When configured, Gradle will run in incubating parallel mode.
11 | # This option should only be used with decoupled projects. More details, visit
12 | # http://www.gradle.org/docs/current/userguide/multi_project_builds.html#sec:decoupled_projects
13 | # org.gradle.parallel=true
14 | # AndroidX package structure to make it clearer which packages are bundled with the
15 | # Android operating system, and which are packaged with your app's APK
16 | # https://developer.android.com/topic/libraries/support-library/androidx-rn
17 | android.useAndroidX=true
18 | # Automatically convert third-party libraries to use AndroidX
19 | android.enableJetifier=true
20 | # Kotlin code style for this project: "official" or "obsolete":
21 | kotlin.code.style=official
22 |
--------------------------------------------------------------------------------
/gradle/wrapper/gradle-wrapper.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apkelly/devnibbles_facial_recognition_with_android/e0e34196e844f4a40769755f824fc86bc68fed53/gradle/wrapper/gradle-wrapper.jar
--------------------------------------------------------------------------------
/gradle/wrapper/gradle-wrapper.properties:
--------------------------------------------------------------------------------
1 | #Wed Jan 16 11:05:24 AEDT 2019
2 | distributionBase=GRADLE_USER_HOME
3 | distributionPath=wrapper/dists
4 | zipStoreBase=GRADLE_USER_HOME
5 | zipStorePath=wrapper/dists
6 | distributionUrl=https\://services.gradle.org/distributions/gradle-4.10.1-all.zip
7 |
--------------------------------------------------------------------------------
/gradlew:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env sh
2 |
3 | ##############################################################################
4 | ##
5 | ## Gradle start up script for UN*X
6 | ##
7 | ##############################################################################
8 |
9 | # Attempt to set APP_HOME
10 | # Resolve links: $0 may be a link
11 | PRG="$0"
12 | # Need this for relative symlinks.
13 | while [ -h "$PRG" ] ; do
14 | ls=`ls -ld "$PRG"`
15 | link=`expr "$ls" : '.*-> \(.*\)$'`
16 | if expr "$link" : '/.*' > /dev/null; then
17 | PRG="$link"
18 | else
19 | PRG=`dirname "$PRG"`"/$link"
20 | fi
21 | done
22 | SAVED="`pwd`"
23 | cd "`dirname \"$PRG\"`/" >/dev/null
24 | APP_HOME="`pwd -P`"
25 | cd "$SAVED" >/dev/null
26 |
27 | APP_NAME="Gradle"
28 | APP_BASE_NAME=`basename "$0"`
29 |
30 | # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
31 | DEFAULT_JVM_OPTS=""
32 |
33 | # Use the maximum available, or set MAX_FD != -1 to use that value.
34 | MAX_FD="maximum"
35 |
36 | warn () {
37 | echo "$*"
38 | }
39 |
40 | die () {
41 | echo
42 | echo "$*"
43 | echo
44 | exit 1
45 | }
46 |
47 | # OS specific support (must be 'true' or 'false').
48 | cygwin=false
49 | msys=false
50 | darwin=false
51 | nonstop=false
52 | case "`uname`" in
53 | CYGWIN* )
54 | cygwin=true
55 | ;;
56 | Darwin* )
57 | darwin=true
58 | ;;
59 | MINGW* )
60 | msys=true
61 | ;;
62 | NONSTOP* )
63 | nonstop=true
64 | ;;
65 | esac
66 |
67 | CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
68 |
69 | # Determine the Java command to use to start the JVM.
70 | if [ -n "$JAVA_HOME" ] ; then
71 | if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
72 | # IBM's JDK on AIX uses strange locations for the executables
73 | JAVACMD="$JAVA_HOME/jre/sh/java"
74 | else
75 | JAVACMD="$JAVA_HOME/bin/java"
76 | fi
77 | if [ ! -x "$JAVACMD" ] ; then
78 | die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
79 |
80 | Please set the JAVA_HOME variable in your environment to match the
81 | location of your Java installation."
82 | fi
83 | else
84 | JAVACMD="java"
85 | which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
86 |
87 | Please set the JAVA_HOME variable in your environment to match the
88 | location of your Java installation."
89 | fi
90 |
91 | # Increase the maximum file descriptors if we can.
92 | if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then
93 | MAX_FD_LIMIT=`ulimit -H -n`
94 | if [ $? -eq 0 ] ; then
95 | if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then
96 | MAX_FD="$MAX_FD_LIMIT"
97 | fi
98 | ulimit -n $MAX_FD
99 | if [ $? -ne 0 ] ; then
100 | warn "Could not set maximum file descriptor limit: $MAX_FD"
101 | fi
102 | else
103 | warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT"
104 | fi
105 | fi
106 |
107 | # For Darwin, add options to specify how the application appears in the dock
108 | if $darwin; then
109 | GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
110 | fi
111 |
112 | # For Cygwin, switch paths to Windows format before running java
113 | if $cygwin ; then
114 | APP_HOME=`cygpath --path --mixed "$APP_HOME"`
115 | CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
116 | JAVACMD=`cygpath --unix "$JAVACMD"`
117 |
118 | # We build the pattern for arguments to be converted via cygpath
119 | ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
120 | SEP=""
121 | for dir in $ROOTDIRSRAW ; do
122 | ROOTDIRS="$ROOTDIRS$SEP$dir"
123 | SEP="|"
124 | done
125 | OURCYGPATTERN="(^($ROOTDIRS))"
126 | # Add a user-defined pattern to the cygpath arguments
127 | if [ "$GRADLE_CYGPATTERN" != "" ] ; then
128 | OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
129 | fi
130 | # Now convert the arguments - kludge to limit ourselves to /bin/sh
131 | i=0
132 | for arg in "$@" ; do
133 | CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
134 | CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option
135 |
136 | if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition
137 | eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
138 | else
139 | eval `echo args$i`="\"$arg\""
140 | fi
141 | i=$((i+1))
142 | done
143 | case $i in
144 | (0) set -- ;;
145 | (1) set -- "$args0" ;;
146 | (2) set -- "$args0" "$args1" ;;
147 | (3) set -- "$args0" "$args1" "$args2" ;;
148 | (4) set -- "$args0" "$args1" "$args2" "$args3" ;;
149 | (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
150 | (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
151 | (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
152 | (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
153 | (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
154 | esac
155 | fi
156 |
157 | # Escape application args
158 | save () {
159 | for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done
160 | echo " "
161 | }
162 | APP_ARGS=$(save "$@")
163 |
164 | # Collect all arguments for the java command, following the shell quoting and substitution rules
165 | eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS"
166 |
167 | # by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong
168 | if [ "$(uname)" = "Darwin" ] && [ "$HOME" = "$PWD" ]; then
169 | cd "$(dirname "$0")"
170 | fi
171 |
172 | exec "$JAVACMD" "$@"
173 |
--------------------------------------------------------------------------------
/gradlew.bat:
--------------------------------------------------------------------------------
1 | @if "%DEBUG%" == "" @echo off
2 | @rem ##########################################################################
3 | @rem
4 | @rem Gradle startup script for Windows
5 | @rem
6 | @rem ##########################################################################
7 |
8 | @rem Set local scope for the variables with windows NT shell
9 | if "%OS%"=="Windows_NT" setlocal
10 |
11 | set DIRNAME=%~dp0
12 | if "%DIRNAME%" == "" set DIRNAME=.
13 | set APP_BASE_NAME=%~n0
14 | set APP_HOME=%DIRNAME%
15 |
16 | @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
17 | set DEFAULT_JVM_OPTS=
18 |
19 | @rem Find java.exe
20 | if defined JAVA_HOME goto findJavaFromJavaHome
21 |
22 | set JAVA_EXE=java.exe
23 | %JAVA_EXE% -version >NUL 2>&1
24 | if "%ERRORLEVEL%" == "0" goto init
25 |
26 | echo.
27 | echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
28 | echo.
29 | echo Please set the JAVA_HOME variable in your environment to match the
30 | echo location of your Java installation.
31 |
32 | goto fail
33 |
34 | :findJavaFromJavaHome
35 | set JAVA_HOME=%JAVA_HOME:"=%
36 | set JAVA_EXE=%JAVA_HOME%/bin/java.exe
37 |
38 | if exist "%JAVA_EXE%" goto init
39 |
40 | echo.
41 | echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
42 | echo.
43 | echo Please set the JAVA_HOME variable in your environment to match the
44 | echo location of your Java installation.
45 |
46 | goto fail
47 |
48 | :init
49 | @rem Get command-line arguments, handling Windows variants
50 |
51 | if not "%OS%" == "Windows_NT" goto win9xME_args
52 |
53 | :win9xME_args
54 | @rem Slurp the command line arguments.
55 | set CMD_LINE_ARGS=
56 | set _SKIP=2
57 |
58 | :win9xME_args_slurp
59 | if "x%~1" == "x" goto execute
60 |
61 | set CMD_LINE_ARGS=%*
62 |
63 | :execute
64 | @rem Setup the command line
65 |
66 | set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
67 |
68 | @rem Execute Gradle
69 | "%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%
70 |
71 | :end
72 | @rem End local scope for the variables with windows NT shell
73 | if "%ERRORLEVEL%"=="0" goto mainEnd
74 |
75 | :fail
76 | rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
77 | rem the _cmd.exe /c_ return code!
78 | if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
79 | exit /b 1
80 |
81 | :mainEnd
82 | if "%OS%"=="Windows_NT" endlocal
83 |
84 | :omega
85 |
--------------------------------------------------------------------------------
/settings.gradle:
--------------------------------------------------------------------------------
1 | include ':app'
2 |
--------------------------------------------------------------------------------