├── .gitignore
├── .idea
│   └── codeStyles
│       └── codeStyleConfig.xml
├── README.md
├── app
│   ├── .gitignore
│   ├── build.gradle
│   ├── proguard-rules.pro
│   └── src
│       ├── androidTest
│       │   └── java
│       │       └── net
│       │           └── yanzm
│       │               └── mlkitsample
│       │                   └── ExampleInstrumentedTest.kt
│       ├── main
│       │   ├── AndroidManifest.xml
│       │   ├── java
│       │   │   └── net
│       │   │       └── yanzm
│       │   │           └── mlkitsample
│       │   │               ├── GraphicData.kt
│       │   │               ├── GraphicOverlay.kt
│       │   │               ├── ImagePickFragment.kt
│       │   │               └── MainActivity.kt
│       │   └── res
│       │       ├── drawable-v24
│       │       │   └── ic_launcher_foreground.xml
│       │       ├── drawable
│       │       │   └── ic_launcher_background.xml
│       │       ├── layout
│       │       │   ├── activity_main.xml
│       │       │   └── fragment_image_pick.xml
│       │       ├── mipmap-anydpi-v26
│       │       │   ├── ic_launcher.xml
│       │       │   └── ic_launcher_round.xml
│       │       ├── mipmap-hdpi
│       │       │   ├── ic_launcher.png
│       │       │   └── ic_launcher_round.png
│       │       ├── mipmap-mdpi
│       │       │   ├── ic_launcher.png
│       │       │   └── ic_launcher_round.png
│       │       ├── mipmap-xhdpi
│       │       │   ├── ic_launcher.png
│       │       │   └── ic_launcher_round.png
│       │       ├── mipmap-xxhdpi
│       │       │   ├── ic_launcher.png
│       │       │   └── ic_launcher_round.png
│       │       ├── mipmap-xxxhdpi
│       │       │   ├── ic_launcher.png
│       │       │   └── ic_launcher_round.png
│       │       └── values
│       │           ├── colors.xml
│       │           ├── strings.xml
│       │           └── styles.xml
│       └── test
│           └── java
│               └── net
│                   └── yanzm
│                       └── mlkitsample
│                           └── ExampleUnitTest.kt
├── build.gradle
├── gradle
│   └── wrapper
│       ├── gradle-wrapper.jar
│       └── gradle-wrapper.properties
├── gradlew
├── gradlew.bat
├── screenshots
│   └── firebase_console.png
└── settings.gradle

/.gitignore:
--------------------------------------------------------------------------------
local.properties
ext.properties

# for gradle, IDEA
.gradle
build/
*.iml
!.idea/runConfigurations/*
.idea/*.xml
.idea/.name
.idea/copyright/
.idea/libraries/
.idea/scopes/
.idea/inspectionProfiles/
.idea/dictionaries/
.idea/caches/build_file_checksums.ser
.idea/codeStyles/Project.xml

certificate/
apk/
.DS_Store
gradle.properties

--------------------------------------------------------------------------------
/.idea/codeStyles/codeStyleConfig.xml:
--------------------------------------------------------------------------------
1 | 2 | 3 | 5 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# MLKitSample

The exercises for https://gdg-tokyo.connpass.com/event/95203/ .


## Materials

https://speakerdeck.com/yanzm/first-step-of-ml-kit


## Step 1

1. Clone this project
1. Switch to the start branch: `git checkout start`
1. Launch Android Studio
1. Open the cloned directory via Import project
1. Wait until the Gradle sync finishes
1. (If you like, change the applicationId and the package name)


## Task 1

Create a new project in the Firebase console https://console.firebase.google.com/ .

* Project name : any name you like (e.g. ML Kit Sample)
* Country / region : Japan


## Task 2

1. Add an Android app to the Firebase project you created (select "Add Firebase to your Android app")
2. Download google-services.json and place it directly under the app module
3. Run the app

Add
```
implementation "com.google.firebase:firebase-ml-vision:18.0.2"
```
to the dependencies.
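
For reference, the completed `app/build.gradle` in this repository wires this up as shown below — the ML Kit artifact sits in the `dependencies` block, and the `google-services` plugin, which reads the google-services.json you just downloaded, is applied at the bottom of the file:

```
dependencies {
    ...
    implementation "com.google.firebase:firebase-core:16.0.6"
    implementation "com.google.firebase:firebase-ml-vision:18.0.2"
}
apply plugin: "com.google.gms.google-services"
```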

## Task 3 : Text recognition

https://firebase.google.com/docs/ml-kit/android/recognize-text#on-device

Implement "TODO: 1 on-device text recognition".

```kotlin
detectButton.isEnabled = false
progressBar.visibility = View.VISIBLE

val image = FirebaseVisionImage.fromBitmap(bitmap)

FirebaseVision.getInstance()
    .onDeviceTextRecognizer
    .processImage(image)
    .addOnSuccessListener { texts ->
        detectButton.isEnabled = true
        progressBar.visibility = View.GONE

        for (block in texts.textBlocks) {
            for (line in block.lines) {
                for (element in line.elements) {
                    element.boundingBox?.let {
                        overlay.add(BoxData(element.text, it))
                    }
                }
            }
        }
    }
    .addOnFailureListener { e ->
        e.printStackTrace()
        detectButton.isEnabled = true
        progressBar.visibility = View.GONE
        Toast.makeText(this, e.message, Toast.LENGTH_SHORT).show()
    }
```

## Task 4 : Face detection

https://firebase.google.com/docs/ml-kit/android/detect-faces#on-device

Implement "TODO: 2 on-device face detection".

```kotlin
detectButton.isEnabled = false
progressBar.visibility = View.VISIBLE

val image = FirebaseVisionImage.fromBitmap(bitmap)

FirebaseVision.getInstance()
    .visionFaceDetector
    .detectInImage(image)
    .addOnSuccessListener { faces ->
        detectButton.isEnabled = true
        progressBar.visibility = View.GONE

        for (face in faces) {
            face.boundingBox?.let {
                overlay.add(BoxData(face.smilingProbability.toString(), it))
            }
        }
    }
    .addOnFailureListener { e ->
        e.printStackTrace()
        detectButton.isEnabled = true
        progressBar.visibility = View.GONE
        Toast.makeText(this, e.message, Toast.LENGTH_SHORT).show()
    }
```

## Task 5 : Face detection

Specify options.

* `setPerformanceMode()` chooses between favoring speed and favoring accuracy.
* `setLandmarkMode()` chooses whether to detect the positions of the eyes, ears, nose, cheeks, and mouth.
* `setClassificationMode()` chooses whether to classify the degree of smiling (0f–1f) and how open the eyes are (0f–1f).
* `setMinFaceSize()` sets the minimum size of faces to detect, as a ratio of the image size.
* `enableTracking()` chooses whether to assign an id to each face.

```kotlin
val options = FirebaseVisionFaceDetectorOptions.Builder()
    .setPerformanceMode(FirebaseVisionFaceDetectorOptions.ACCURATE) // or FAST
    .setLandmarkMode(FirebaseVisionFaceDetectorOptions.ALL_LANDMARKS) // or NO_LANDMARKS
    .setClassificationMode(FirebaseVisionFaceDetectorOptions.ALL_CLASSIFICATIONS) // or NO_CLASSIFICATIONS
    .setContourMode(FirebaseVisionFaceDetectorOptions.NO_CONTOURS) // or ALL_CONTOURS
    .setMinFaceSize(0.15f)
    .enableTracking()
    .build()

FirebaseVision.getInstance()
    .getVisionFaceDetector(options)
    .detectInImage(image)
    .addOnSuccessListener { faces ->
        detectButton.isEnabled = true
        progressBar.visibility = View.GONE

        for (face in faces) {
            face.boundingBox?.let {
                overlay.add(BoxData(face.smilingProbability.toString(), it))
            }
        }
    }
    ...
```
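
A rough sketch of what these options unlock when reading each `FirebaseVisionFace` (this is not one of the workshop TODOs; the accessors below are from the firebase-ml-vision 18.x API, so double-check them against the docs):

```kotlin
import com.google.firebase.ml.vision.face.FirebaseVisionFace
import com.google.firebase.ml.vision.face.FirebaseVisionFaceLandmark

// Sketch: reading the extra data the options above enable. Each value is
// only populated when the corresponding mode was turned on in the Builder.
fun logFace(face: FirebaseVisionFace) {
    // setLandmarkMode(ALL_LANDMARKS): landmark positions; null if not found
    val leftEye = face.getLandmark(FirebaseVisionFaceLandmark.LEFT_EYE)
    println("left eye : ${leftEye?.position}")

    // setClassificationMode(ALL_CLASSIFICATIONS): probabilities in 0f..1f,
    // or FirebaseVisionFace.UNCOMPUTED_PROBABILITY when not computed
    println("smiling : ${face.smilingProbability}")
    println("left eye open : ${face.leftEyeOpenProbability}")

    // enableTracking(): a stable id across frames; INVALID_ID when disabled
    if (face.trackingId != FirebaseVisionFace.INVALID_ID) {
        println("tracking id : ${face.trackingId}")
    }
}
```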

## Task 6 : Barcode scanning

https://firebase.google.com/docs/ml-kit/android/read-barcodes#configure-the-barcode-detector

Implement "TODO: 3 on-device barcode scanning".

```kotlin
detectButton.isEnabled = false
progressBar.visibility = View.VISIBLE

val image = FirebaseVisionImage.fromBitmap(bitmap)

FirebaseVision.getInstance()
    .visionBarcodeDetector
    .detectInImage(image)
    .addOnSuccessListener { barcodes ->
        detectButton.isEnabled = true
        progressBar.visibility = View.GONE

        for (barcode in barcodes) {
            barcode.boundingBox?.let {
                overlay.add(BoxData(barcode.rawValue ?: "", it))
            }
        }
    }
    .addOnFailureListener { e ->
        e.printStackTrace()
        detectButton.isEnabled = true
        progressBar.visibility = View.GONE
        Toast.makeText(this, e.message, Toast.LENGTH_SHORT).show()
    }
```

Bonus : specify options. Options let you restrict which barcode formats are detected.

```kotlin
val options = FirebaseVisionBarcodeDetectorOptions.Builder()
    .setBarcodeFormats(
        FirebaseVisionBarcode.FORMAT_EAN_8,
        FirebaseVisionBarcode.FORMAT_EAN_13
    )
    .build()

FirebaseVision.getInstance()
    .getVisionBarcodeDetector(options)
    .detectInImage(image)
    ...
```
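
Each detected `FirebaseVisionBarcode` also exposes its decoded payload in structured form — `MainActivity` in this repository simply prints all of these fields. A minimal sketch of branching on `barcode.valueType` inside the success listener:

```kotlin
import com.google.firebase.ml.vision.barcode.FirebaseVisionBarcode

// Sketch (not one of the workshop TODOs): reading the structured payload.
// The typed accessors (url, wifi, contactInfo, ...) return null when the
// barcode holds a different kind of value.
fun logPayload(barcode: FirebaseVisionBarcode) {
    when (barcode.valueType) {
        FirebaseVisionBarcode.TYPE_URL ->
            println("url : ${barcode.url?.url}")
        FirebaseVisionBarcode.TYPE_WIFI ->
            println("wifi : ${barcode.wifi?.ssid}, ${barcode.wifi?.password}")
        FirebaseVisionBarcode.TYPE_CONTACT_INFO ->
            println("contact : ${barcode.contactInfo?.name?.formattedName}")
        else ->
            println("raw : ${barcode.rawValue}")
    }
}
```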

## Task 7 : Image labeling

https://firebase.google.com/docs/ml-kit/android/label-images#on-device

Implement "TODO: 4 on-device labeling".

Add
```
implementation 'com.google.firebase:firebase-ml-vision-image-label-model:17.0.2'
```
to the dependencies.

```kotlin
detectButton.isEnabled = false
progressBar.visibility = View.VISIBLE

val image = FirebaseVisionImage.fromBitmap(bitmap)

FirebaseVision.getInstance()
    .visionLabelDetector
    .detectInImage(image)
    .addOnSuccessListener { labels ->
        detectButton.isEnabled = true
        progressBar.visibility = View.GONE

        overlay.add(TextsData(labels.map { "${it.label}, ${it.confidence}" }))
    }
    .addOnFailureListener { e ->
        e.printStackTrace()
        detectButton.isEnabled = true
        progressBar.visibility = View.GONE
        Toast.makeText(this, e.message, Toast.LENGTH_SHORT).show()
    }
```

Bonus : specify options.

By default the 10 labels with the highest confidence are returned; if you specify a
ConfidenceThreshold, only labels whose confidence is at or above it are returned.

```kotlin
val options = FirebaseVisionLabelDetectorOptions.Builder()
    .setConfidenceThreshold(0.8f)
    .build()

FirebaseVision.getInstance()
    .getVisionLabelDetector(options)
    .detectInImage(image)
```


## Task 8 : Cloud API

Using the Cloud APIs requires changing the plan to Blaze.
Only try this if you are able to set up billing.

In https://console.firebase.google.com/ , change the plan to Blaze from the item at the very bottom of the left menu.

Click [Develop] - [ML Kit] in the left menu, then click [Cloud API Usage] on the right
and enable the Cloud Vision API.


![Firebase Console](screenshots/firebase_console.png "Firebase Console")


## Task 9 : Text recognition Cloud API

https://firebase.google.com/docs/ml-kit/android/recognize-text#cloud-based

Implement "TODO: 5 cloud text recognition".

```kotlin
detectButton.isEnabled = false
progressBar.visibility = View.VISIBLE

val image = FirebaseVisionImage.fromBitmap(bitmap)

FirebaseVision.getInstance()
    .cloudTextRecognizer
    .processImage(image)
    .addOnSuccessListener { cloudText ->
        detectButton.isEnabled = true
        progressBar.visibility = View.GONE

        for (block in cloudText.textBlocks) {
            for (line in block.lines) {
                for (element in line.elements) {
                    element.boundingBox?.let {
                        overlay.add(BoxData(element.text, it))
                    }
                }
            }
        }
    }
    .addOnFailureListener { e ->
        e.printStackTrace()
        detectButton.isEnabled = true
        progressBar.visibility = View.GONE
        Toast.makeText(this, e.message, Toast.LENGTH_SHORT).show()
    }
```

Bonus : specify options.

```kotlin
val options = FirebaseVisionCloudTextRecognizerOptions.Builder()
    .setModelType(FirebaseVisionCloudTextRecognizerOptions.DENSE_MODEL) // or SPARSE_MODEL (the default)
    .setLanguageHints(listOf("ja"))
    .build()

FirebaseVision.getInstance()
    .getCloudTextRecognizer(options)
    .processImage(image)
    ...
```


## Task 10 : Image labeling Cloud API

https://firebase.google.com/docs/ml-kit/android/label-images#cloud-based

Implement "TODO: 6 cloud labeling".

```kotlin
detectButton.isEnabled = false
progressBar.visibility = View.VISIBLE

val image = FirebaseVisionImage.fromBitmap(bitmap)

FirebaseVision.getInstance()
    .visionCloudLabelDetector
    .detectInImage(image)
    .addOnSuccessListener { labels ->
        detectButton.isEnabled = true
        progressBar.visibility = View.GONE

        overlay.add(TextsData(labels.map { "${it.label}, ${it.confidence}" }))
    }
    .addOnFailureListener { e ->
        e.printStackTrace()
        detectButton.isEnabled = true
        progressBar.visibility = View.GONE
        Toast.makeText(this, e.message, Toast.LENGTH_SHORT).show()
    }
```

Bonus : specify options.

```kotlin
val options = FirebaseVisionCloudDetectorOptions.Builder()
    .setModelType(FirebaseVisionCloudDetectorOptions.LATEST_MODEL)
    .setMaxResults(15)
    .build()

FirebaseVision.getInstance()
    .getVisionCloudLabelDetector(options)
    .detectInImage(image)
    ...
```


## Task 11 : Landmark recognition Cloud API

https://firebase.google.com/docs/ml-kit/android/recognize-landmarks#configure-the-landmark-detector

Implement "TODO: 7 cloud landmark recognition".

```kotlin
detectButton.isEnabled = false
progressBar.visibility = View.VISIBLE

val image = FirebaseVisionImage.fromBitmap(bitmap)

FirebaseVision.getInstance()
    .visionCloudLandmarkDetector
    .detectInImage(image)
    .addOnSuccessListener { labels ->
        detectButton.isEnabled = true
        progressBar.visibility = View.GONE

        labels.forEach {
            if (it.boundingBox != null) {
                overlay.add(
                    BoxData(
                        "${it.landmark}, ${it.confidence}",
                        it.boundingBox!!
                    )
                )
            }
        }
    }
    .addOnFailureListener { e ->
        e.printStackTrace()
        detectButton.isEnabled = true
        progressBar.visibility = View.GONE
        Toast.makeText(this, e.message, Toast.LENGTH_SHORT).show()
    }
```

Bonus : specify options.

```kotlin
val options = FirebaseVisionCloudDetectorOptions.Builder()
    .setModelType(FirebaseVisionCloudDetectorOptions.LATEST_MODEL)
    .setMaxResults(15)
    .build()

FirebaseVision.getInstance()
    .getVisionCloudLandmarkDetector(options)
    .detectInImage(image)
    ...
```
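
Besides the name and bounding box, each `FirebaseVisionCloudLandmark` carries an entity id and one or more geographic coordinates. A minimal sketch of reading them, mirroring the `println` calls in this repository's `MainActivity` (the import path is from the 18.x API — verify against the docs):

```kotlin
import com.google.firebase.ml.vision.cloud.landmark.FirebaseVisionCloudLandmark

// Sketch: the fields MainActivity prints for each detected landmark.
// entityId is a Knowledge Graph id; locations may contain several points.
fun logLandmark(landmark: FirebaseVisionCloudLandmark) {
    println("landmark : ${landmark.landmark}")
    println("entityId : ${landmark.entityId}")
    for (location in landmark.locations) {
        println("location : ${location.latitude}, ${location.longitude}")
    }
}
```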


## Task 12

Declaring the following meta-data in the AndroidManifest makes the on-device ML models download automatically at install time.
Without this setting, a model is downloaded the first time the corresponding on-device API is called.
API requests made before the download has finished are ignored.

```xml
<application ...>
    ...
    <meta-data
        android:name="com.google.firebase.ml.vision.DEPENDENCIES"
        android:value="ocr" />
    <!-- to download multiple models: android:value="ocr,face,barcode,label" -->
</application>
```

* ocr : text recognition
* face : face detection
* barcode : barcode scanning
* label : labeling

--------------------------------------------------------------------------------
/app/.gitignore:
--------------------------------------------------------------------------------
/build
google-services.json

--------------------------------------------------------------------------------
/app/build.gradle:
--------------------------------------------------------------------------------
apply plugin: "com.android.application"
apply plugin: "kotlin-android"
apply plugin: "kotlin-android-extensions"

android {
    compileSdkVersion 28
    defaultConfig {
        applicationId "net.yanzm.mlkitsample"
        minSdkVersion 21
        targetSdkVersion 28
        versionCode 1
        versionName "1.0"
        testInstrumentationRunner "androidx.test.runner.AndroidJUnitRunner"
    }
    buildTypes {
        release {
            minifyEnabled false
            proguardFiles getDefaultProguardFile("proguard-android.txt"), "proguard-rules.pro"
        }
    }
}

dependencies {
    implementation fileTree(dir: "libs", include: ["*.jar"])
    implementation "org.jetbrains.kotlin:kotlin-stdlib-jdk7:$kotlin_version"
    implementation "org.jetbrains.kotlinx:kotlinx-coroutines-android:1.1.0"
    implementation "androidx.appcompat:appcompat:1.1.0-alpha01"
    implementation "androidx.constraintlayout:constraintlayout:2.0.0-alpha3"
    implementation "androidx.core:core-ktx:1.0.1"
    implementation "com.github.bumptech.glide:glide:4.8.0"

    testImplementation "junit:junit:4.12"

    androidTestImplementation "androidx.test:runner:1.1.1"
    androidTestImplementation "androidx.test.espresso:espresso-core:3.1.1"

    implementation "com.google.firebase:firebase-core:16.0.6"
    implementation "com.google.firebase:firebase-ml-vision:18.0.2"
    implementation "com.google.firebase:firebase-ml-vision-image-label-model:17.0.2"
}
apply plugin: "com.google.gms.google-services"

--------------------------------------------------------------------------------
/app/proguard-rules.pro:
--------------------------------------------------------------------------------
# Add project specific ProGuard rules here.
# You can control the set of applied configuration files using the
# proguardFiles setting in build.gradle.
#
# For more details, see
#   http://developer.android.com/guide/developing/tools/proguard.html

# If your project uses WebView with JS, uncomment the following
# and specify the fully qualified class name to the JavaScript interface
# class:
#-keepclassmembers class fqcn.of.javascript.interface.for.webview {
#   public *;
#}

# Uncomment this to preserve the line number information for
# debugging stack traces.
#-keepattributes SourceFile,LineNumberTable

# If you keep the line number information, uncomment this to
# hide the original source file name.
#-renamesourcefileattribute SourceFile

--------------------------------------------------------------------------------
/app/src/androidTest/java/net/yanzm/mlkitsample/ExampleInstrumentedTest.kt:
--------------------------------------------------------------------------------
package net.yanzm.mlkitsample

import androidx.test.InstrumentationRegistry
import androidx.test.runner.AndroidJUnit4

import org.junit.Test
import org.junit.runner.RunWith

import org.junit.Assert.*

/**
 * Instrumented test, which will execute on an Android device.
 *
 * See [testing documentation](http://d.android.com/tools/testing).
 */
@RunWith(AndroidJUnit4::class)
class ExampleInstrumentedTest {
    @Test
    fun useAppContext() {
        // Context of the app under test.
        val appContext = InstrumentationRegistry.getTargetContext()
        assertEquals("net.yanzm.mlkitsample", appContext.packageName)
    }
}

--------------------------------------------------------------------------------
/app/src/main/AndroidManifest.xml:
--------------------------------------------------------------------------------
1 | 2 | 4 | 5 | 6 | 7 | 8 | 15 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 28 | 29 | 30 | 31 | 32 |
--------------------------------------------------------------------------------
/app/src/main/java/net/yanzm/mlkitsample/GraphicData.kt:
--------------------------------------------------------------------------------
package net.yanzm.mlkitsample

import android.graphics.Rect

sealed class GraphicData

data class BoxData(val text: String, val boundingBox: Rect) : GraphicData()

data class TextsData(val texts: List<String>) : GraphicData()

--------------------------------------------------------------------------------
/app/src/main/java/net/yanzm/mlkitsample/GraphicOverlay.kt:
--------------------------------------------------------------------------------
package net.yanzm.mlkitsample

import android.content.Context
import android.graphics.Canvas
import android.graphics.Color
import android.graphics.Paint
import android.graphics.RectF
import android.util.AttributeSet
import android.view.View

class GraphicOverlay(context: Context, attrs: AttributeSet) : View(context, attrs) {
    private val lock = Any()

    private val graphics = mutableSetOf<GraphicData>()

    var targetWidth = 0
    var targetHeight = 0

    fun clear() {
        synchronized(lock) {
            graphics.clear()
        }
        postInvalidate()
    }

    fun add(graphic: GraphicData) {
        synchronized(lock) {
            graphics.add(graphic)
        }
        postInvalidate()
    }

    fun remove(graphic: GraphicData) {
        synchronized(lock) {
            graphics.remove(graphic)
        }
        postInvalidate()
    }

    private val rect = RectF()
    private val rectPaint = Paint().apply {
        color = Color.WHITE
        style = Paint.Style.STROKE
        strokeWidth = 2 * resources.displayMetrics.density
    }

    private val textPaint = Paint().apply {
        color = Color.WHITE
        textSize = 20 * resources.displayMetrics.density
    }

    fun setRectColor(color: Int) {
        rectPaint.color = color
    }

    fun setTextColor(color: Int) {
        textPaint.color = color
    }

    override fun onDraw(canvas: Canvas) {
        super.onDraw(canvas)

        val offsetX = (canvas.width - targetWidth) * 0.5f
        val offsetY = (canvas.height - targetHeight) * 0.5f

        synchronized(lock) {
            for (graphic in graphics) {
                when (graphic) {
                    is BoxData -> {
                        rect.set(graphic.boundingBox)
                        rect.offset(offsetX, offsetY)

                        canvas.drawRect(rect, rectPaint)

                        if (graphic.text.isNotEmpty()) {
                            canvas.drawText(graphic.text, rect.left, rect.bottom, textPaint)
                        }
                    }
                    is TextsData -> {
                        val offset = textPaint.textSize * 1.5f
                        val left = textPaint.textSize * 0.5f
                        var bottom = offset
                        for (text in graphic.texts) {
                            if (text.isNotEmpty()) {
                                canvas.drawText(text, left, bottom, textPaint)
                                bottom += offset
                            }
                        }
                    }
                }
            }
        }
    }
}

--------------------------------------------------------------------------------
/app/src/main/java/net/yanzm/mlkitsample/ImagePickFragment.kt:
--------------------------------------------------------------------------------
package net.yanzm.mlkitsample

import android.Manifest
import android.app.Activity
import android.content.Context
import android.content.Intent
import android.content.pm.PackageManager
import android.net.Uri
import android.os.Bundle
import android.provider.MediaStore
import android.view.LayoutInflater
import android.view.View
import android.view.ViewGroup
import androidx.appcompat.app.AlertDialog
import androidx.core.content.ContextCompat
import androidx.core.content.contentValuesOf
import androidx.fragment.app.Fragment

class ImagePickFragment : Fragment() {

    interface ImagePickListener {
        fun onImagePicked(imageUri: Uri)
    }

    private var listener: ImagePickListener? = null
    private var imageUri: Uri? = null

    override fun onAttach(context: Context) {
        super.onAttach(context)
        listener = context as? ImagePickListener
    }

    override fun onDetach() {
        listener = null
        super.onDetach()
    }

    override fun onCreate(savedInstanceState: Bundle?) {
        super.onCreate(savedInstanceState)
        if (savedInstanceState != null) {
            imageUri = savedInstanceState.getParcelable(KEY_IMAGE_URI)
        }
    }

    override fun onSaveInstanceState(outState: Bundle) {
        super.onSaveInstanceState(outState)
        outState.putParcelable(KEY_IMAGE_URI, imageUri)
    }

    override fun onCreateView(
        inflater: LayoutInflater,
        container: ViewGroup?,
        savedInstanceState: Bundle?
    ): View? {
        return inflater.inflate(R.layout.fragment_image_pick, container, false)
    }

    override fun onActivityCreated(savedInstanceState: Bundle?) {
        super.onActivityCreated(savedInstanceState)

        view!!.setOnClickListener {
            AlertDialog.Builder(requireContext())
                .setItems(arrayOf("Camera", "Gallery")) { _, which ->
                    when (which) {
                        0 -> startImageCaptureIntent()
                        1 -> startGetContentIntent()
                    }
                }
                .show()
        }
    }

    private fun startImageCaptureIntent() {
        if (ContextCompat.checkSelfPermission(
                requireContext(),
                Manifest.permission.WRITE_EXTERNAL_STORAGE
            ) != PackageManager.PERMISSION_GRANTED
        ) {
            requestPermissions(
                arrayOf(Manifest.permission.WRITE_EXTERNAL_STORAGE),
                REQUEST_CODE_PERMISSION
            )
            return
        }

        imageUri = null

        val intent = Intent(MediaStore.ACTION_IMAGE_CAPTURE)
        if (intent.resolveActivity(requireContext().packageManager) != null) {
            val values = contentValuesOf(
                MediaStore.Images.Media.TITLE to "New Picture",
                MediaStore.Images.Media.DESCRIPTION to "From Camera"
            )

            imageUri = requireContext().contentResolver.insert(
                MediaStore.Images.Media.EXTERNAL_CONTENT_URI,
                values
            )

            intent.putExtra(MediaStore.EXTRA_OUTPUT, imageUri)
            startActivityForResult(intent, REQUEST_CODE_IMAGE_CAPTURE)
        }
    }

    private fun startGetContentIntent() {
        val intent = Intent().apply {
            type = "image/*"
            action = Intent.ACTION_GET_CONTENT
        }
        startActivityForResult(
            Intent.createChooser(intent, "Select Picture"),
            REQUEST_CODE_CHOOSE_IMAGE
        )
    }

    override fun onActivityResult(requestCode: Int, resultCode: Int, data: Intent?) {
        super.onActivityResult(requestCode, resultCode, data)

        when (requestCode) {
            REQUEST_CODE_IMAGE_CAPTURE -> {
                if (resultCode == Activity.RESULT_OK) {
                    imageUri?.let {
                        imageUri = null
                        listener?.onImagePicked(it)
                    }
                }
            }
            REQUEST_CODE_CHOOSE_IMAGE -> {
                if (resultCode == Activity.RESULT_OK) {
                    data?.data?.let {
                        listener?.onImagePicked(it)
                    }
                }
            }
        }
    }

    override fun onRequestPermissionsResult(
        requestCode: Int,
        permissions: Array<String>,
        grantResults: IntArray
    ) {
        super.onRequestPermissionsResult(requestCode, permissions, grantResults)
        when (requestCode) {
            REQUEST_CODE_PERMISSION -> {
                if (grantResults[0] == PackageManager.PERMISSION_GRANTED) {
                    startImageCaptureIntent()
                }
            }
        }
    }

    companion object {
        private const val REQUEST_CODE_IMAGE_CAPTURE = 1
        private const val REQUEST_CODE_CHOOSE_IMAGE = 2
        private const val REQUEST_CODE_PERMISSION = 3

        private const val KEY_IMAGE_URI = "image"
    }
}

--------------------------------------------------------------------------------
/app/src/main/java/net/yanzm/mlkitsample/MainActivity.kt:
--------------------------------------------------------------------------------
package net.yanzm.mlkitsample

import android.graphics.Bitmap
import android.net.Uri
import android.os.Bundle
import android.view.View
import android.widget.ArrayAdapter
import android.widget.Toast
import androidx.appcompat.app.AppCompatActivity
import com.bumptech.glide.Glide
import com.google.firebase.ml.vision.FirebaseVision
import com.google.firebase.ml.vision.barcode.FirebaseVisionBarcode
import com.google.firebase.ml.vision.barcode.FirebaseVisionBarcodeDetectorOptions
import com.google.firebase.ml.vision.cloud.FirebaseVisionCloudDetectorOptions
import com.google.firebase.ml.vision.common.FirebaseVisionImage
import com.google.firebase.ml.vision.face.FirebaseVisionFaceDetectorOptions
import com.google.firebase.ml.vision.label.FirebaseVisionLabelDetectorOptions
import com.google.firebase.ml.vision.text.FirebaseVisionCloudTextRecognizerOptions
import kotlinx.android.synthetic.main.activity_main.*
import kotlinx.coroutines.CoroutineScope
import kotlinx.coroutines.Dispatchers
import kotlinx.coroutines.Job
import kotlinx.coroutines.launch
import kotlinx.coroutines.withContext
import kotlin.coroutines.CoroutineContext
import kotlin.math.max

class MainActivity : AppCompatActivity(), ImagePickFragment.ImagePickListener, CoroutineScope {

    private var bitmap: Bitmap? = null

    private lateinit var job: Job
    override val coroutineContext: CoroutineContext
        get() = Dispatchers.Main + job

    override fun onCreate(savedInstanceState: Bundle?) {
        super.onCreate(savedInstanceState)
        setContentView(R.layout.activity_main)
        job = Job()

        val detectors = listOf(
            TEXT_DETECTION,
            CLOUD_TEXT_DETECTION,
            FACE_DETECTION,
            BARCODE_DETECTION,
            LABELING,
            CLOUD_LABELING,
            CLOUD_LANDMARK
        )
        detectorSpinner.adapter =
            ArrayAdapter(this, android.R.layout.simple_spinner_dropdown_item, detectors).apply {
                setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item)
            }

        detectButton.setOnClickListener {
            bitmap?.let { detect(it) }
        }
    }

    override fun onDestroy() {
        job.cancel()
        super.onDestroy()
    }

    override fun onImagePicked(imageUri: Uri) {
        launch {
            bitmap = withContext(Dispatchers.Default) {
                val imageBitmap = Glide.with(this@MainActivity)
                    .asBitmap()
                    .load(imageUri)
                    .submit(imageView.width, imageView.height)
                    .get()

                val scaleFactor = max(
                    imageBitmap.width.toFloat() / imageView.width.toFloat(),
                    imageBitmap.height.toFloat() / imageView.height.toFloat()
                )

                val targetWidth = (imageBitmap.width / scaleFactor).toInt()
                val targetHeight = (imageBitmap.height / scaleFactor).toInt()

                Bitmap.createScaledBitmap(
                    imageBitmap,
                    targetWidth,
                    targetHeight,
                    true
                )
            }

            println("${bitmap!!.width}, ${bitmap!!.height}")

            imageView.setImageBitmap(bitmap)

            overlay.clear()
            overlay.targetWidth = bitmap!!.width
            overlay.targetHeight = bitmap!!.height

            detectButton.isEnabled = true
        }
    }

    private fun detect(bitmap: Bitmap) {
        overlay.clear()

        val detectorName = detectorSpinner.selectedItem as String
        when (detectorName) {
            TEXT_DETECTION -> {
                // on-device text recognition
                // https://firebase.google.com/docs/ml-kit/android/recognize-text#on-device

                detectButton.isEnabled = false
                progressBar.visibility = View.VISIBLE

                val image = FirebaseVisionImage.fromBitmap(bitmap)

                FirebaseVision.getInstance()
                    .onDeviceTextRecognizer
                    .processImage(image)
                    .addOnSuccessListener { texts ->
                        detectButton.isEnabled = true
                        progressBar.visibility = View.GONE

                        for (block in texts.textBlocks) {
                            for (line in block.lines) {
                                for (element in line.elements) {
                                    element.boundingBox?.let {
                                        overlay.add(BoxData(element.text, it))
                                    }
                                }
                            }
                        }
                    }
                    .addOnFailureListener { e ->
                        e.printStackTrace()
                        detectButton.isEnabled = true
                        progressBar.visibility = View.GONE
                        Toast.makeText(this, e.message, Toast.LENGTH_SHORT).show()
                    }
            }
            FACE_DETECTION -> {
                // on-device face detection
                // https://firebase.google.com/docs/ml-kit/android/detect-faces#on-device

                detectButton.isEnabled = false
                progressBar.visibility = View.VISIBLE

                val image = FirebaseVisionImage.fromBitmap(bitmap)

                val options = FirebaseVisionFaceDetectorOptions.Builder()
                    .setPerformanceMode(FirebaseVisionFaceDetectorOptions.ACCURATE) // or FAST
                    .setLandmarkMode(FirebaseVisionFaceDetectorOptions.ALL_LANDMARKS) // or NO_LANDMARKS
                    .setClassificationMode(FirebaseVisionFaceDetectorOptions.ALL_CLASSIFICATIONS) // or NO_CLASSIFICATIONS
                    .setContourMode(FirebaseVisionFaceDetectorOptions.NO_CONTOURS) // or ALL_CONTOURS
                    .setMinFaceSize(0.15f)
                    .enableTracking()
                    .build()

                FirebaseVision.getInstance()
                    // .visionFaceDetector
                    .getVisionFaceDetector(options)
                    .detectInImage(image)
                    .addOnSuccessListener { faces ->
                        detectButton.isEnabled = true
                        progressBar.visibility = View.GONE

                        for (face in faces) {
                            face.boundingBox?.let {
                                overlay.add(BoxData(face.smilingProbability.toString(), it))
                            }
                        }
                    }
                    .addOnFailureListener { e ->
                        e.printStackTrace()
                        detectButton.isEnabled = true
                        progressBar.visibility = View.GONE
                        Toast.makeText(this, e.message, Toast.LENGTH_SHORT).show()
                    }
            }
            BARCODE_DETECTION -> {
                // on-device barcode scanning
                // https://firebase.google.com/docs/ml-kit/android/read-barcodes#configure-the-barcode-detector

                detectButton.isEnabled = false
                progressBar.visibility = View.VISIBLE

                val image = FirebaseVisionImage.fromBitmap(bitmap)

                val options = FirebaseVisionBarcodeDetectorOptions.Builder()
                    .setBarcodeFormats(
                        FirebaseVisionBarcode.FORMAT_EAN_8,
                        FirebaseVisionBarcode.FORMAT_EAN_13
                    )
                    .build()

                FirebaseVision.getInstance()
                    // .visionBarcodeDetector
                    .getVisionBarcodeDetector(options)
                    .detectInImage(image)
                    .addOnSuccessListener { barcodes ->
                        detectButton.isEnabled = true
                        progressBar.visibility = View.GONE

                        for (barcode in barcodes) {
                            barcode.boundingBox?.let {
                                overlay.add(BoxData(barcode.rawValue ?: "", it))
                            }

                            println("format : ${barcode.format}")
                            println("valueType : ${barcode.valueType}")
                            println("rawValue : ${barcode.rawValue}")
                            println("displayValue : ${barcode.displayValue}")
                            println("boundingBox : ${barcode.boundingBox}")
                            println("cornerPoints : ${barcode.cornerPoints}")

                            barcode.calendarEvent?.let {
                                println("description : ${it.description}")
                                println("start : ${it.start?.rawValue}")
                                println("end : ${it.end?.rawValue}")
                                println("status : ${it.status}")
                                println("summary : ${it.summary}")
                                println("location : ${it.location}")
                                println("organizer : ${it.organizer}")
                            }
                            barcode.contactInfo?.let {
                                println("name : ${it.name?.formattedName}")
                                println("organization : ${it.organization}")
                                println("title : ${it.title}")
                                println("addresses : ${it.addresses}")
                                println("emails : ${it.emails}")
println("phones : ${it.phones}") 231 | println("urls : ${it.urls}") 232 | } 233 | barcode.email?.let { 234 | println("email : ${it.address}") 235 | } 236 | barcode.geoPoint?.let { 237 | println("geoPoint : ${it.lat}, ${it.lng}") 238 | } 239 | barcode.phone?.let { 240 | println("phone : ${it.number}") 241 | } 242 | barcode.sms?.let { 243 | println("sms : ${it.message}, ${it.phoneNumber}") 244 | } 245 | barcode.url?.let { 246 | println("url : ${it.title}, ${it.url}") 247 | } 248 | barcode.wifi?.let { 249 | println("encryptionType : ${it.encryptionType}") 250 | println("ssid : ${it.ssid}") 251 | println("password : ${it.password}") 252 | } 253 | } 254 | } 255 | .addOnFailureListener { e -> 256 | e.printStackTrace() 257 | detectButton.isEnabled = true 258 | progressBar.visibility = View.GONE 259 | Toast.makeText(this, e.message, Toast.LENGTH_SHORT).show() 260 | } 261 | } 262 | LABELING -> { 263 | // on-device ラベルづけ 264 | // https://firebase.google.com/docs/ml-kit/android/label-images#on-device 265 | 266 | detectButton.isEnabled = false 267 | progressBar.visibility = View.VISIBLE 268 | 269 | val image = FirebaseVisionImage.fromBitmap(bitmap) 270 | 271 | val options = FirebaseVisionLabelDetectorOptions.Builder() 272 | .setConfidenceThreshold(0.8f) 273 | .build() 274 | 275 | FirebaseVision.getInstance() 276 | // .visionLabelDetector 277 | .getVisionLabelDetector(options) 278 | .detectInImage(image) 279 | .addOnSuccessListener { labels -> 280 | detectButton.isEnabled = true 281 | progressBar.visibility = View.GONE 282 | 283 | labels.forEach { 284 | println("label : ${it.label}") 285 | println("confidence : ${it.confidence}") 286 | println("entityId : ${it.entityId}") 287 | } 288 | 289 | overlay.add(TextsData(labels.map { "${it.label}, ${it.confidence}" })) 290 | } 291 | .addOnFailureListener { e -> 292 | e.printStackTrace() 293 | detectButton.isEnabled = true 294 | progressBar.visibility = View.GONE 295 | Toast.makeText(this, e.message, Toast.LENGTH_SHORT).show() 296 | } 297 | } 298 | CLOUD_TEXT_DETECTION -> { 299 | // cloud テキスト認識 300 | // https://firebase.google.com/docs/ml-kit/android/recognize-text#cloud-based 301 | 302 | detectButton.isEnabled = false 303 | progressBar.visibility = View.VISIBLE 304 | 305 | val image = FirebaseVisionImage.fromBitmap(bitmap) 306 | 307 | val options = FirebaseVisionCloudTextRecognizerOptions.Builder() 308 | .setModelType(FirebaseVisionCloudDetectorOptions.LATEST_MODEL) 309 | .setModelType(FirebaseVisionCloudTextRecognizerOptions.DENSE_MODEL) 310 | .setLanguageHints(listOf("jp")) 311 | .build() 312 | 313 | FirebaseVision.getInstance() 314 | // .cloudTextRecognizer 315 | .getCloudTextRecognizer(options) 316 | .processImage(image) 317 | .addOnSuccessListener { cloudText -> 318 | detectButton.isEnabled = true 319 | progressBar.visibility = View.GONE 320 | 321 | for (block in cloudText.textBlocks) { 322 | for (line in block.lines) { 323 | for (element in line.elements) { 324 | element.boundingBox?.let { 325 | overlay.add(BoxData(element.text, it)) 326 | } 327 | } 328 | } 329 | } 330 | } 331 | .addOnFailureListener { e -> 332 | e.printStackTrace() 333 | detectButton.isEnabled = true 334 | progressBar.visibility = View.GONE 335 | Toast.makeText(this, e.message, Toast.LENGTH_SHORT).show() 336 | } 337 | } 338 | CLOUD_LABELING -> { 339 | // cloud ラベルづけ 340 | // https://firebase.google.com/docs/ml-kit/android/label-images#cloud-based 341 | 342 | detectButton.isEnabled = false 343 | progressBar.visibility = View.VISIBLE 344 | 345 | val image = 

                val options = FirebaseVisionCloudDetectorOptions.Builder()
                    .setModelType(FirebaseVisionCloudDetectorOptions.LATEST_MODEL)
                    .setMaxResults(15)
                    .build()

                FirebaseVision.getInstance()
                    // .visionCloudLabelDetector
                    .getVisionCloudLabelDetector(options)
                    .detectInImage(image)
                    .addOnSuccessListener { labels ->
                        detectButton.isEnabled = true
                        progressBar.visibility = View.GONE

                        labels.forEach {
                            println("label : ${it.label}")
                            println("confidence : ${it.confidence}")
                            println("entityId : ${it.entityId}")
                        }

                        overlay.add(TextsData(labels.map { "${it.label}, ${it.confidence}" }))
                    }
                    .addOnFailureListener { e ->
                        e.printStackTrace()
                        detectButton.isEnabled = true
                        progressBar.visibility = View.GONE
                        Toast.makeText(this, e.message, Toast.LENGTH_SHORT).show()
                    }
            }
            CLOUD_LANDMARK -> {
                // cloud landmark recognition
                // https://firebase.google.com/docs/ml-kit/android/recognize-landmarks#configure-the-landmark-detector

                detectButton.isEnabled = false
                progressBar.visibility = View.VISIBLE

                val image = FirebaseVisionImage.fromBitmap(bitmap)

                val options = FirebaseVisionCloudDetectorOptions.Builder()
                    .setModelType(FirebaseVisionCloudDetectorOptions.LATEST_MODEL)
                    .setMaxResults(15)
                    .build()

                FirebaseVision.getInstance()
                    // .visionCloudLandmarkDetector
                    .getVisionCloudLandmarkDetector(options)
                    .detectInImage(image)
                    .addOnSuccessListener { labels ->
                        detectButton.isEnabled = true
                        progressBar.visibility = View.GONE

                        labels.forEach {
                            if (it.boundingBox != null) {
                                overlay.add(
                                    BoxData(
                                        "${it.landmark}, ${it.confidence}",
                                        it.boundingBox!!
                                    )
                                )
                            }

                            println("boundingBox : ${it.boundingBox}")
                            println("confidence : ${it.confidence}")
                            println("entityId : ${it.entityId}")
                            println("landmark : ${it.landmark}")
                            println("locations : ${it.locations[0].latitude}, ${it.locations[0].longitude}")
                        }
                    }
                    .addOnFailureListener { e ->
                        e.printStackTrace()
                        detectButton.isEnabled = true
                        progressBar.visibility = View.GONE
                        Toast.makeText(this, e.message, Toast.LENGTH_SHORT).show()
                    }
            }
        }
    }

    companion object {
        private const val TEXT_DETECTION = "Text"
        private const val CLOUD_TEXT_DETECTION = "Cloud Text"
        private const val FACE_DETECTION = "Face"
        private const val BARCODE_DETECTION = "Barcode"
        private const val LABELING = "Labeling"
        private const val CLOUD_LABELING = "Cloud Labeling"
        private const val CLOUD_LANDMARK = "Cloud Landmark"
    }
}

--------------------------------------------------------------------------------
/app/src/main/res/drawable-v24/ic_launcher_foreground.xml:
--------------------------------------------------------------------------------
1 | 7 | 12 | 13 | 19 | 22 | 25 | 26 | 27 | 28 | 34 | 35 |
--------------------------------------------------------------------------------
/app/src/main/res/drawable/ic_launcher_background.xml:
--------------------------------------------------------------------------------
1 | 2 | 7 | 10 | 15 | 20 | 25 | 30 | 35 | 40 | 45 | 50 | 55 | 60 | 65 | 70 | 75 | 80 | 85 | 90 | 95 | 100 | 105 | 110 | 115 | 120 | 125 | 130 | 135 | 140 | 145 | 150 | 155 | 160 | 165 | 170 | 171 |
--------------------------------------------------------------------------------
/app/src/main/res/layout/activity_main.xml:
--------------------------------------------------------------------------------
1 | 2 | 8 | 9 | 19 | 20 | 30 | 31 |