├── .gitignore
├── .metadata
├── CHANGELOG.md
├── LICENSE
├── README.md
├── analysis_options.yaml
├── android
│   ├── .gitignore
│   ├── build.gradle
│   ├── gradle.properties
│   ├── gradle
│   │   └── wrapper
│   │       └── gradle-wrapper.properties
│   ├── settings.gradle
│   └── src
│       ├── main
│       │   ├── AndroidManifest.xml
│       │   └── java
│       │       └── flutter
│       │           └── tflite_audio
│       │               ├── AudioChunk.java
│       │               ├── AudioData.java
│       │               ├── AudioFile.java
│       │               ├── AudioProcessing.java
│       │               ├── Debugging.java
│       │               ├── LabelSmoothing.java
│       │               ├── MediaDecoder.java
│       │               ├── Recording.java
│       │               ├── RecordingData.java
│       │               ├── SignalProcessing.java
│       │               ├── TfliteAudioPlugin.java
│       │               └── lib
│       │                   └── jlibrosa-1.1.8-SNAPSHOT-jar-with-dependencies.jar
│       └── test
│           └── java
│               └── flutter
│                   └── tflite_audio
│                       ├── AudioFileTest.java
│                       └── RecordingTest.java
├── example
│   ├── .gitignore
│   ├── .metadata
│   ├── README.md
│   ├── android
│   │   ├── .gitignore
│   │   ├── app
│   │   │   ├── build.gradle
│   │   │   └── src
│   │   │       ├── androidTest
│   │   │       │   └── java
│   │   │       │       └── flutter
│   │   │       │           ├── DartIntegrationTest.java
│   │   │       │           └── tflite_audio_example
│   │   │       │               └── FlutterActivityTest.java
│   │   │       ├── debug
│   │   │       │   └── AndroidManifest.xml
│   │   │       ├── main
│   │   │       │   ├── AndroidManifest.xml
│   │   │       │   ├── java
│   │   │       │   │   └── flutter
│   │   │       │   │       └── tflite_audio_example
│   │   │       │   │           └── MainActivity.java
│   │   │       │   └── res
│   │   │       │       ├── drawable
│   │   │       │       │   └── launch_background.xml
│   │   │       │       ├── mipmap-hdpi
│   │   │       │       │   └── ic_launcher.png
│   │   │       │       ├── mipmap-mdpi
│   │   │       │       │   └── ic_launcher.png
│   │   │       │       ├── mipmap-xhdpi
│   │   │       │       │   └── ic_launcher.png
│   │   │       │       ├── mipmap-xxhdpi
│   │   │       │       │   └── ic_launcher.png
│   │   │       │       ├── mipmap-xxxhdpi
│   │   │       │       │   └── ic_launcher.png
│   │   │       │       └── values
│   │   │       │           └── styles.xml
│   │   │       └── profile
│   │   │           └── AndroidManifest.xml
│   │   ├── build.gradle
│   │   ├── gradle.properties
│   │   ├── gradle
│   │   │   └── wrapper
│   │   │       └── gradle-wrapper.properties
│   │   └── settings.gradle
│   ├── assets
│   │   ├── decoded_wav_label.txt
│   │   ├── decoded_wav_model.tflite
│   │   ├── google_teach_machine_label.txt
│   │   ├── google_teach_machine_model.tflite
│   │   ├── mfcc_label.txt
│   │   ├── mfcc_model.tflite
│   │   ├── sample_audio_16k_mono.wav
│   │   ├── sample_audio_44k_mono.wav
│   │   ├── spectrogram_label.txt
│   │   └── spectrogram_model.tflite
│   ├── audio_recognition_example.jpg
│   ├── ios
│   │   ├── .gitignore
│   │   ├── Flutter
│   │   │   ├── AppFrameworkInfo.plist
│   │   │   ├── Debug.xcconfig
│   │   │   └── Release.xcconfig
│   │   ├── Podfile
│   │   ├── Runner.xcodeproj
│   │   │   ├── project.pbxproj
│   │   │   ├── project.xcworkspace
│   │   │   │   ├── contents.xcworkspacedata
│   │   │   │   └── xcshareddata
│   │   │   │       ├── IDEWorkspaceChecks.plist
│   │   │   │       └── WorkspaceSettings.xcsettings
│   │   │   └── xcshareddata
│   │   │       └── xcschemes
│   │   │           └── Runner.xcscheme
│   │   ├── Runner.xcworkspace
│   │   │   ├── contents.xcworkspacedata
│   │   │   └── xcshareddata
│   │   │       └── IDEWorkspaceChecks.plist
│   │   ├── Runner
│   │   │   ├── AppDelegate.swift
│   │   │   ├── Assets.xcassets
│   │   │   │   ├── AppIcon.appiconset
│   │   │   │   │   ├── Contents.json
│   │   │   │   │   ├── Icon-App-1024x1024@1x.png
│   │   │   │   │   ├── Icon-App-20x20@1x.png
│   │   │   │   │   ├── Icon-App-20x20@2x.png
│   │   │   │   │   ├── Icon-App-20x20@3x.png
│   │   │   │   │   ├── Icon-App-29x29@1x.png
│   │   │   │   │   ├── Icon-App-29x29@2x.png
│   │   │   │   │   ├── Icon-App-29x29@3x.png
│   │   │   │   │   ├── Icon-App-40x40@1x.png
│   │   │   │   │   ├── Icon-App-40x40@2x.png
│   │   │   │   │   ├── Icon-App-40x40@3x.png
│   │   │   │   │   ├── Icon-App-60x60@2x.png
│   │   │   │   │   ├── Icon-App-60x60@3x.png
│   │   │   │   │   ├── Icon-App-76x76@1x.png
│   │   │   │   │   ├── Icon-App-76x76@2x.png
│   │   │   │   │   └── Icon-App-83.5x83.5@2x.png
│   │   │   │   └── LaunchImage.imageset
│   │   │   │       ├── Contents.json
│   │   │   │       ├── LaunchImage.png
│   │   │   │       ├── LaunchImage@2x.png
│   │   │   │       ├── LaunchImage@3x.png
│   │   │   │       └── README.md
│   │   │   ├── Base.lproj
│   │   │   │   ├── LaunchScreen.storyboard
│   │   │   │   └── Main.storyboard
│   │   │   ├── Info.plist
│   │   │   └── Runner-Bridging-Header.h
│   │   └── RunnerTests
│   │       ├── AudioFileTest.swift
│   │       └── RecordingTest.swift
│   ├── lib
│   │   └── main.dart
│   └── pubspec.yaml
├── ios
│   ├── .gitignore
│   ├── Assets
│   │   └── .gitkeep
│   ├── Classes
│   │   ├── LabelSmoothing.swift
│   │   ├── SwiftTfliteAudioPlugin.swift
│   │   ├── TfliteAudioPlugin.h
│   │   ├── TfliteAudioPlugin.m
│   │   └── processing
│   │       ├── AudioFile.swift
│   │       ├── AudioFileData.swift
│   │       ├── Recording.swift
│   │       └── RecordingData.swift
│   └── tflite_audio.podspec
├── lib
│   └── tflite_audio.dart
├── pictures
│   ├── deployment-target.png
│   ├── finish.png
│   ├── model-label-asset.png
│   ├── start.png
│   └── tflite-select-ops-installation.png
├── pubspec.yaml
└── test
    ├── tflite_audio_channel_test.dart
    └── tflite_audio_stream_test.dart
/.gitignore:
--------------------------------------------------------------------------------
1 | # Miscellaneous
2 | *.class
3 | *.lock
4 | *.log
5 | *.pyc
6 | *.swp
7 | .DS_Store
8 | .atom/
9 | .buildlog/
10 | .history
11 | .svn/
12 | .classpath
13 | .project
14 | .settings
15 |
16 | # IntelliJ related
17 | *.iml
18 | *.ipr
19 | *.iws
20 | .idea/
21 |
22 | # Visual Studio Code related
23 | .vscode/
24 |
25 | # Flutter repo-specific
26 | /bin/cache/
27 | /bin/mingit/
28 | /dev/benchmarks/mega_gallery/
29 | /dev/bots/.recipe_deps
30 | /dev/bots/android_tools/
31 | /dev/docs/doc/
32 | /dev/docs/flutter.docs.zip
33 | /dev/docs/lib/
34 | /dev/docs/pubspec.yaml
35 | /dev/integration_tests/**/xcuserdata
36 | /dev/integration_tests/**/Pods
37 | /packages/flutter/coverage/
38 | version
39 |
40 | # packages file containing multi-root paths
41 | .packages.generated
42 |
43 | # Flutter/Dart/Pub related
44 | **/doc/api/
45 | **/ios/Flutter/.last_build_id
46 | .dart_tool/
47 | .flutter-plugins
48 | .packages
49 | .pub-cache/
50 | .pub/
51 | build/
52 | flutter_*.png
53 | linked_*.ds
54 | unlinked.ds
55 | unlinked_spec.ds
56 |
57 | # Android related
58 | **/android/**/gradle-wrapper.jar
59 | **/android/.gradle
60 | **/android/captures/
61 | **/android/gradlew
62 | **/android/gradlew.bat
63 | **/android/local.properties
64 | **/android/**/GeneratedPluginRegistrant.java
65 | **/android/key.properties
66 | *.jks
67 |
68 | # iOS/XCode related
69 | **/ios/**/*.mode1v3
70 | **/ios/**/*.mode2v3
71 | **/ios/**/*.moved-aside
72 | **/ios/**/*.pbxuser
73 | **/ios/**/*.perspectivev3
74 | **/ios/**/*sync/
75 | **/ios/**/.sconsign.dblite
76 | **/ios/**/.tags*
77 | **/ios/**/.vagrant/
78 | **/ios/**/DerivedData/
79 | **/ios/**/Icon?
80 | **/ios/**/Pods/
81 | **/ios/**/.symlinks/
82 | **/ios/**/profile
83 | **/ios/**/xcuserdata
84 | **/ios/.generated/
85 | **/ios/Flutter/App.framework
86 | **/ios/Flutter/Flutter.framework
87 | **/ios/Flutter/Generated.xcconfig
88 | **/ios/Flutter/app.flx
89 | **/ios/Flutter/app.zip
90 | **/ios/Flutter/flutter_assets/
91 | **/ios/Flutter/flutter_export_environment.sh
92 | **/ios/ServiceDefinitions.json
93 | **/ios/Runner/GeneratedPluginRegistrant.*
94 |
95 | # Coverage
96 | coverage/
97 |
98 | # Exceptions to above rules.
99 | !**/ios/**/default.mode1v3
100 | !**/ios/**/default.mode2v3
101 | !**/ios/**/default.pbxuser
102 | !**/ios/**/default.perspectivev3
103 | !/packages/flutter_tools/test/data/dart_dependencies_test/**/.packages
--------------------------------------------------------------------------------
/.metadata:
--------------------------------------------------------------------------------
1 | # This file tracks properties of this Flutter project.
2 | # Used by Flutter tool to assess capabilities and perform upgrades etc.
3 | #
4 | # This file should be version controlled and should not be manually edited.
5 |
6 | version:
7 | revision: f139b11009aeb8ed2a3a3aa8b0066e482709dde3
8 | channel: stable
9 |
10 | project_type: plugin
11 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | ## 0.3.0
2 | * BREAKING CHANGE: Recording bufferSize now takes in 2x the number of samples. To keep the same recording length, simply divide your previous bufferSize by 2.
3 | * Experimental: Support MFCC, melSpectrogram and spectrogram inputs
4 | * Feature: Can automatically or manually set audio length
5 | * Feature: Can automatically or manually transpose input shape
6 | * Improvement: Stability of asynchronous operations with RxJava and RxSwift
7 | * Improvement: (iOS) Removed meta info when extracting data from an audio file.
8 | * Improvement: (Android) Splicing algorithm passes all test cases. Audio recognition should now be more accurate.
9 | * Fixed: (iOS) Duplicate symbol error. Set version of TensorFlowLite to 2.6.0. Problem found [here][i25].
10 | * Fixed: (Android & iOS) Incorrect padding when splicing audio file. All test cases have passed.
11 |
12 | [i25]: https://github.com/Caldarie/flutter_tflite_audio/issues/25
13 |
14 | ## 0.2.2+4
15 | * Handled NaN exception for raw output on Swift
16 |
17 | ## 0.2.2+3
18 | * Hot fixed iOS issue where it would record indefinitely.
19 |
20 | ## 0.2.2+2
21 | * Hot fixed missing AudioProcessing class.
22 |
23 | ## 0.2.2+1
24 | * Hot fixed issue with unresponsive forced stop recognition.
25 |
26 | ## 0.2.2
27 | * Feature: Added ability to recognise stored audio files
28 | * Breaking Change: RecordingLength will no longer be required as a parameter.
29 | * Fixed: NaN output for bufferRates that are not divisible by audioLength
30 | * Fixed: Android permission error when permission is granted outside the app.
31 |
32 | ## 0.2.1+2
33 | * Fixed NaN raw score output for Android.
34 |
35 | ## 0.2.1+1
36 | * Fixed inaccurate numOfInference count for iOS and Android.
37 |
38 | ## 0.2.1
39 | * Improved recognition accuracy for Google Teachable Machine models
40 | * Fixed memory crash on android
41 | * Improved memory performance on iOS
42 | * Added feature to output raw scores
43 | * Moved inputType to loadModel() instead of startAudioRecognition()
44 |
45 | ## 0.2.0
46 | * Fixed crash on Android when force stopping recognition
47 | * Improved recognition latency on Android by reducing the number of event calls.
48 |
49 | ## 0.1.9
50 | * Added support for android V2 embedding
51 | * Breaking change - no longer supports deprecated versions of the Android embedding (pre-1.12)
52 |
53 | ## 0.1.8+2
54 | * Fixed null safety incompatibility with example
55 |
56 | ## 0.1.8+1
57 | * Fixed a problem with bridging NSNumber to Float
58 | * Merged rawAudioRecognize() and decodedWavRecognize() on native platforms
59 | * Set detection parameters to 0 for better performance.
60 |
61 | ## 0.1.8
62 | * Added null safety compatibility
63 |
64 | ## 0.1.7+1
65 | * Hotfixed iOS crash when casting double to float for detectionThreshold
66 |
67 | ## 0.1.7
68 | * Fixed iOS bug where the stream wouldn't close when permission has been denied.
69 | * Added feature where you can adjust the detection sensitivity of the model
70 |
71 | ## 0.1.6+2
72 | * Fixed podspec error
73 | * Fixed iOS incompatibility with Flutter 2.0.0
74 |
75 | ## 0.1.6+1
76 | * Hotfixed missing value for recording.
77 |
78 | ## 0.1.6
79 | * bufferSize no longer needs to be divisible by the recording length.
80 |
81 | ## 0.1.5+3
82 | * Fixed major Android crash, where forcibly stopping the stream caused recorder.stop() to be called prematurely.
83 | * Fixed minor iOS crash, where forcibly stopping the stream during recognition returned a nil exception.
84 | * Cleaned up example for easy switching between decodedWav and Google's Teachable Machine model
85 |
86 | ## 0.1.5+2
87 | * Disabled Google's Teachable Machine by default to reduce app footprint. (This can be enabled manually)
88 | * Adjusted example's values to improve inference accuracy
89 |
90 | ## 0.1.5+1
91 | * Added documentation
92 | * Added example model from Google's Teachable Machine.
93 | * Fixed iOS crash when loading text file with empty elements.
94 |
95 | ## 0.1.5
96 | * Added support for Google Teachable Machine models.
97 | * Fixed inaccurate readings when recording
98 | * Added feature to switch between decodedWav and Google's Teachable Machine models.
99 |
100 | ## 0.1.4
101 | * Added a new feature where you can run multiple inferences per recording.
102 | * Replaced future with stream when getting results from inferences
103 | * Added a button to cancel the stream / inference
104 | * Removed unnecessary code for easier reading.
105 |
106 | ## 0.1.3+1
107 | * Used reusable widgets for easier-to-read code.
108 | * Added some documentation
109 |
110 | ## 0.1.3
111 | * Hotfix for crash when permission has been denied.
112 | * Added the key 'hasPermission' for the future startAudioRecognition().
113 | * Added feature in example where it'll show inference times
114 |
115 | ## 0.1.2
116 | * Instead of returning a single string value, the future startAudioRecognition() now returns a map with the following keys:
117 | - recognitionResult
118 | - inferenceTime
119 | * Fixed issue in example where pressing the record button multiple times will crash the app.
120 | * Added feature in example where pressing the recording button changes color.
121 |
122 | ## 0.1.1
123 | * Made some fixes to make options explicit
124 | * Added alert dialog when permission is denied.
125 |
126 | ## 0.1.0
127 | * Added iOS support
128 |
129 | ## 0.0.4
130 | * Added the following arguments to the future startAudioRecognition():
131 | - sampleRate
132 | - audioLength
133 | - bufferSize
134 |
135 | ## 0.0.3
136 | * Merged permission and audio recognition futures into one future.
137 |
138 | ## 0.0.2
139 | * Fixed image url
140 |
141 | ## 0.0.1
142 |
143 | * Initial release.
144 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 | Copyright (c) 2020 Michael Nguyen
3 |
4 | Permission is hereby granted, free of charge, to any person
5 | obtaining a copy of this software and associated documentation
6 | files (the "Software"), to deal in the Software without restriction,
7 | including without limitation the rights to use, copy, modify, merge,
8 | publish, distribute, sublicense, and/or sell copies of the Software,
9 | and to permit persons to whom the Software is furnished to do so,
10 | subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included
13 | in all copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
18 | IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
19 | DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 | OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 | USE OR OTHER DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # TFlite Audio Plugin for Flutter
2 |
3 | [](https://pub.dev/packages/tflite_audio)
4 | [](https://opensource.org/licenses/MIT)
5 | [](https://pub.dev/packages/effective_dart)
6 |
7 |
8 |
9 | Audio classification TFLite package for Flutter (iOS & Android). Also supports Google Teachable Machine models.
10 |
11 | If you are a complete newbie to audio classification, you can read the tutorial [here](https://carolinamalbuquerque.medium.com/audio-recognition-using-tensorflow-lite-in-flutter-application-8a4ad39964ae). Credit to [Carolina](https://github.com/cmalbuquerque) for writing a comprehensive article.
12 |
13 | To keep this project alive, consider giving a star or a like. Pull requests or bug reports are also welcome.
14 |
15 |
16 |
17 | Recording | Inference result
18 | :-------------------------:|:-------------------------:
19 |  | 
20 |
21 |
22 |
23 | ## Table of Contents
24 |
25 | * [About this plugin](#about-this-plugin)
26 | * [Known Issues/Commonly asked questions](#known-issuescommonly-asked-questions)
27 | * [Please read if you are using Google's Teachable Machine. Otherwise skip.](#please-read-if-you-are-using-googles-teachable-machine-otherwise-skip)
28 | * [How to add tflite model and label to flutter](#how-to-add-tflite-model-and-label-to-flutter)
29 | * [How to use this plugin](#how-to-use-this-plugin)
30 | * [Rough guide on parameters](#rough-guide-on-the-parameters)
31 | * [Android Installation & Permissions](#android-installation--permissions)
32 | * [iOS Installation & Permissions](#ios-installation--permissions)
33 | * [References](#references)
34 |
35 |
36 |
37 | ## About This Plugin
38 |
39 | ### The plugin has several features:
40 |
41 | 1. Audio recognition for stored audio files. (Only mono wav files for now)
42 |
43 | 2. Audio recognition for recordings.
44 |
45 | 3. Tunable parameters for recording/inferences
46 | * Please look at the [parameters](#rough-guide-on-the-parameters) below for more information.
47 |
48 | 4. Automatically reshape/transpose audio inputs.
49 |
50 |
51 |
52 | ### This plugin can support several model types:
53 |
54 | 1. Google Teachable Machine (Raw audio input)
55 |
56 | * For beginners with little to no machine learning knowledge. You can read the tutorial [here](https://carolinamalbuquerque.medium.com/audio-recognition-using-tensorflow-lite-in-flutter-application-8a4ad39964ae).
57 | * Training can be done [here](https://teachablemachine.withgoogle.com/train/audio)
58 |
59 | 2. Raw audio input.
60 |
61 | * Can recognize the following inputs: float32[audioLength, 1] or float32[1, audioLength]
62 | * For more information on how to train your own model, take a look [here](https://github.com/tensorflow/examples/tree/master/lite/examples/speech_commands/ml).
63 |
64 | 3. Decoded wav input.
65 |
66 | * Supports two inputs: float32[audioLength, 1] and int32[1]
67 | * For more information on how to train your own model, take a look [here](https://github.com/tensorflow/docs/blob/master/site/en/r1/tutorials/sequences/audio_recognition.md)
68 | * To train a decoded wave with MFCC, take a look [here](https://github.com/tensorflow/tensorflow/tree/r1.15/tensorflow/examples/speech_commands)
69 |
70 | 4. **(Experimental feature)** Spectrogram, mel spectrogram, and MFCC inputs.
71 |
72 | * Please note that this feature is experimental, and results may not be accurate compared to raw audio / decoded wav.
73 | * A spectrogram model can be trained by following this [tutorial](https://www.tensorflow.org/tutorials/audio/simple_audio).
74 |
75 | 5. **(Work-in-progress feature)** Multiple inputs and outputs.
76 |
77 |
78 |
79 | ## Known Issues/Commonly asked questions
80 |
81 | 1. **How to adjust the recording length/time**
82 |
83 | There are two ways to adjust the recording length/time:
84 |
85 | * You can increase the recording time by adjusting the bufferSize to a lower value.
86 |
87 | * You can also increase recording time by lowering the sample rate.
88 |
89 | **Note:** Stretching the bufferSize too low will cause problems with model accuracy. In that case, you may want to consider lowering your sample rate as well. Likewise, a very low sample rate can also cause problems with accuracy. It is your job to find the sweet spot for both values, as in the sketch below.
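
   As a minimal sketch (the values here are placeholders, not recommendations; `sampleRate` and `bufferSize` are existing parameters of `startAudioRecognition()`):

   ```dart
   // Hypothetical tuning example: a lower bufferSize takes longer to fill,
   // which lengthens each recording, and a lower sampleRate stretches the
   // recording time further.
   final recognitionStream = TfliteAudio.startAudioRecognition(
     sampleRate: 16000, // e.g. lowered from 44100 to lengthen recording time
     bufferSize: 2000,  // lower value = longer recording time
   );
   ```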
90 |
91 | 2. **How to reduce false positives in my model**
92 |
93 | To reduce false positives, you may want to adjust the default values of `detectionThreshold=0.3` and `averageWindowDuration=1000` to higher values. Good values for the two are `0.7` and `1500`, respectively. For more details about these parameters, please visit this [section](#rough-guide-on-the-parameters).
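
   As a hedged sketch (the defaults are the ones quoted above; the call shape is assumed to match the plugin's example app):

   ```dart
   // Raising both values makes detection stricter: scores are averaged over
   // a longer window and must clear a higher confidence threshold, so brief
   // spurious spikes are less likely to be reported as a recognition.
   final recognitionStream = TfliteAudio.startAudioRecognition(
     sampleRate: 16000,
     bufferSize: 2000,
     detectionThreshold: 0.7,     // default: 0.3
     averageWindowDuration: 1500, // default: 1000 (ms)
   );
   ```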
94 |
95 | 3. **I am getting build errors on iOS**
96 |
97 | There are several ways to fix this:
98 |
99 | * Some have reported fixing this issue by replacing the following line:
100 |
101 | ```ruby
102 | target 'Runner' do
103 | use_frameworks!
104 | use_modular_headers!
105 | #pod 'TensorFlowLiteSelectTfOps' #Old line
106 | pod 'TensorFlowLiteSelectTfOps', '~> 2.6.0' # New line
107 |
108 | flutter_install_all_ios_pods File.dirname(File.realpath(__FILE__))
109 | end
110 | ```
111 |
112 | * Others have fixed this issue by building the app without the line `pod 'TensorFlowLiteSelectTfOps'`, then rebuilding the app after re-adding the line.
113 |
114 | * Remember to run the following below:
115 |
116 | ```
117 | 1. cd into the ios folder
118 |
119 | 2. Run `flutter pub get` on terminal
120 |
121 | 3. Run `pod install` on terminal
122 |
123 | 4. Run `flutter clean` on terminal
124 |
125 | 5. Run `flutter run` on terminal. All done!
126 | ```
127 |
128 | 4. **I am getting a TensorFlow Lite error on iOS: "Regular TensorFlow ops are not supported by this interpreter. Make sure you apply/link the Flex delegate before inference"**
129 |
130 | * Please make sure that you have enabled ops-select on your [podfile - step 4 & Xcode - step 5](#ios-if-you-are-using-googles-teachable-machine-model-otherwise-skip) and [build gradle - step 3](#android-if-you-are-using-googles-teachable-machine-otherwise-skip)
131 |
132 | * If you tried the above, please run the example on a device (not an emulator). If you still receive this error, it's very likely that there's an issue with your CocoaPods or Xcode configuration. Please check [issue #7](https://github.com/Caldarie/flutter_tflite_audio/issues/7)
133 |
134 | * If you received this error from your custom model (not GTM), it's likely that you're using TensorFlow operators that are unsupported by tflite, as found in [issue #5](https://github.com/Caldarie/flutter_tflite_audio/issues/5#issuecomment-789260402). For more details on which operators are supported, look at the official documentation [here](https://www.tensorflow.org/lite/guide/ops_compatibility)
135 |
136 | * Take a look at question 3 above if none of the above works.
137 |
138 | 5. **(iOS) App crashes when running Google's Teachable Machine model**
139 |
140 | Please run your simulation on an actual iOS device. Running on M1 Macs should also be fine.
141 |
142 | As of this moment, there's [limited support](https://github.com/tensorflow/tensorflow/issues/44997#issuecomment-734001671) for x86_64 architectures from the Tensorflow Lite select-ops framework. If you absolutely need to run it on an emulator, you can consider building the select ops framework yourself. Instructions can be found [here](https://www.tensorflow.org/lite/guide/ops_select#ios)
143 |
144 | 6. **(Android) Fatal signal 11 (SIGSEGV), code 1 (SEGV_MAPERR), fault addr 0xfffffff4 in tid 5403**
145 |
146 | It seems the latest tflite package for Android is causing this issue. Until it is fixed, please run this package on an actual Android device.
147 |
148 |
149 |
150 | ## Please Read If You Are Using Google's Teachable Machine. (Otherwise Skip)
151 |
152 |
153 |
154 | **BE AWARE:** Google's Teachable Machine requires [select TensorFlow operators](https://www.tensorflow.org/lite/guide/ops_select#using_bazel_xcode) to work. This feature is experimental and will cause the following issues:
155 |
156 | 1. It will increase the overall size of your app. If this is unacceptable for you, it's recommended that you build your own custom model. Tutorials can be found in the [About this plugin section](#about-this-plugin)
157 |
158 | 2. iOS emulators do not work due to limited support for x86_64 architectures. You will need to run your simulation on an actual device. The issue can be found [here](https://github.com/tensorflow/tensorflow/issues/44997)
159 |
160 | 3. You will need to manually implement ops-select on your [podfile - step 4 & Xcode - step 5](#note-skip-below-if-your-are-not-using-google-teachable-machine-ios) and [build gradle - step 3](#note-skip-below-if-your-are-not-using-google-teachable-machine-android)
161 |
162 |
163 |
164 | ## How to add tflite model and label to flutter:
165 |
166 |
167 |
168 | 1. Create an assets folder and then place your custom tflite model and labels inside.
169 |
170 | 
171 |
172 | 2. In pubspec.yaml, link your tflite model and label under 'assets'. For example:
173 |
174 | ```yaml
175 | assets:
176 | - assets/decoded_wav_model.tflite
177 | - assets/decoded_wav_label.txt
178 |
179 | ```
180 |
181 |
182 |
183 | ## How to use this plugin
184 |
185 |
186 |
187 | Please look at the [example](https://github.com/Caldarie/flutter_tflite_audio/tree/master/example) to see how to implement these futures.
188 |
189 |
190 | 1. To add the package to pubspec.yaml, open your terminal and run this line in your Flutter project:
191 |
192 | ```
193 | flutter pub add tflite_audio
194 | ```
195 |
196 | 2. Import the plugin. For example:
197 |
198 | ```
199 | import 'package:tflite_audio/tflite_audio.dart';
200 | ```
201 |
202 |
203 | 3. To load your model:
204 |
205 |
206 | ```dart
207 | //Example for decodedWav models
208 | TfliteAudio.loadModel(
209 | model: 'assets/conv_actions_frozen.tflite',
210 | label: 'assets/conv_actions_label.txt',
211 | inputType: 'decodedWav');
212 |
213 |
214 | //Example for Google's Teachable Machine models
215 | TfliteAudio.loadModel(
216 | model: 'assets/google_teach_machine_model.tflite',
217 | label: 'assets/google_teach_machine_label.txt',
218 | inputType: 'rawAudio');
219 |
220 | //Example if you want to take advantage of all optional parameters from loadModel()
221 | TfliteAudio.loadModel(
222 | model: 'assets/conv_actions_frozen.tflite',
223 | label: 'assets/conv_actions_label.txt',
224 | inputType: 'decodedWav',
225 | outputRawScores: false,
226 | numThreads: 1,
227 | isAsset: true,
228 | );
229 | ```
230 |
231 |
232 | 4. To start and listen to the stream for inference results:
233 |
234 | * Declare stream value
235 | ```dart
236 | Stream