├── .editorconfig
├── .github
├── CONTRIBUTING.md
├── ISSUE_TEMPLATE.md
└── PULL_REQUEST_TEMPLATE.md
├── .gitignore
├── .jshintrc
├── CHANGELOG.md
├── LICENSE
├── README.md
├── package.json
├── plugin.xml
├── src
├── android
│ └── SpeechRecognition.java
└── ios
│ ├── Headers
│ ├── ISSpeechRecognition.h
│ ├── ISSpeechRecognitionLocales.h
│ ├── ISSpeechRecognitionResult.h
│ ├── ISSpeechSynthesis.h
│ ├── ISSpeechSynthesisVoices.h
│ └── iSpeechSDK.h
│ ├── SpeechRecognition.h
│ ├── SpeechRecognition.m
│ ├── iSpeechSDK.bundle
│ ├── ISPopupBackground.png
│ ├── ISPopupBackground@2x.png
│ ├── ISPopupButton.png
│ ├── ISPopupButton@2x.png
│ ├── ISPopupButtonPress.png
│ ├── ISPopupButtonPress@2x.png
│ ├── ISPopupDefaultButton.png
│ ├── ISPopupDefaultButton@2x.png
│ ├── Info.plist
│ ├── Thumbs.db
│ ├── ar.lproj
│ │ └── Localizable.strings
│ ├── cs.lproj
│ │ └── Localizable.strings
│ ├── da.lproj
│ │ └── Localizable.strings
│ ├── de.lproj
│ │ └── Localizable.strings
│ ├── en.lproj
│ │ └── Localizable.strings
│ ├── es.lproj
│ │ └── Localizable.strings
│ ├── fail.wav
│ ├── failed.png
│ ├── failed@2x.png
│ ├── fi.lproj
│ │ └── Localizable.strings
│ ├── fr.lproj
│ │ └── Localizable.strings
│ ├── hu.lproj
│ │ └── Localizable.strings
│ ├── iSpeechLogo.png
│ ├── iSpeechLogo@2x.png
│ ├── it.lproj
│ │ └── Localizable.strings
│ ├── ja.lproj
│ │ └── Localizable.strings
│ ├── ko.lproj
│ │ └── Localizable.strings
│ ├── microphone.png
│ ├── microphone@2x.png
│ ├── nb.lproj
│ │ └── Localizable.strings
│ ├── nl.lproj
│ │ └── Localizable.strings
│ ├── pl.lproj
│ │ └── Localizable.strings
│ ├── pt-PT.lproj
│ │ └── Localizable.strings
│ ├── pt.lproj
│ │ └── Localizable.strings
│ ├── ru.lproj
│ │ └── Localizable.strings
│ ├── startRecord.wav
│ ├── stopRecord.wav
│ ├── success.png
│ ├── success.wav
│ ├── success@2x.png
│ ├── sv.lproj
│ │ └── Localizable.strings
│ ├── tr.lproj
│ │ └── Localizable.strings
│ ├── zh-Hans.lproj
│ │ └── Localizable.strings
│ └── zh-Hant.lproj
│ │ └── Localizable.strings
│ └── libiSpeechSDK.a
└── www
├── SpeechGrammar.js
├── SpeechGrammarList.js
├── SpeechRecognition.js
├── SpeechRecognitionAlternative.js
├── SpeechRecognitionError.js
├── SpeechRecognitionEvent.js
├── SpeechRecognitionResult.js
├── SpeechRecognitionResultList.js
└── browser
└── SpeechRecognition.js
/.editorconfig:
--------------------------------------------------------------------------------
1 | # This file is for unifying the coding style of different editors and IDEs.
2 | # editorconfig.org
3 |
4 | root = true
5 |
6 | [*]
7 | charset = utf-8
8 | end_of_line = lf
9 | indent_size = 4
10 | indent_style = space
11 | insert_final_newline = true
12 | trim_trailing_whitespace = true
13 |
14 | [*.json]
15 | indent_size = 2
16 |
--------------------------------------------------------------------------------
/.github/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing
2 |
3 | We love pull requests from everyone.
4 |
5 | [Fork](https://help.github.com/articles/fork-a-repo/), then
6 | [clone](https://help.github.com/articles/cloning-a-repository/) the repo:
7 |
8 | ```
9 | git clone git@github.com:your-username/SpeechRecognitionPlugin.git
10 | ```
11 |
12 | Set up a branch for your feature or bugfix with a link to the original repo:
13 |
14 | ```
15 | git checkout -b my-awesome-new-feature
16 | git push --set-upstream origin my-awesome-new-feature
17 | git remote add upstream https://github.com/macdonst/SpeechRecognitionPlugin.git
18 | ```
19 |
20 | Set up the project:
21 |
22 | ```
23 | npm install
24 | ```
25 |
26 | Make sure the tests pass before changing anything:
27 |
28 | ```
29 | npm test
30 | ```
31 |
32 | Make your change. Add tests for your change. Make the tests pass:
33 |
34 | ```
35 | npm test
36 | ```
37 |
38 | Commit changes:
39 |
40 | ```
41 | git commit -m "Cool stuff"
42 | ```
43 |
44 | Consider starting the commit message with an applicable emoji:
45 |
46 | * :art: `:art:` when improving the format/structure of the code
47 | * :zap: `:zap:` when improving performance
48 | * :non-potable_water: `:non-potable_water:` when plugging memory leaks
49 | * :memo: `:memo:` when writing docs
50 | * :ambulance: `:ambulance:` a critical hotfix.
51 | * :sparkles: `:sparkles:` when introducing new features
52 | * :bookmark: `:bookmark:` when releasing / version tags
53 | * :rocket: `:rocket:` when deploying stuff
54 | * :penguin: `:penguin:` when fixing something on Android
55 | * :apple: `:apple:` when fixing something on iOS
56 | * :checkered_flag: `:checkered_flag:` when fixing something on Windows
57 | * :bug: `:bug:` when fixing a bug
58 | * :fire: `:fire:` when removing code or files
59 | * :green_heart: `:green_heart:` when fixing the CI build
60 | * :white_check_mark: `:white_check_mark:` when adding tests
61 | * :lock: `:lock:` when dealing with security
62 | * :arrow_up: `:arrow_up:` when upgrading dependencies
63 | * :arrow_down: `:arrow_down:` when downgrading dependencies
64 | * :shirt: `:shirt:` when removing linter warnings
65 | * :hammer: `:hammer:` when doing heavy refactoring
66 | * :heavy_minus_sign: `:heavy_minus_sign:` when removing a dependency.
67 | * :heavy_plus_sign: `:heavy_plus_sign:` when adding a dependency.
68 | * :wrench: `:wrench:` when changing configuration files.
69 | * :globe_with_meridians: `:globe_with_meridians:` when dealing with
70 | internationalization and localization.
71 | * :pencil2: `:pencil2:` when fixing typos.
72 | * :hankey: `:hankey:` when writing bad code that needs to be improved.
73 | * :package: `:package:` when updating compiled files or packages.
74 |
75 | Make sure your branch is up to date with the original repo:
76 |
77 | ```
78 | git fetch upstream
79 | git merge upstream/master
80 | ```
81 |
82 | Review your changes and any possible conflicts and push to your fork:
83 |
84 | ```
85 | git push origin
86 | ```
87 |
88 | [Submit a pull request](https://help.github.com/articles/creating-a-pull-request/).
89 |
90 | At this point you're waiting on us. We do our best to keep on top of all the
91 | pull requests. We may suggest some changes, improvements or alternatives.
92 |
93 | Some things that will increase the chance that your pull request is accepted:
94 |
95 | * Write tests.
96 | * Write a [good commit message](http://chris.beams.io/posts/git-commit/).
97 | * Make sure the PR merges cleanly with the latest master.
98 | * Describe your feature/bugfix and why it's needed/important in the pull request
99 | description.
100 |
101 | ## Editor Config
102 |
103 | The project uses [.editorconfig](http://editorconfig.org/) to define the coding
104 | style of each file. We recommend that you install the Editor Config extension
105 | for your preferred IDE. Consistency is key.
106 |
107 | ## ESLint
108 |
109 | The project uses [ESLint](http://eslint.org/) to define the JavaScript coding
110 | conventions. Most editors now have an ESLint add-on to provide on-save or on-edit
111 | linting.
112 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | ### Expected Behaviour
2 |
3 | ### Actual Behaviour
4 |
5 | ### Reproduce Scenario (including but not limited to)
6 |
7 | #### Steps to Reproduce
8 |
9 | #### Platform and Version (eg. Android 5.0 or iOS 9.2.1)
10 |
11 | #### (Android) What device vendor (e.g. Samsung, HTC, Sony...)
12 |
13 | #### Cordova CLI version and cordova platform version
14 |
15 | cordova --version # e.g. 6.0.0
16 | cordova platform version android # e.g. 4.1.1
17 |
18 | #### Plugin version
19 |
20 | cordova plugin version | grep phonegap-plugin-speech-recognition # e.g. 1.5.3
21 |
22 | #### Sample Push Data Payload
23 |
24 | #### Sample Code that illustrates the problem
25 |
26 | #### Logs taken while reproducing problem
27 |
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | ## Description
4 |
5 |
6 | ## Related Issue
7 |
8 |
9 |
10 |
11 |
12 | ## Motivation and Context
13 |
14 |
15 | ## How Has This Been Tested?
16 |
17 |
18 |
19 |
20 | ## Screenshots (if appropriate):
21 |
22 | ## Types of changes
23 |
24 | - [ ] Bug fix (non-breaking change which fixes an issue)
25 | - [ ] New feature (non-breaking change which adds functionality)
26 | - [ ] Breaking change (fix or feature that would cause existing functionality to change)
27 |
28 | ## Checklist:
29 |
30 |
31 | - [ ] My code follows the code style of this project.
32 | - [ ] My change requires a change to the documentation.
33 | - [ ] I have updated the documentation accordingly.
34 | - [ ] I have read the **CONTRIBUTING** document.
35 | - [ ] I have added tests to cover my changes.
36 | - [ ] All new and existing tests passed.
37 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # built application files
2 | *.apk
3 | *.ap_
4 |
5 | # files for the dex VM
6 | *.dex
7 |
8 | # Java class files
9 | *.class
10 |
11 | # generated files
12 | bin/
13 | gen/
14 |
15 | # Local configuration file (sdk path, etc)
16 | local.properties
17 |
18 | # Eclipse project files
19 | .classpath
20 | .project
21 |
22 | .DS_Store
23 | /node_modules/
24 |
--------------------------------------------------------------------------------
/.jshintrc:
--------------------------------------------------------------------------------
1 | {
2 | "asi": false,
3 | "boss": false,
4 | "camelcase": true,
5 | "curly": true,
6 | "eqeqeq": true,
7 | "eqnull": false,
8 | "es5": false,
9 | "evil": false,
10 | "expr": false,
11 | "forin": true,
12 | "funcscope": false,
13 | "jasmine": true,
14 | "immed": true,
15 | "indent": 4,
16 | "latedef": true,
17 | "loopfunc": false,
18 | "maxerr": 7,
19 | "newcap": true,
20 | "node": true,
21 | "nonew": true,
22 | "plusplus": false,
23 | "quotmark": "single",
24 | "shadow": false,
25 | "strict": false,
26 | "supernew": false,
27 | "trailing": true,
28 | "undef": true,
29 | "white": true
30 | }
31 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Change Log
2 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) 2013 macdonst
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy of
6 | this software and associated documentation files (the "Software"), to deal in
7 | the Software without restriction, including without limitation the rights to
8 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
9 | the Software, and to permit persons to whom the Software is furnished to do so,
10 | subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
17 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
18 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
19 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
20 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
21 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | SpeechRecognitionPlugin
2 | =======================
3 |
4 | W3C Web Speech API - Speech Recognition plugin for PhoneGap
5 |
6 | Update 2013/09/05
7 | =================
8 |
9 | Back to work on this but it's not ready yet so don't try to use.
10 |
11 | Update 2013/08/05
12 | =================
13 |
14 | Hi, you are all probably wondering where the code is after seeing my PhoneGap Day US presentation or reading the slides. Well, I've been dealing with an illness in the family and have not had as much spare time as I would have hoped to update this project. However, things are working out better than I could have hoped for and I should have time to concentrate on this very soon.
15 |
16 | Update 2015/04/04
17 | =================
18 |
19 | Basic example is working on iOS and android
20 | ```
21 |
35 |
39 | ```
40 |
41 | Example from section 6.1 Speech Recognition Examples of the W3C page
42 | (https://dvcs.w3.org/hg/speech-api/raw-file/tip/speechapi.html#examples)
43 |
44 | To install the plugin use
45 |
46 | ```
47 | cordova plugin add https://github.com/macdonst/SpeechRecognitionPlugin
48 | ```
49 |
50 | Since iOS 10 it's mandatory to add a `NSMicrophoneUsageDescription` in the info.plist to access the microphone.
51 |
52 |
53 | To add this entry you can pass the `MICROPHONE_USAGE_DESCRIPTION` variable on plugin install.
54 |
55 |
56 | Example:
57 |
58 | `cordova plugin add https://github.com/macdonst/SpeechRecognitionPlugin --variable MICROPHONE_USAGE_DESCRIPTION="your usage message"`
59 |
60 | If the variable is not provided it will use an empty message, but a usage description string is mandatory to submit your app to the Apple Store.
61 |
62 |
63 | On iOS 10 and greater it uses the native SFSpeechRecognizer (same as Siri).
64 |
65 | Supported locales for SFSpeechRecognizer are:
66 | ro-RO, en-IN, he-IL, tr-TR, en-NZ, sv-SE, fr-BE, it-CH, de-CH, pl-PL, pt-PT, uk-UA, fi-FI, vi-VN, ar-SA, zh-TW, es-ES, en-GB, yue-CN, th-TH, en-ID, ja-JP, en-SA, en-AE, da-DK, fr-FR, sk-SK, de-AT, ms-MY, hu-HU, ca-ES, ko-KR, fr-CH, nb-NO, en-AU, el-GR, ru-RU, zh-CN, en-US, en-IE, nl-BE, es-CO, pt-BR, es-US, hr-HR, fr-CA, zh-HK, es-MX, id-ID, it-IT, nl-NL, cs-CZ, en-ZA, es-CL, en-PH, en-CA, en-SG, de-DE
67 |
68 | Two-character codes can be used too.
69 |
70 | On iOS 9 and older it uses iSpeech SDK, an API key is required, get one on https://www.ispeech.org/, it's free.
71 | To provide the key, add this preference inside the config.xml
72 | ```
73 |
74 | ```
75 | If none is provided it will use the demo key "developerdemokeydeveloperdemokey"
76 |
77 | iSpeech supported languages are:
78 |
79 | English (Canada) (en-CA)
80 | English (United States) (en-US)
81 | Spanish (Spain) (es-ES)
82 | French (France) (fr-FR)
83 | Italian (Italy) (it-IT)
84 | Polish (Poland) (pl-PL)
85 | Portuguese (Portugal) (pt-PT)
86 |
87 |
88 | Two-character codes can be used too, but for English, "en" will use "en-US"
89 |
90 |
91 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "phonegap-plugin-speech-recognition",
3 | "version": "0.3.0",
4 | "description": "Cordova Speech Recognition Plugin",
5 | "cordova": {
6 | "id": "phonegap-plugin-speech-recognition",
7 | "platforms": ["android", "ios"]
8 | },
9 | "repository": {
10 | "type": "git",
11 | "url": "git+https://github.com/macdonst/SpeechRecognitionPlugin.git"
12 | },
13 | "keywords": [
14 | "cordova",
15 | "speech",
16 | "recognition",
17 | "ecosystem:cordova",
18 | "cordova-android",
19 | "cordova-ios"
20 | ],
21 | "author": "Simon MacDonald",
22 | "license": "MIT",
23 | "bugs": {
24 | "url": "https://github.com/macdonst/SpeechRecognitionPlugin/issues"
25 | },
26 | "homepage": "https://github.com/macdonst/SpeechRecognitionPlugin#readme"
27 | }
28 |
--------------------------------------------------------------------------------
/plugin.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | SpeechRecognition
6 | Cordova Speech Recognition Plugin
7 | MIT
8 | cordova,speech,recognition
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 |
60 |
61 |
62 |
63 |
64 |
65 |
66 |
67 |
68 |
69 |
70 |
71 |
72 |
73 |
74 |
75 |
76 |
77 |
78 |
79 |
80 |
81 |
82 |
83 |
84 |
85 |
86 |
87 |
88 |
89 |
90 |
91 |
92 |
93 |
94 | $MICROPHONE_USAGE_DESCRIPTION
95 |
96 |
97 |
98 | $SPEECH_RECOGNITION_USAGE_DESCRIPTION
99 |
100 |
101 |
102 |
103 |
104 |
105 |
106 |
107 |
--------------------------------------------------------------------------------
/src/android/SpeechRecognition.java:
--------------------------------------------------------------------------------
1 | package org.apache.cordova.speech;
2 |
3 | import java.util.ArrayList;
4 |
5 | import org.apache.cordova.PermissionHelper;
6 | import org.json.JSONArray;
7 | import org.json.JSONException;
8 | import org.json.JSONObject;
9 |
10 | import org.apache.cordova.CallbackContext;
11 | import org.apache.cordova.CordovaPlugin;
12 | import org.apache.cordova.PluginResult;
13 |
14 | import android.content.pm.PackageManager;
15 | import android.util.Log;
16 | import android.content.Intent;
17 | import android.os.Bundle;
18 | import android.os.Handler;
19 | import android.os.Looper;
20 | import android.speech.RecognitionListener;
21 | import android.speech.RecognizerIntent;
22 | import android.speech.SpeechRecognizer;
23 | import android.Manifest;
24 |
25 | /**
26 | * Style and such borrowed from the TTS and PhoneListener plugins
27 | */
28 | public class SpeechRecognition extends CordovaPlugin {
29 | private static final String LOG_TAG = SpeechRecognition.class.getSimpleName();
30 | public static final String ACTION_INIT = "init";
31 | public static final String ACTION_SPEECH_RECOGNIZE_START = "start";
32 | public static final String ACTION_SPEECH_RECOGNIZE_STOP = "stop";
33 | public static final String ACTION_SPEECH_RECOGNIZE_ABORT = "abort";
34 | public static final String NOT_PRESENT_MESSAGE = "Speech recognition is not present or enabled";
35 |
36 | private CallbackContext speechRecognizerCallbackContext;
37 | private boolean recognizerPresent = false;
38 | private SpeechRecognizer recognizer;
39 | private boolean aborted = false;
40 | private boolean listening = false;
41 | private String lang;
42 |
43 | private static String [] permissions = { Manifest.permission.RECORD_AUDIO };
44 | private static int RECORD_AUDIO = 0;
45 |
46 | protected void getMicPermission()
47 | {
48 | PermissionHelper.requestPermission(this, RECORD_AUDIO, permissions[RECORD_AUDIO]);
49 | }
50 |
51 | private void promptForMic()
52 | {
53 | if(PermissionHelper.hasPermission(this, permissions[RECORD_AUDIO])) {
54 | this.startRecognition();
55 | }
56 | else
57 | {
58 | getMicPermission();
59 | }
60 |
61 | }
62 |
63 | public void onRequestPermissionResult(int requestCode, String[] permissions,
64 | int[] grantResults) throws JSONException
65 | {
66 | for(int r:grantResults)
67 | {
68 | if(r == PackageManager.PERMISSION_DENIED)
69 | {
70 | fireErrorEvent();
71 | fireEvent("end");
72 | return;
73 | }
74 | }
75 | promptForMic();
76 | }
77 |
78 | @Override
79 | public boolean execute(String action, JSONArray args, CallbackContext callbackContext) {
80 | // Dispatcher
81 | if (ACTION_INIT.equals(action)) {
82 | // init
83 | if (DoInit()) {
84 | callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.OK));
85 |
86 | Handler loopHandler = new Handler(Looper.getMainLooper());
87 | loopHandler.post(new Runnable() {
88 |
89 | @Override
90 | public void run() {
91 | recognizer = SpeechRecognizer.createSpeechRecognizer(cordova.getActivity().getBaseContext());
92 | recognizer.setRecognitionListener(new SpeechRecognitionListner());
93 | }
94 |
95 | });
96 | } else {
97 | callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.ERROR, NOT_PRESENT_MESSAGE));
98 | }
99 | }
100 | else if (ACTION_SPEECH_RECOGNIZE_START.equals(action)) {
101 | // recognize speech
102 | if (!recognizerPresent) {
103 | callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.ERROR, NOT_PRESENT_MESSAGE));
104 | }
105 | this.lang = args.optString(0, "en");
106 | this.speechRecognizerCallbackContext = callbackContext;
107 | this.promptForMic();
108 | }
109 | else if (ACTION_SPEECH_RECOGNIZE_STOP.equals(action)) {
110 | stop(false);
111 | }
112 | else if (ACTION_SPEECH_RECOGNIZE_ABORT.equals(action)) {
113 | stop(true);
114 | }
115 | else {
116 | // Invalid action
117 | String res = "Unknown action: " + action;
118 | return false;
119 | }
120 | return true;
121 | }
122 |
123 | private void startRecognition() {
124 |
125 | final Intent intent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
126 | intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL,RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
127 | intent.putExtra(RecognizerIntent.EXTRA_CALLING_PACKAGE,"voice.recognition.test");
128 | intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE,lang);
129 |
130 | intent.putExtra(RecognizerIntent.EXTRA_MAX_RESULTS,5);
131 |
132 | Handler loopHandler = new Handler(Looper.getMainLooper());
133 | loopHandler.post(new Runnable() {
134 |
135 | @Override
136 | public void run() {
137 | recognizer.startListening(intent);
138 | }
139 |
140 | });
141 |
142 | PluginResult res = new PluginResult(PluginResult.Status.NO_RESULT);
143 | res.setKeepCallback(true);
144 | this.speechRecognizerCallbackContext.sendPluginResult(res);
145 | }
146 |
147 | private void stop(boolean abort) {
148 | this.aborted = abort;
149 | Handler loopHandler = new Handler(Looper.getMainLooper());
150 | loopHandler.post(new Runnable() {
151 |
152 | @Override
153 | public void run() {
154 | recognizer.stopListening();
155 | }
156 |
157 | });
158 | }
159 |
160 | /**
161 | * Initialize the speech recognizer by checking if one exists.
162 | */
163 | private boolean DoInit() {
164 | this.recognizerPresent = SpeechRecognizer.isRecognitionAvailable(this.cordova.getActivity().getBaseContext());
165 | return this.recognizerPresent;
166 | }
167 |
168 | private void fireRecognitionEvent(ArrayList transcripts, float[] confidences) {
169 | JSONObject event = new JSONObject();
170 | JSONArray results = new JSONArray();
171 | try {
172 | for(int i=0; i transcript = results.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
276 | float[] confidence = results.getFloatArray(SpeechRecognizer.CONFIDENCE_SCORES);
277 | if (transcript.size() > 0) {
278 | Log.d(LOG_TAG, "fire recognition event");
279 | fireRecognitionEvent(transcript, confidence);
280 | } else {
281 | Log.d(LOG_TAG, "fire no match event");
282 | fireEvent("nomatch");
283 | }
284 | listening = false;
285 | }
286 |
287 | @Override
288 | public void onRmsChanged(float rmsdB) {
289 | Log.d(LOG_TAG, "rms changed");
290 | }
291 |
292 | }
293 | }
--------------------------------------------------------------------------------
/src/ios/Headers/ISSpeechRecognition.h:
--------------------------------------------------------------------------------
1 | //
2 | // ISSpeechRecognition.h
3 | // iSpeechSDK
4 | //
5 | // Copyright (c) 2012 iSpeech, Inc. All rights reserved.
6 | //
7 |
8 | #import
9 | #import
10 |
11 | #import "iSpeechSDK.h"
12 |
13 | #import "ISSpeechRecognitionLocales.h"
14 | #import "ISSpeechRecognitionResult.h"
15 |
16 | /**
17 |  * The type of model to use when transcribing audio. Currently, only SMS and Dictation are available.
18 | */
19 | enum {
20 | ISFreeFormTypeSMS = 1,
21 | ISFreeFormTypeVoicemail = 2,
22 | ISFreeFormTypeDictation = 3,
23 | ISFreeFormTypeMessage = 4,
24 | ISFreeFormTypeInstantMessage = 5,
25 | ISFreeFormTypeTranscript = 6,
26 | ISFreeFormTypeMemo = 7,
27 | };
28 |
29 | typedef NSUInteger ISFreeFormType;
30 |
31 | #if NS_BLOCKS_AVAILABLE
32 |
33 | /*^*
34 | * The callback handler for speech recognition request.
35 | *
36 |  * @param error An error, if one occurred, or `nil`.
37 | * @param result The result of a successful recognition, or `nil` if there's an error.
38 | * @param cancelledByUser Whether speech recognition finished because a user cancelled the request.
39 | */
40 | typedef void(^ISSpeechRecognitionHandler)(NSError *error, ISSpeechRecognitionResult *result, BOOL cancelledByUser);
41 |
42 | #endif
43 |
44 | @class ISSpeechRecognition;
45 |
46 | /**
47 | * Delegate protocol for `ISSpeechRecognition`.
48 | *
49 | * The only required method is for getting the result from speech recognition.
50 | */
51 | @protocol ISSpeechRecognitionDelegate
52 |
53 | @required
54 |
55 | /**
56 | * Speech recognition successfully completed, and a result was sent back from the server.
57 | *
58 |  * If you get no result text back, and a confidence level of 0.0, then, most likely, the user didn't speak anything.
59 | *
60 | * @param speechRecognition The speech recognition instance that completed.
61 | * @param result The result text and confidence level.
62 | */
63 | - (void)recognition:(ISSpeechRecognition *)speechRecognition didGetRecognitionResult:(ISSpeechRecognitionResult *)result;
64 |
65 | @optional
66 |
67 | /**
68 | * Something went wrong, speech recognition failed, and an error was passed back.
69 | *
70 | * @param speechRecognition The speech recognition instance that was cancelled.
71 |  * @param error The actual error. Errors from the SDK internals will have the error domain of `iSpeechErrorDomain`. You may get some URL connection errors if something happens with the network.
72 | */
73 | - (void)recognition:(ISSpeechRecognition *)speechRecognition didFailWithError:(NSError *)error;
74 |
75 | /**
76 | * Speech recognition was cancelled by the user.
77 | *
78 | * @param speechRecognition The speech recognition instance that was cancelled.
79 | */
80 | - (void)recognitionCancelledByUser:(ISSpeechRecognition *)speechRecognition;
81 |
82 | /**
83 | * Recording the user's speech has started.
84 | *
85 | * @param speechRecognition The speech recognition instance that started recording audio.
86 | */
87 | - (void)recognitionDidBeginRecording:(ISSpeechRecognition *)speechRecognition;
88 |
89 | /**
90 | * Speech recognition has finished recording and is moving on to recognizing the text.
91 | *
92 | * This happens when the timeout is hit for a timed listen, or when the user taps the "Done" button on the dialog.
93 | *
94 | * @param speechRecognition The speech recognition instance that finished recording.
95 | */
96 | - (void)recognitionDidFinishRecording:(ISSpeechRecognition *)speechRecognition;
97 |
98 | @end
99 |
100 | /**
101 | * The interface for doing speech recognition in the SDK.
102 | */
103 | @interface ISSpeechRecognition : NSObject
104 |
105 | /** @name Getting and Setting the Delegate */
106 |
107 | /**
108 | * The delegate of a speech recognition object.
109 | *
110 | * The delegate must adopt the `` protocol.
111 | */
112 | @property (nonatomic, unsafe_unretained) id delegate;
113 |
114 | /** @name Configuration Properties */
115 |
116 | /**
117 | * Sets the locale to use for speech recognition.
118 | *
119 | * Most of the time, the value passed is a ISO country code. To get our supported ISOs, consult "Freeform Dictation Languages" under "Speech Recognition Settings" when viewing details about a specific key.
120 | */
121 | @property (nonatomic, copy) NSString *locale CONFIGURATION_METHOD;
122 |
123 | /**
124 | * Allows you to set a custom language model for speech recognition.
125 | */
126 | @property (nonatomic, copy) NSString *model CONFIGURATION_METHOD;
127 |
128 | /**
129 |  * The type of model to use when transcribing audio. Defaults to `ISFreeFormTypeDictation`.
130 | */
131 | @property (nonatomic, assign) ISFreeFormType freeformType CONFIGURATION_METHOD;
132 |
133 | /**
134 | * Whether silence detection should be used to automatically detect when someone's done talking.
135 | */
136 | @property (nonatomic, assign) BOOL silenceDetectionEnabled CONFIGURATION_METHOD;
137 |
138 | /** @name Detecting Audio Input */
139 |
140 | /**
141 | * Returns whether audio input is available for speech recognition. You can check this before creating an instance of `ISSpeechRecognition`, as well as to dynamically update your UI with what you can do.
142 | *
143 | * @return Returns whether audio input is available.
144 | */
145 | + (BOOL)audioInputAvailable;
146 |
147 | /** @name Aliases and Commands */
148 |
149 | /**
150 | * Adds a list of items as an alias.
151 | *
152 | * Think of using an alias list as a way of doing a regular expression. For example, if you want to use a regular expression to match "call joe", "call charlie", or "call ben", then it would be `call (joe|charlie|ben)`. Similarly, to do that with an alias list,
153 | *
154 | * [speechRecognitionInstance addAlias:@"PEOPLE" forItems:[NSArray arrayWithObjects:@"joe", @"charlie", @"ben", nil]];
155 | * [speechRecognitionInstance addCommand:@"call %PEOPLE%"];
156 | *
157 | * @param alias The string to use for the alias key.
158 | * @param items The array of items to be substituted for the alias.
159 | */
160 | - (void)addAlias:(NSString *)alias forItems:(NSArray *)items;
161 |
162 | /**
163 | * Adds a command.
164 | *
165 | * If you want to reference an alias list, the format is `%ALIAS_LIST_NAME%`. Replace `ALIAS_LIST_NAME` with the actual name of your alias list.
166 | *
167 | * @param command The command to be added.
168 | */
169 | - (void)addCommand:(NSString *)command;
170 |
171 | /**
172 | * Add multiple commands from an array.
173 | *
174 | * @param commands The array of commands to be added.
175 | */
176 | - (void)addCommands:(NSArray *)commands;
177 |
178 | /**
179 | * Clears any command or alias lists on this speech recognition object.
180 | */
181 | - (void)resetCommandsAndAliases;
182 |
183 | /** @name Listening and Recognizing */
184 |
185 | /**
186 | * Start an untimed listen.
187 | *
188 | * An untimed listen means that the SDK will start listening, and will not stop unless you tell it to by calling -[ISSpeechRecognition finishListenAndStartRecognize], or until silence detection kicks in, if you have that enabled.
189 | *
190 |  * If you're using a command or alias list, use `-listenAndRecognizeWithTimeout:error:` instead. This will ensure that speech recognition will only last as long as is necessary, thus saving the user's battery life and data plan, ensuring that you get a result back, and providing a better overall experience.
191 | *
192 | * @param err An `NSError` pointer to get an error object out of the method if something goes wrong.
193 |  * @return Returns whether speech recognition successfully started. If this returns `NO`, check the error for details on what went wrong.
194 | * @see listenAndRecognizeWithTimeout:error:
195 | * @see finishListenAndStartRecognize
196 | */
197 | - (BOOL)listen:(NSError **)err;
198 |
199 | /**
200 | * If you're running an untimed listen, or if you want to cut a timed listen short, call this method to tell the SDK to stop listening for audio, and finish up transcribing.
201 | */
202 | - (void)finishListenAndStartRecognize;
203 |
204 | /**
205 | * Starts a timed listen. After a set timeout, the SDK will stop listening for audio and will start to transcribe it.
206 | *
207 | * Useful when using command lists to ensure that the user doesn't talk longer than necessary.
208 | *
209 | * @param timeout The amount of time, in seconds, for the timed listen to last for.
210 | * @param err An `NSError` pointer to get an error object out of the method if something goes wrong.
211 |  * @return Returns whether speech recognition successfully started. If this returns `NO`, check the error for details on what went wrong.
212 | */
213 | - (BOOL)listenAndRecognizeWithTimeout:(NSTimeInterval)timeout error:(NSError **)err;
214 |
215 | #if NS_BLOCKS_AVAILABLE
216 |
217 | /**
218 | * Start an untimed listen.
219 | *
220 | * An untimed listen means that the SDK will start listening, and will not stop unless you tell it to by calling -[ISSpeechRecognition finishListenAndStartRecognize], or until silence detection kicks in, if you have that enabled.
221 | *
222 | * If you're using a command or alias list, use `-listenAndRecognizeWithTimeout:handler:` instead. This will ensure that speech recognition will only last as long as is necessary, thus saving the user's battery life and data plan, ensuring that you get a result back, and providing a better overall experience.
223 | *
224 | * @param handler An `ISSpeechRecognitionHandler` block that will be executed on the main thread when speech recognition completes, or when an error occurs.
225 | * @see listenAndRecognizeWithTimeout:handler:
226 | * @see finishListenAndStartRecognize
227 | */
228 | - (void)listenWithHandler:(ISSpeechRecognitionHandler)handler;
229 |
230 | /**
231 | * Starts a timed listen. After a set timeout, the SDK will stop listening for audio and will start to transcribe it.
232 | *
233 | * Useful when using command lists to ensure that the user doesn't talk longer than necessary.
234 | *
235 | * @param timeout The amount of time, in seconds, for the timed listen to last for.
236 | * @param handler An `ISSpeechRecognitionHandler` block that will be executed on the main thread when speech recognition completes, or when an error occurs.
237 | */
238 | - (void)listenAndRecognizeWithTimeout:(NSTimeInterval)timeout handler:(ISSpeechRecognitionHandler)handler;
239 |
240 | #endif
241 |
242 | /**
243 | * Cancels an in progress speech recognition action.
244 | *
245 | * If, for some reason, you need to cancel an in progress speech recognition action, call this method. It's also a good idea to provide feedback to the user as to why you cancelled it.
246 | */
247 | - (void)cancel;
248 |
249 | @end
--------------------------------------------------------------------------------
/src/ios/Headers/ISSpeechRecognitionLocales.h:
--------------------------------------------------------------------------------
1 | //
2 | // ISSpeechRecognitionLocales.h
3 | // iSpeechSDK
4 | //
5 | // Copyright (c) 2012 iSpeech, Inc. All rights reserved.
6 | //
7 |
8 | #import
9 |
10 | extern NSString *const ISLocaleUSEnglish;
11 | extern NSString *const ISLocaleCAEnglish;
12 | extern NSString *const ISLocaleGBEnglish;
13 | extern NSString *const ISLocaleAUEnglish;
14 | extern NSString *const ISLocaleESSpanish;
15 | extern NSString *const ISLocaleMXSpanish;
16 | extern NSString *const ISLocaleITItalian;
17 | extern NSString *const ISLocaleFRFrench;
18 | extern NSString *const ISLocaleCAFrench;
19 | extern NSString *const ISLocalePLPolish;
20 | extern NSString *const ISLocaleBRPortuguese;
21 | extern NSString *const ISLocalePTPortuguese;
22 | extern NSString *const ISLocaleCACatalan;
23 | extern NSString *const ISLocaleCNChinese;
24 | extern NSString *const ISLocaleHKChinese;
25 | extern NSString *const ISLocaleTWChinese;
26 | extern NSString *const ISLocaleDKDanish;
27 | extern NSString *const ISLocaleDEGerman;
28 | extern NSString *const ISLocaleFIFinish;
29 | extern NSString *const ISLocaleJAJapanese;
30 | extern NSString *const ISLocaleKRKorean;
31 | extern NSString *const ISLocaleNLDutch;
32 | extern NSString *const ISLocaleNONorwegian;
33 | extern NSString *const ISLocaleRURussian;
34 | extern NSString *const ISLocaleSESwedish;
35 |
--------------------------------------------------------------------------------
/src/ios/Headers/ISSpeechRecognitionResult.h:
--------------------------------------------------------------------------------
1 | //
2 | // ISSpeechRecognitionResult.h
3 | // iSpeechSDK
4 | //
5 | // Copyright (c) 2012 iSpeech, Inc. All rights reserved.
6 | //
7 |
8 | #import
9 |
10 | /**
11 | * This class contains information about a successful recognition.
12 | */
13 | @interface ISSpeechRecognitionResult : NSObject
14 |
15 | /**
16 | * The transcribed text returned from a recognition.
17 | */
18 | @property (nonatomic, copy, readonly) NSString *text;
19 |
20 | /**
21 | * How confident the speech recognizer was. Scale from 0.0 to 1.0.
22 | */
23 | @property (nonatomic, assign, readonly) float confidence;
24 |
25 | @end
26 |
--------------------------------------------------------------------------------
/src/ios/Headers/ISSpeechSynthesis.h:
--------------------------------------------------------------------------------
1 | //
2 | // ISSpeechSynthesis.h
3 | // iSpeechSDK
4 | //
5 | // Copyright (c) 2012 iSpeech, Inc. All rights reserved.
6 | //
7 |
8 | #import
9 |
10 | #import "iSpeechSDK.h"
11 | #import "ISSpeechSynthesisVoices.h"
12 |
13 | #if NS_BLOCKS_AVAILABLE
14 |
15 | /**
16 | * The callback handler for a speech synthesis request.
17 | *
18 | * @param error An error for the request, if one occurred, otherwise, `nil`.
19 | * @param userCancelled Whether speech synthesis finished as a result of user cancellation or not.
20 | */
21 | typedef void(^ISSpeechSynthesisHandler)(NSError *error, BOOL userCancelled);
22 |
23 | #endif
24 |
25 | @class ISSpeechSynthesis;
26 |
27 | /**
28 | * Delegate protocol for `ISSpeechSynthesis`.
29 | *
30 | * All methods are optional.
31 | */
32 | @protocol ISSpeechSynthesisDelegate
33 |
34 | @optional
35 |
36 | /**
37 | * The specified speech synthesis instance started speaking. Audio is now playing.
38 | *
39 | * @param speechSynthesis The speech synthesis object that is speaking.
40 | */
41 | - (void)synthesisDidStartSpeaking:(ISSpeechSynthesis *)speechSynthesis;
42 |
43 | /**
44 | * The specified speech synthesis instance finished speaking, either on its own or because the user cancelled it.
45 | *
46 | * @param speechSynthesis The speech synthesis object that finished speaking.
47 | * @param userCancelled Whether the user was responsible for cancelling the speech synthesis, usually by tapping the "Cancel" button on the dialog.
48 | */
49 | - (void)synthesisDidFinishSpeaking:(ISSpeechSynthesis *)speechSynthesis userCancelled:(BOOL)userCancelled;
50 |
51 | /**
52 | * Something went wrong with the speech synthesis. Usually this is used for errors returned by the server.
53 | *
54 | * @param speechSynthesis The speech synthesis object that the error occurred on.
55 | * @param error The actual error. Errors from the SDK internals will have the error domain of `iSpeechErrorDomain`. You may get some URL connection errors if something happens with the network.
56 | */
57 | - (void)synthesis:(ISSpeechSynthesis *)speechSynthesis didFailWithError:(NSError *)error;
58 |
59 | @end
60 |
61 | /**
62 | * The interface for doing speech synthesis in the SDK.
63 | */
64 | @interface ISSpeechSynthesis : NSObject
65 |
66 | /** @name Getting and Setting the Delegate */
67 |
68 | /**
69 | * The delegate of a speech synthesis object.
70 | *
71 | * The delegate must adopt the `` protocol.
72 | */
73 | @property (nonatomic, unsafe_unretained) id delegate;
74 |
75 | /** @name Configuration Properties */
76 |
77 | /**
78 | * Sets the voice to use for this speech synthesis instance.
79 | *
80 | * Voices are listed in the `ISSpeechSynthesisVoices.h` header file. You are not limited to that list; they are only standard voices. If you specify an invalid voice, the delegate will get an error.
81 | */
82 | @property (nonatomic, copy) NSString *voice CONFIGURATION_METHOD;
83 |
84 | /**
85 | * Sets the speed to use for speech synthesis.
86 | *
87 | * This should be a number anywhere between -10 and 10, with -10 being the slowest, and 10 being the fastest. If you provide a number larger than 10, the speed will be set to 10. Likewise, if you provide a number smaller than -10, the speed will be set to -10.
88 | */
89 | @property (nonatomic, assign) NSInteger speed CONFIGURATION_METHOD;
90 |
91 | /**
92 | * The bitrate of the synthesised speech.
93 | *
94 | * The higher the bitrate, the better quality the audio, but the larger the file size of the data being sent, which results in more buffering needed to load all that data. Default value is 48, which is suitable for WiFi, 4G, and 3G.
95 | *
96 | * Valid values include 8, 16, 24, 32, 48, 56, 64, 80, 96, 112, 128, 144, 160, 192, 224, 256, and 320, as well as any others listed under "Bit Rates" for an API key's Text-to-Speech Settings.
97 | */
98 | @property (nonatomic, assign) NSInteger bitrate CONFIGURATION_METHOD;
99 |
100 | /** Getting and Setting the Text */
101 |
102 | /**
103 | * The text to speak.
104 | */
105 | @property (nonatomic, copy) NSString *text;
106 |
107 | /** @name Creating an Instance */
108 |
109 | /**
110 | * Create a new `ISSpeechSynthesis` object with the supplied text.
111 | *
112 | * @param text The initial text for the speech synthesis object.
113 | */
114 | - (id)initWithText:(NSString *)text;
115 |
116 | /** @name Speaking Text */
117 |
118 | /**
119 | * Speak the text that was specified when creating this instance.
120 | *
121 | * @param err An `NSError` pointer to get an error object out of the method if something goes wrong.
122 | * @return Whether synthesis successfully started. If this returns `NO`, check the error for details on what went wrong.
123 | */
124 | - (BOOL)speak:(NSError **)err;
125 |
126 | #if NS_BLOCKS_AVAILABLE
127 |
128 | /**
129 | * Speak the text that was specified when creating this instance.
130 | *
131 | * @param handler A `ISSpeechSynthesisHandler` block to be executed when speaking finishes, or when an error occurs. This handler will be called on the main thread.
132 | */
133 | - (void)speakWithHandler:(ISSpeechSynthesisHandler)handler;
134 |
135 | #endif
136 |
137 | /**
138 | * Cancels an in-progress speech synthesis action.
139 | */
140 | - (void)cancel;
141 |
142 | @end
143 |
--------------------------------------------------------------------------------
/src/ios/Headers/ISSpeechSynthesisVoices.h:
--------------------------------------------------------------------------------
1 | //
2 | // ISSpeechSynthesisVoices.h
3 | // iSpeechSDK
4 | //
5 | // Copyright (c) 2012 iSpeech, Inc. All rights reserved.
6 | //
7 |
8 | #import
9 |
10 | extern NSString *const ISVoiceUSEnglishFemale;
11 | extern NSString *const ISVoiceUSEnglishMale;
12 | extern NSString *const ISVoiceUKEnglishFemale;
13 | extern NSString *const ISVoiceUKEnglishMale;
14 | extern NSString *const ISVoiceAUEnglishFemale;
15 | extern NSString *const ISVoiceUSSpanishFemale;
16 | extern NSString *const ISVoiceUSSpanishMale;
17 | extern NSString *const ISVoiceCHChineseFemale;
18 | extern NSString *const ISVoiceCHChineseMale;
19 | extern NSString *const ISVoiceHKChineseFemale;
20 | extern NSString *const ISVoiceTWChineseFemale;
21 | extern NSString *const ISVoiceJPJapaneseFemale;
22 | extern NSString *const ISVoiceJPJapaneseMale;
23 | extern NSString *const ISVoiceKRKoreanFemale;
24 | extern NSString *const ISVoiceKRKoreanMale;
25 | extern NSString *const ISVoiceCAEnglishFemale;
26 | extern NSString *const ISVoiceHUHungarianFemale;
27 | extern NSString *const ISVoiceBRPortugueseFemale;
28 | extern NSString *const ISVoiceEURPortugueseFemale;
29 | extern NSString *const ISVoiceEURPortugueseMale;
30 | extern NSString *const ISVoiceEURSpanishFemale;
31 | extern NSString *const ISVoiceEURSpanishMale;
32 | extern NSString *const ISVoiceEURCatalanFemale;
33 | extern NSString *const ISVoiceEURCzechFemale;
34 | extern NSString *const ISVoiceEURDanishFemale;
35 | extern NSString *const ISVoiceEURFinnishFemale;
36 | extern NSString *const ISVoiceEURFrenchFemale;
37 | extern NSString *const ISVoiceEURFrenchMale;
38 | extern NSString *const ISVoiceEURNorwegianFemale;
39 | extern NSString *const ISVoiceEURDutchFemale;
40 | extern NSString *const ISVoiceEURDutchMale;
41 | extern NSString *const ISVoiceEURPolishFemale;
42 | extern NSString *const ISVoiceEURItalianFemale;
43 | extern NSString *const ISVoiceEURItalianMale;
44 | extern NSString *const ISVoiceEURTurkishFemale;
45 | extern NSString *const ISVoiceEURTurkishMale;
46 | extern NSString *const ISVoiceEURGermanFemale;
47 | extern NSString *const ISVoiceEURGermanMale;
48 | extern NSString *const ISVoiceRURussianFemale;
49 | extern NSString *const ISVoiceRURussianMale;
50 | extern NSString *const ISVoiceSWSwedishFemale;
51 | extern NSString *const ISVoiceCAFrenchFemale;
52 | extern NSString *const ISVoiceCAFrenchMale;
53 | extern NSString *const ISVoiceArabicMale;
54 |
--------------------------------------------------------------------------------
/src/ios/Headers/iSpeechSDK.h:
--------------------------------------------------------------------------------
1 | //
2 | // iSpeechSDK.h
3 | // iSpeechSDK
4 | //
5 | // Copyright (c) 2012 iSpeech, Inc. All rights reserved.
6 | //
7 |
8 | #import
9 |
10 | // Methods marked with `CONFIGURATION_METHOD` can be set globally, for all objects, by calling the methods on [[iSpeechSDK sharedSDK] configuration]. This mimics the Appearance API in iOS 5.
11 | #define CONFIGURATION_METHOD
12 |
13 | #import "ISSpeechSynthesis.h"
14 | #import "ISSpeechRecognition.h"
15 |
16 | // Protocol used by objects that act as the proxy for the Configuration API. For details on each property here, look at ISSpeechSynthesis and ISSpeechRecognition.
17 | @protocol ISConfiguration
18 |
19 | @property (nonatomic, copy) NSString *voice;
20 | @property (nonatomic, assign) NSInteger speed;
21 | @property (nonatomic, assign) NSInteger bitrate;
22 |
23 | @property (nonatomic, copy) NSString *locale;
24 | @property (nonatomic, copy) NSString *model;
25 |
26 | @property (nonatomic, assign) NSUInteger freeformType;
27 |
28 | @property (nonatomic, assign) BOOL silenceDetectionEnabled;
29 | @property (nonatomic, assign) BOOL adaptiveBitrateEnabled;
30 |
31 | @end
32 |
33 | /**
34 | * The error domain for errors returned by the SDK.
35 | */
36 | extern NSString *const iSpeechErrorDomain;
37 |
38 | /**
39 | * Possible error codes returned by the SDK.
40 | *
41 | * Some of these should not be returned by the SDK (ones like `kISpeechErrorCodeInvalidFileFormat` and `kISpeechErrorCodeInvalidContentType`) because you don't have control over them. However, they are included in the off chance that something does go wrong with the server and they are returned. Codes that shouldn't be returned are marked with an asterisk (`*`).
42 | *
43 | * When you get an error during speech recognition, tell the user that something went wrong. If you get `kISpeechErrorCodeNoInputAvailable`, `kISpeechErrorCodeNoInternetConnection`, or `kISpeechErrorCodeLostInput` the error messages on those NSError instances have been localized, and are presentable to the user.
44 | */
45 | enum _ISpeechErrorCode {
46 | kISpeechErrorCodeInvalidAPIKey = 1, // You provided an invalid API key.
47 | kISpeechErrorCodeUnableToConvert = 2, // The server was unable to convert your text to speech.
48 | kISpeechErrorCodeNotEnoughCredits = 3, // Your API key doesn't have the necessary credits required to complete this transaction.
49 | kISpeechErrorCodeNoActionSpecified = 4, // *
50 | kISpeechErrorCodeInvalidText = 5, // Usually, this error occurs when no text is sent to the server, or, for example, Japanese characters are sent to the English voice.
51 | kISpeechErrorCodeTooManyWords = 6, // You tried to convert too many words to speech.
52 | kISpeechErrorCodeInvalidTextEntry = 7, // *
53 | kISpeechErrorCodeInvalidVoice = 8, // You specified a voice that either doesn't exist, or that you don't have access to.
54 | kISpeechErrorCodeInvalidFileFormat = 12, // *
55 | kISpeechErrorCodeInvalidSpeed = 13, // *
56 | kISpeechErrorCodeInvalidDictionary = 14, // *
57 | kISpeechErrorCodeInvalidBitrate = 15, // You specified a bitrate that isn't one of the allowed values. See -[ISSpeechSynthesis bitrate] for details on valid values.
58 | kISpeechErrorCodeInvalidFrequency = 16, // *
59 | kISpeechErrorCodeInvalidAliasList = 17, // *
60 | kISpeechErrorCodeAliasMissing = 18, // *
61 | kISpeechErrorCodeInvalidContentType = 19, // *
62 | kISpeechErrorCodeAliasListTooComplex = 20, // *
63 | kISpeechErrorCodeCouldNotRecognize = 21, // If the audio isn't clear enough, or corrupted, this error will get returned. It's usually good UX to prompt the user to try again.
64 | kISpeechErrorCodeOptionNotEnabled = 30, // Option not enabled for your account. Please contact iSpeech sales at +1 (917) 338-7723 or at sales@ispeech.org to modify your license.
65 | kISpeechErrorCodeNoAPIAccess = 997, // *
66 | kISpeechErrorCodeUnsupportedOutputType = 998, // *
67 | kISpeechErrorCodeInvalidRequest = 999, // *
68 | kISpeechErrorCodeTrialPeriodExceeded = 100, // This evaluation account has exceeded its trial period. Please contact iSpeech sales at +1 (917) 338-7723 or at sales@ispeech.org to upgrade your license.
69 | kISpeechErrorCodeAPIKeyDisabled = 101, // Your key has been disabled. Please contact iSpeech sales at +1 (917) 338-7723 or at sales@ispeech.org to modify your license.
70 | kISpeechErrorCodeInvalidRequestMethod = 1000, // *
71 |
72 | // Error code 300 was "UserCancelled", but that has been wrapped into the SDK and is no longer used.
73 | kISpeechErrorCodeNoInputAvailable = 301, // You wanted to do speech recognition, but there's no mic available.
74 | kISpeechErrorCodeNoInternetConnection = 302, // There's no connection to the cloud to do speech synthesis or speech recognition.
75 | kISpeechErrorCodeSDKIsBusy = 303, // The SDK is busy doing either recognition or synthesis.
76 | kISpeechErrorCodeSDKInterrupted = 304, // The SDK was in the middle of doing something, and then got an audio session interruption
77 | kISpeechErrorCodeCouldNotActiveAudioSession = 305, // Unable to activate the audio session. Can happen when another audio session has higher precedence than ours does.
78 | kISpeechErrorCodeCouldNotStartAudioQueue = 306, // Unable to start an audio queue. Can happen when another audio queue has higher precedence than ours does.
79 | kISpeechErrorCodeServerDied = 307, // Server Died error. mediaserverd has died, and we need to clear out all our audio objects and start fresh.
80 | kISpeechErrorCodeLostInput = 308, // There was audio input, and speech recognition was happening, and then the audio input went away for some reason.
81 | kISpeechErrorCodeBadHost = 309, // The SSL Certificate chain was invalid, probably a result of some redirect away from iSpeech's servers. An example of this happening is when connected to a WiFi network that requires authentication before sending network requests.
82 |
83 | kISpeechErrorCodeUnknownError = 399
84 | };
85 |
86 | typedef NSUInteger iSpeechErrorCode;
87 |
88 | @class iSpeechSDK;
89 |
90 | /**
91 | * iSpeechSDKDelegate has optional methods to be notified when things happen on the SDK. Currently only notifies when an audio session interruption begins and ends.
92 | */
93 | @protocol iSpeechSDKDelegate
94 |
95 | @optional
96 |
97 | /**
98 | * The audio session has been interrupted. See [Responding to Audio Session Interruptions](https://developer.apple.com/library/ios/#documentation/Audio/Conceptual/AudioSessionProgrammingGuide/Cookbook/Cookbook.html#//apple_ref/doc/uid/TP40007875-CH6-SW7) in the [Audio Session Programming Guide](https://developer.apple.com/library/ios/#documentation/Audio/Conceptual/AudioSessionProgrammingGuide/Introduction/Introduction.html).
99 | *
100 | * @param sdk The shared instance of the SDK.
101 | */
102 | - (void)iSpeechSDKDidBeginInterruption:(iSpeechSDK *)sdk;
103 |
104 | /**
105 | * The interruption on the audio session has ended. See [Responding to Audio Session Interruptions](https://developer.apple.com/library/ios/#documentation/Audio/Conceptual/AudioSessionProgrammingGuide/Cookbook/Cookbook.html#//apple_ref/doc/uid/TP40007875-CH6-SW7) in the [Audio Session Programming Guide](https://developer.apple.com/library/ios/#documentation/Audio/Conceptual/AudioSessionProgrammingGuide/Introduction/Introduction.html).
106 | *
107 | * @param sdk The shared instance of the SDK.
108 | */
109 | - (void)iSpeechSDKDidEndInterruption:(iSpeechSDK *)sdk;
110 |
111 | @end
112 |
113 | /**
114 | * The shared SDK class. Configuration of the SDK happens on this object, as well as getting the configuration object to set global configuration for ISSpeechSynthesis and ISSpeechRecognition objects.
115 | */
116 | @interface iSpeechSDK : NSObject
117 |
118 | /** @name Configuring the SDK Instance */
119 |
120 | /**
121 | * Whether the SDK should use the Mobile Development Server (`YES`) or the Mobile Production Server (`NO`). This is set to `NO` by default.
122 | */
123 | @property (nonatomic, assign) BOOL usesDevServer;
124 |
125 | /**
126 | * Whether the SDK should vibrate on the Start Recording and Stop Recording prompts.
127 | *
128 | * These are off by default (`NO`) and will need to be turned on by setting this to `YES`.
129 | */
130 | @property (nonatomic, assign) BOOL vibrateOnPrompts;
131 |
132 | /**
133 | * Whether the SDK should play Success and Fail prompts on a successful or unsuccessful recognition.
134 | *
135 | * These are off by default (`NO`) and will need to be turned on by setting this to `YES`.
136 | */
137 | @property (nonatomic, assign) BOOL playsSuccessAndFailPrompts;
138 |
139 | /**
140 | * Allows you to tell the SDK whether or not it should deactivate the audio session once it's finished its stuff. If you're doing your own audio stuff in the app (such as playing music, an audiobook, etc.), you'd use this to make sure that your audio doesn't go away once the SDK finishes its speech synthesis or speech recognition.
141 | */
142 | @property (nonatomic, assign) BOOL shouldDeactivateAudioSessionWhenFinished;
143 |
144 | /**
145 | * Any extra server params you want to send to the server.
146 | *
147 | * Use only if directed.
148 | */
149 | @property (nonatomic, copy) NSString *extraServerParams;
150 |
151 | /**
152 | * Sets the APIKey to send to the server.
153 | *
154 | * The best place to set this is once in your `-applicationDidFinishLaunching:` method on your app delegate. Once set, you shouldn't have a reason to change it.
155 | */
156 | @property (nonatomic, copy) NSString *APIKey;
157 |
158 | /** @name Setting and Getting the Delegate */
159 |
160 | /**
161 | * Set the delegate to be notified of audio session interruptions.
162 | *
163 | * The delegate must adopt the `` protocol.
164 | */
165 | @property (nonatomic, unsafe_unretained) id delegate;
166 |
167 | /** @name SDK Properties */
168 |
169 | /**
170 | * Returns whether the SDK is currently busy doing something, such as performing speech recognition or speech synthesis.
171 | */
172 | @property (nonatomic, assign, readonly) BOOL isBusy;
173 |
174 | /**
175 | * Returns the version number of the SDK. Useful for debugging purposes and bug reports.
176 | */
177 | @property (nonatomic, copy, readonly) NSString *version;
178 |
179 | /** @name Getting the SDK Instance */
180 |
181 | /**
182 | * The single instance of the iSpeechSDK class.
183 | *
184 | * @return Returns the shared instance of the SDK.
185 | */
186 | + (iSpeechSDK *)sharedSDK;
187 |
188 | /** @name Getting the Configuration Instance */
189 |
190 | /**
191 | * Method to get the configuration object to set properties globally for all objects. For example, if you wanted to set the voice for all speech recognition requests, you'd call `[[[iSpeechSDK sharedSDK] configuration] setVoice:VOICE_HERE]` and all subsequent speech recognition requests would use that voice.
192 | *
193 | * @return Returns the configuration proxy.
194 | */
195 | - (id )configuration;
196 |
197 | /** @name Resetting the SDK */
198 |
199 | /**
200 | * If you get a lot of 303 errors, even though you know for a fact that the SDK isn't doing anything, call this method to reset the SDK's internals.
201 | *
202 | * Configuration properties set, including your API key, and anything sent to `[[iSpeechSDK sharedSDK] configuration]` will not be affected by this call. The delegate for any active speech synthesis or speech recognition will get a `kISpeechErrorCodeServerDied` error code.
203 | *
204 | * @warning This is a temporary fix and will be removed for the final 1.0 release of the SDK.
205 | */
206 | - (void)resetSDK;
207 |
208 | // The following methods are provided in the event that you initialize the audio session before the SDK has a chance to. If you do, you MUST call these methods in your interruption listener, otherwise the SDK WILL break.
209 |
210 | /** @name Interruption Handling */
211 |
212 | /**
213 | * Tells the SDK that an interruption has begun. If you initialize the audio session before the SDK, you must call this method to ensure that the SDK does not break.
214 | */
215 | - (void)beginInterruption;
216 |
217 | /**
218 | * Tells the SDK that an interruption has ended. If you initialize the audio session before the SDK, you must call this method to ensure that the SDK does not break.
219 | */
220 | - (void)endInterruption;
221 |
222 | @end
223 |
--------------------------------------------------------------------------------
/src/ios/SpeechRecognition.h:
--------------------------------------------------------------------------------
1 | #import
2 | #import "ISpeechSDK.h"
3 | #import
4 |
5 | @interface SpeechRecognition : CDVPlugin
6 |
7 | @property (nonatomic, strong) CDVInvokedUrlCommand * command;
8 | @property (nonatomic, strong) CDVPluginResult* pluginResult;
9 | @property (nonatomic, strong) ISSpeechRecognition* iSpeechRecognition;
10 | @property (nonatomic, strong) SFSpeechRecognizer *sfSpeechRecognizer;
11 | @property (nonatomic, strong) AVAudioEngine *audioEngine;
12 | @property (nonatomic, strong) SFSpeechAudioBufferRecognitionRequest *recognitionRequest;
13 | @property (nonatomic, strong) SFSpeechRecognitionTask *recognitionTask;
14 |
15 | - (void) init:(CDVInvokedUrlCommand*)command;
16 | - (void) start:(CDVInvokedUrlCommand*)command;
17 | - (void) stop:(CDVInvokedUrlCommand*)command;
18 | - (void) abort:(CDVInvokedUrlCommand*)command;
19 |
20 | @end
21 |
--------------------------------------------------------------------------------
/src/ios/SpeechRecognition.m:
--------------------------------------------------------------------------------
1 | //
2 | // Created by jcesarmobile on 30/11/14.
3 | //
4 | //
5 |
6 | #import "SpeechRecognition.h"
7 | #import "ISpeechSDK.h"
8 | #import
9 |
10 | @implementation SpeechRecognition
11 |
12 | - (void) init:(CDVInvokedUrlCommand*)command
13 | {
14 | NSString * key = [self.commandDelegate.settings objectForKey:[@"apiKey" lowercaseString]];
15 | if (!key) {
16 | key = @"developerdemokeydeveloperdemokey";
17 | }
18 | iSpeechSDK *sdk = [iSpeechSDK sharedSDK];
19 | sdk.APIKey = key;
20 | self.iSpeechRecognition = [[ISSpeechRecognition alloc] init];
21 | self.audioEngine = [[AVAudioEngine alloc] init];
22 | }
23 |
24 | - (void) start:(CDVInvokedUrlCommand*)command
25 | {
26 | self.command = command;
27 | NSMutableDictionary * event = [[NSMutableDictionary alloc]init];
28 | [event setValue:@"start" forKey:@"type"];
29 | self.pluginResult = [CDVPluginResult resultWithStatus:CDVCommandStatus_OK messageAsDictionary:event];
30 | [self.pluginResult setKeepCallbackAsBool:YES];
31 | [self.commandDelegate sendPluginResult:self.pluginResult callbackId:self.command.callbackId];
32 | [self recognize];
33 |
34 | }
35 |
36 | - (void) recognize
37 | {
38 | NSString * lang = [self.command argumentAtIndex:0];
39 | if (lang && [lang isEqualToString:@"en"]) {
40 | lang = @"en-US";
41 | }
42 |
43 | if (NSClassFromString(@"SFSpeechRecognizer")) {
44 |
45 | if (![self permissionIsSet]) {
46 | [SFSpeechRecognizer requestAuthorization:^(SFSpeechRecognizerAuthorizationStatus status){
47 | dispatch_async(dispatch_get_main_queue(), ^{
48 |
49 | if (status == SFSpeechRecognizerAuthorizationStatusAuthorized) {
50 | [self recordAndRecognizeWithLang:lang];
51 | } else {
52 | [self sendErrorWithMessage:@"Permission not allowed" andCode:4];
53 | }
54 |
55 | });
56 | }];
57 | } else {
58 | [self recordAndRecognizeWithLang:lang];
59 | }
60 | } else {
61 | [self.iSpeechRecognition setDelegate:self];
62 | [self.iSpeechRecognition setLocale:lang];
63 | [self.iSpeechRecognition setFreeformType:ISFreeFormTypeDictation];
64 | NSError *error;
65 | if(![self.iSpeechRecognition listenAndRecognizeWithTimeout:10 error:&error]) {
66 | NSLog(@"ERROR: %@", error);
67 | }
68 | }
69 | }
70 |
71 | - (void) recordAndRecognizeWithLang:(NSString *) lang
72 | {
73 | NSLocale *locale = [[NSLocale alloc] initWithLocaleIdentifier:lang];
74 | self.sfSpeechRecognizer = [[SFSpeechRecognizer alloc] initWithLocale:locale];
75 | if (!self.sfSpeechRecognizer) {
76 | [self sendErrorWithMessage:@"The language is not supported" andCode:7];
77 | } else {
78 |
79 | // Cancel the previous task if it's running.
80 | if ( self.recognitionTask ) {
81 | [self.recognitionTask cancel];
82 | self.recognitionTask = nil;
83 | }
84 |
85 | [self initAudioSession];
86 |
87 | self.recognitionRequest = [[SFSpeechAudioBufferRecognitionRequest alloc] init];
88 | self.recognitionRequest.shouldReportPartialResults = [[self.command argumentAtIndex:1] boolValue];
89 |
90 | self.recognitionTask = [self.sfSpeechRecognizer recognitionTaskWithRequest:self.recognitionRequest resultHandler:^(SFSpeechRecognitionResult *result, NSError *error) {
91 |
92 | if (error) {
93 | NSLog(@"error");
94 | [self stopAndRelease];
95 | [self sendErrorWithMessage:error.localizedFailureReason andCode:error.code];
96 | }
97 |
98 | if (result) {
99 | NSMutableArray * alternatives = [[NSMutableArray alloc] init];
100 | int maxAlternatives = [[self.command argumentAtIndex:2] intValue];
101 | for ( SFTranscription *transcription in result.transcriptions ) {
102 | if (alternatives.count < maxAlternatives) {
103 | float confMed = 0;
104 | for ( SFTranscriptionSegment *transcriptionSegment in transcription.segments ) {
105 | NSLog(@"transcriptionSegment.confidence %f", transcriptionSegment.confidence);
106 | confMed +=transcriptionSegment.confidence;
107 | }
108 | NSMutableDictionary * resultDict = [[NSMutableDictionary alloc]init];
109 | [resultDict setValue:transcription.formattedString forKey:@"transcript"];
110 | [resultDict setValue:[NSNumber numberWithBool:result.isFinal] forKey:@"final"];
111 | [resultDict setValue:[NSNumber numberWithFloat:confMed/transcription.segments.count]forKey:@"confidence"];
112 | [alternatives addObject:resultDict];
113 | }
114 | }
115 | [self sendResults:@[alternatives]];
116 | if ( result.isFinal ) {
117 | [self stopAndRelease];
118 | }
119 | }
120 | }];
121 |
122 | AVAudioFormat *recordingFormat = [self.audioEngine.inputNode outputFormatForBus:0];
123 |
124 | [self.audioEngine.inputNode installTapOnBus:0 bufferSize:1024 format:recordingFormat block:^(AVAudioPCMBuffer * _Nonnull buffer, AVAudioTime * _Nonnull when) {
125 | [self.recognitionRequest appendAudioPCMBuffer:buffer];
126 | }],
127 |
128 | [self.audioEngine prepare];
129 | [self.audioEngine startAndReturnError:nil];
130 | }
131 | }
132 |
133 | - (void) initAudioSession
134 | {
135 | AVAudioSession *audioSession = [AVAudioSession sharedInstance];
136 | [audioSession setCategory:AVAudioSessionCategoryRecord error:nil];
137 | [audioSession setMode:AVAudioSessionModeMeasurement error:nil];
138 | [audioSession setActive:YES withOptions:AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation error:nil];
139 | }
140 |
141 | - (BOOL) permissionIsSet
142 | {
143 | SFSpeechRecognizerAuthorizationStatus status = [SFSpeechRecognizer authorizationStatus];
144 | return status != SFSpeechRecognizerAuthorizationStatusNotDetermined;
145 | }
146 |
147 | - (void)recognition:(ISSpeechRecognition *)speechRecognition didGetRecognitionResult:(ISSpeechRecognitionResult *)result
148 | {
149 | NSMutableDictionary * resultDict = [[NSMutableDictionary alloc]init];
150 | [resultDict setValue:result.text forKey:@"transcript"];
151 | [resultDict setValue:[NSNumber numberWithBool:YES] forKey:@"final"];
152 | [resultDict setValue:[NSNumber numberWithFloat:result.confidence]forKey:@"confidence"];
153 | NSArray * alternatives = @[resultDict];
154 | NSArray * results = @[alternatives];
155 | [self sendResults:results];
156 |
157 | }
158 |
/**
 * iSpeech SDK delegate callback: recognition failed.
 * Only SDK error codes 28 and 23 are surfaced to the JS layer, mapped
 * to Web Speech error code 7; every other failure is dropped silently.
 * NOTE(review): confirm the meaning of codes 28/23 against the iSpeech
 * SDK headers before relying on this filter.
 */
-(void) recognition:(ISSpeechRecognition *)speechRecognition didFailWithError:(NSError *)error
{
    NSInteger code = error.code;
    if (code == 28 || code == 23) {
        [self sendErrorWithMessage:[error localizedDescription] andCode:7];
    }
}
165 |
/**
 * Delivers a "result" event to the pending JS callback, keeping the
 * callback alive so further events can be sent on the same channel.
 *
 * @param results nested array: result list -> alternatives ->
 *                alternative dictionaries (transcript/final/confidence).
 */
-(void) sendResults:(NSArray *) results
{
    NSMutableDictionary *event = [NSMutableDictionary dictionary];
    [event setValue:@"result" forKey:@"type"];
    // emma / interpretation belong to the Web Speech event shape but are
    // never populated here; setValue:nil on an absent key is a no-op.
    [event setValue:nil forKey:@"emma"];
    [event setValue:nil forKey:@"interpretation"];
    [event setValue:results forKey:@"results"];

    self.pluginResult = [CDVPluginResult resultWithStatus:CDVCommandStatus_OK messageAsDictionary:event];
    [self.pluginResult setKeepCallbackAsBool:YES];
    [self.commandDelegate sendPluginResult:self.pluginResult callbackId:self.command.callbackId];
}
178 |
/**
 * Delivers an "error" event to the pending JS callback and closes the
 * callback channel (keepCallback is NO, so no further events follow).
 *
 * @param errorMessage human-readable description forwarded to JS.
 * @param code         Web Speech error code (see SpeechRecognitionError).
 */
-(void) sendErrorWithMessage:(NSString *)errorMessage andCode:(NSInteger) code
{
    NSMutableDictionary *event = [NSMutableDictionary dictionary];
    [event setValue:@"error" forKey:@"type"];
    [event setValue:@(code) forKey:@"error"];
    [event setValue:errorMessage forKey:@"message"];

    self.pluginResult = [CDVPluginResult resultWithStatus:CDVCommandStatus_ERROR messageAsDictionary:event];
    [self.pluginResult setKeepCallbackAsBool:NO];
    [self.commandDelegate sendPluginResult:self.pluginResult callbackId:self.command.callbackId];
}
189 |
// Cordova "stop" action. Currently identical to abort: — both route
// through stopOrAbort.
-(void) stop:(CDVInvokedUrlCommand*)command
{
    [self stopOrAbort];
}
194 |
// Cordova "abort" action. Currently identical to stop: — both route
// through stopOrAbort.
-(void) abort:(CDVInvokedUrlCommand*)command
{
    [self stopOrAbort];
}
199 |
/**
 * Shared implementation behind stop: and abort:.
 * When SFSpeechRecognizer is available (iOS 10+), stops the audio
 * engine and ends the recognition request if capture is running;
 * otherwise cancels the iSpeech SDK session.
 */
-(void) stopOrAbort
{
    if (!NSClassFromString(@"SFSpeechRecognizer")) {
        // Pre-iOS-10 fallback path: iSpeech SDK.
        [self.iSpeechRecognition cancel];
        return;
    }
    if (self.audioEngine.isRunning) {
        [self.audioEngine stop];
        [self.recognitionRequest endAudio];
    }
}
211 |
// Tears down the native recognition pipeline: stops the audio engine,
// removes the microphone tap installed on the input node, and releases
// the request/task so a fresh session can be created later.
-(void) stopAndRelease
{
    [self.audioEngine stop];
    [self.audioEngine.inputNode removeTapOnBus:0];
    self.recognitionRequest = nil;
    self.recognitionTask = nil;
}
219 |
220 | @end
221 |
--------------------------------------------------------------------------------
/src/ios/iSpeechSDK.bundle/ISPopupBackground.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/macdonst/SpeechRecognitionPlugin/8fbc04dcd6abf37ce1c2e186a9811aaa9fa7295c/src/ios/iSpeechSDK.bundle/ISPopupBackground.png
--------------------------------------------------------------------------------
/src/ios/iSpeechSDK.bundle/ISPopupBackground@2x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/macdonst/SpeechRecognitionPlugin/8fbc04dcd6abf37ce1c2e186a9811aaa9fa7295c/src/ios/iSpeechSDK.bundle/ISPopupBackground@2x.png
--------------------------------------------------------------------------------
/src/ios/iSpeechSDK.bundle/ISPopupButton.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/macdonst/SpeechRecognitionPlugin/8fbc04dcd6abf37ce1c2e186a9811aaa9fa7295c/src/ios/iSpeechSDK.bundle/ISPopupButton.png
--------------------------------------------------------------------------------
/src/ios/iSpeechSDK.bundle/ISPopupButton@2x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/macdonst/SpeechRecognitionPlugin/8fbc04dcd6abf37ce1c2e186a9811aaa9fa7295c/src/ios/iSpeechSDK.bundle/ISPopupButton@2x.png
--------------------------------------------------------------------------------
/src/ios/iSpeechSDK.bundle/ISPopupButtonPress.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/macdonst/SpeechRecognitionPlugin/8fbc04dcd6abf37ce1c2e186a9811aaa9fa7295c/src/ios/iSpeechSDK.bundle/ISPopupButtonPress.png
--------------------------------------------------------------------------------
/src/ios/iSpeechSDK.bundle/ISPopupButtonPress@2x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/macdonst/SpeechRecognitionPlugin/8fbc04dcd6abf37ce1c2e186a9811aaa9fa7295c/src/ios/iSpeechSDK.bundle/ISPopupButtonPress@2x.png
--------------------------------------------------------------------------------
/src/ios/iSpeechSDK.bundle/ISPopupDefaultButton.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/macdonst/SpeechRecognitionPlugin/8fbc04dcd6abf37ce1c2e186a9811aaa9fa7295c/src/ios/iSpeechSDK.bundle/ISPopupDefaultButton.png
--------------------------------------------------------------------------------
/src/ios/iSpeechSDK.bundle/ISPopupDefaultButton@2x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/macdonst/SpeechRecognitionPlugin/8fbc04dcd6abf37ce1c2e186a9811aaa9fa7295c/src/ios/iSpeechSDK.bundle/ISPopupDefaultButton@2x.png
--------------------------------------------------------------------------------
/src/ios/iSpeechSDK.bundle/Info.plist:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/macdonst/SpeechRecognitionPlugin/8fbc04dcd6abf37ce1c2e186a9811aaa9fa7295c/src/ios/iSpeechSDK.bundle/Info.plist
--------------------------------------------------------------------------------
/src/ios/iSpeechSDK.bundle/Thumbs.db:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/macdonst/SpeechRecognitionPlugin/8fbc04dcd6abf37ce1c2e186a9811aaa9fa7295c/src/ios/iSpeechSDK.bundle/Thumbs.db
--------------------------------------------------------------------------------
/src/ios/iSpeechSDK.bundle/ar.lproj/Localizable.strings:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/macdonst/SpeechRecognitionPlugin/8fbc04dcd6abf37ce1c2e186a9811aaa9fa7295c/src/ios/iSpeechSDK.bundle/ar.lproj/Localizable.strings
--------------------------------------------------------------------------------
/src/ios/iSpeechSDK.bundle/cs.lproj/Localizable.strings:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/macdonst/SpeechRecognitionPlugin/8fbc04dcd6abf37ce1c2e186a9811aaa9fa7295c/src/ios/iSpeechSDK.bundle/cs.lproj/Localizable.strings
--------------------------------------------------------------------------------
/src/ios/iSpeechSDK.bundle/da.lproj/Localizable.strings:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/macdonst/SpeechRecognitionPlugin/8fbc04dcd6abf37ce1c2e186a9811aaa9fa7295c/src/ios/iSpeechSDK.bundle/da.lproj/Localizable.strings
--------------------------------------------------------------------------------
/src/ios/iSpeechSDK.bundle/de.lproj/Localizable.strings:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/macdonst/SpeechRecognitionPlugin/8fbc04dcd6abf37ce1c2e186a9811aaa9fa7295c/src/ios/iSpeechSDK.bundle/de.lproj/Localizable.strings
--------------------------------------------------------------------------------
/src/ios/iSpeechSDK.bundle/en.lproj/Localizable.strings:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/macdonst/SpeechRecognitionPlugin/8fbc04dcd6abf37ce1c2e186a9811aaa9fa7295c/src/ios/iSpeechSDK.bundle/en.lproj/Localizable.strings
--------------------------------------------------------------------------------
/src/ios/iSpeechSDK.bundle/es.lproj/Localizable.strings:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/macdonst/SpeechRecognitionPlugin/8fbc04dcd6abf37ce1c2e186a9811aaa9fa7295c/src/ios/iSpeechSDK.bundle/es.lproj/Localizable.strings
--------------------------------------------------------------------------------
/src/ios/iSpeechSDK.bundle/fail.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/macdonst/SpeechRecognitionPlugin/8fbc04dcd6abf37ce1c2e186a9811aaa9fa7295c/src/ios/iSpeechSDK.bundle/fail.wav
--------------------------------------------------------------------------------
/src/ios/iSpeechSDK.bundle/failed.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/macdonst/SpeechRecognitionPlugin/8fbc04dcd6abf37ce1c2e186a9811aaa9fa7295c/src/ios/iSpeechSDK.bundle/failed.png
--------------------------------------------------------------------------------
/src/ios/iSpeechSDK.bundle/failed@2x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/macdonst/SpeechRecognitionPlugin/8fbc04dcd6abf37ce1c2e186a9811aaa9fa7295c/src/ios/iSpeechSDK.bundle/failed@2x.png
--------------------------------------------------------------------------------
/src/ios/iSpeechSDK.bundle/fi.lproj/Localizable.strings:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/macdonst/SpeechRecognitionPlugin/8fbc04dcd6abf37ce1c2e186a9811aaa9fa7295c/src/ios/iSpeechSDK.bundle/fi.lproj/Localizable.strings
--------------------------------------------------------------------------------
/src/ios/iSpeechSDK.bundle/fr.lproj/Localizable.strings:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/macdonst/SpeechRecognitionPlugin/8fbc04dcd6abf37ce1c2e186a9811aaa9fa7295c/src/ios/iSpeechSDK.bundle/fr.lproj/Localizable.strings
--------------------------------------------------------------------------------
/src/ios/iSpeechSDK.bundle/hu.lproj/Localizable.strings:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/macdonst/SpeechRecognitionPlugin/8fbc04dcd6abf37ce1c2e186a9811aaa9fa7295c/src/ios/iSpeechSDK.bundle/hu.lproj/Localizable.strings
--------------------------------------------------------------------------------
/src/ios/iSpeechSDK.bundle/iSpeechLogo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/macdonst/SpeechRecognitionPlugin/8fbc04dcd6abf37ce1c2e186a9811aaa9fa7295c/src/ios/iSpeechSDK.bundle/iSpeechLogo.png
--------------------------------------------------------------------------------
/src/ios/iSpeechSDK.bundle/iSpeechLogo@2x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/macdonst/SpeechRecognitionPlugin/8fbc04dcd6abf37ce1c2e186a9811aaa9fa7295c/src/ios/iSpeechSDK.bundle/iSpeechLogo@2x.png
--------------------------------------------------------------------------------
/src/ios/iSpeechSDK.bundle/it.lproj/Localizable.strings:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/macdonst/SpeechRecognitionPlugin/8fbc04dcd6abf37ce1c2e186a9811aaa9fa7295c/src/ios/iSpeechSDK.bundle/it.lproj/Localizable.strings
--------------------------------------------------------------------------------
/src/ios/iSpeechSDK.bundle/ja.lproj/Localizable.strings:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/macdonst/SpeechRecognitionPlugin/8fbc04dcd6abf37ce1c2e186a9811aaa9fa7295c/src/ios/iSpeechSDK.bundle/ja.lproj/Localizable.strings
--------------------------------------------------------------------------------
/src/ios/iSpeechSDK.bundle/ko.lproj/Localizable.strings:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/macdonst/SpeechRecognitionPlugin/8fbc04dcd6abf37ce1c2e186a9811aaa9fa7295c/src/ios/iSpeechSDK.bundle/ko.lproj/Localizable.strings
--------------------------------------------------------------------------------
/src/ios/iSpeechSDK.bundle/microphone.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/macdonst/SpeechRecognitionPlugin/8fbc04dcd6abf37ce1c2e186a9811aaa9fa7295c/src/ios/iSpeechSDK.bundle/microphone.png
--------------------------------------------------------------------------------
/src/ios/iSpeechSDK.bundle/microphone@2x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/macdonst/SpeechRecognitionPlugin/8fbc04dcd6abf37ce1c2e186a9811aaa9fa7295c/src/ios/iSpeechSDK.bundle/microphone@2x.png
--------------------------------------------------------------------------------
/src/ios/iSpeechSDK.bundle/nb.lproj/Localizable.strings:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/macdonst/SpeechRecognitionPlugin/8fbc04dcd6abf37ce1c2e186a9811aaa9fa7295c/src/ios/iSpeechSDK.bundle/nb.lproj/Localizable.strings
--------------------------------------------------------------------------------
/src/ios/iSpeechSDK.bundle/nl.lproj/Localizable.strings:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/macdonst/SpeechRecognitionPlugin/8fbc04dcd6abf37ce1c2e186a9811aaa9fa7295c/src/ios/iSpeechSDK.bundle/nl.lproj/Localizable.strings
--------------------------------------------------------------------------------
/src/ios/iSpeechSDK.bundle/pl.lproj/Localizable.strings:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/macdonst/SpeechRecognitionPlugin/8fbc04dcd6abf37ce1c2e186a9811aaa9fa7295c/src/ios/iSpeechSDK.bundle/pl.lproj/Localizable.strings
--------------------------------------------------------------------------------
/src/ios/iSpeechSDK.bundle/pt-PT.lproj/Localizable.strings:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/macdonst/SpeechRecognitionPlugin/8fbc04dcd6abf37ce1c2e186a9811aaa9fa7295c/src/ios/iSpeechSDK.bundle/pt-PT.lproj/Localizable.strings
--------------------------------------------------------------------------------
/src/ios/iSpeechSDK.bundle/pt.lproj/Localizable.strings:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/macdonst/SpeechRecognitionPlugin/8fbc04dcd6abf37ce1c2e186a9811aaa9fa7295c/src/ios/iSpeechSDK.bundle/pt.lproj/Localizable.strings
--------------------------------------------------------------------------------
/src/ios/iSpeechSDK.bundle/ru.lproj/Localizable.strings:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/macdonst/SpeechRecognitionPlugin/8fbc04dcd6abf37ce1c2e186a9811aaa9fa7295c/src/ios/iSpeechSDK.bundle/ru.lproj/Localizable.strings
--------------------------------------------------------------------------------
/src/ios/iSpeechSDK.bundle/startRecord.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/macdonst/SpeechRecognitionPlugin/8fbc04dcd6abf37ce1c2e186a9811aaa9fa7295c/src/ios/iSpeechSDK.bundle/startRecord.wav
--------------------------------------------------------------------------------
/src/ios/iSpeechSDK.bundle/stopRecord.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/macdonst/SpeechRecognitionPlugin/8fbc04dcd6abf37ce1c2e186a9811aaa9fa7295c/src/ios/iSpeechSDK.bundle/stopRecord.wav
--------------------------------------------------------------------------------
/src/ios/iSpeechSDK.bundle/success.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/macdonst/SpeechRecognitionPlugin/8fbc04dcd6abf37ce1c2e186a9811aaa9fa7295c/src/ios/iSpeechSDK.bundle/success.png
--------------------------------------------------------------------------------
/src/ios/iSpeechSDK.bundle/success.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/macdonst/SpeechRecognitionPlugin/8fbc04dcd6abf37ce1c2e186a9811aaa9fa7295c/src/ios/iSpeechSDK.bundle/success.wav
--------------------------------------------------------------------------------
/src/ios/iSpeechSDK.bundle/success@2x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/macdonst/SpeechRecognitionPlugin/8fbc04dcd6abf37ce1c2e186a9811aaa9fa7295c/src/ios/iSpeechSDK.bundle/success@2x.png
--------------------------------------------------------------------------------
/src/ios/iSpeechSDK.bundle/sv.lproj/Localizable.strings:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/macdonst/SpeechRecognitionPlugin/8fbc04dcd6abf37ce1c2e186a9811aaa9fa7295c/src/ios/iSpeechSDK.bundle/sv.lproj/Localizable.strings
--------------------------------------------------------------------------------
/src/ios/iSpeechSDK.bundle/tr.lproj/Localizable.strings:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/macdonst/SpeechRecognitionPlugin/8fbc04dcd6abf37ce1c2e186a9811aaa9fa7295c/src/ios/iSpeechSDK.bundle/tr.lproj/Localizable.strings
--------------------------------------------------------------------------------
/src/ios/iSpeechSDK.bundle/zh-Hans.lproj/Localizable.strings:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/macdonst/SpeechRecognitionPlugin/8fbc04dcd6abf37ce1c2e186a9811aaa9fa7295c/src/ios/iSpeechSDK.bundle/zh-Hans.lproj/Localizable.strings
--------------------------------------------------------------------------------
/src/ios/iSpeechSDK.bundle/zh-Hant.lproj/Localizable.strings:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/macdonst/SpeechRecognitionPlugin/8fbc04dcd6abf37ce1c2e186a9811aaa9fa7295c/src/ios/iSpeechSDK.bundle/zh-Hant.lproj/Localizable.strings
--------------------------------------------------------------------------------
/src/ios/libiSpeechSDK.a:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/macdonst/SpeechRecognitionPlugin/8fbc04dcd6abf37ce1c2e186a9811aaa9fa7295c/src/ios/libiSpeechSDK.a
--------------------------------------------------------------------------------
/www/SpeechGrammar.js:
--------------------------------------------------------------------------------
/**
 * Minimal SpeechGrammar implementation (W3C Web Speech API).
 *
 * Bug fix: the original body used bare expression statements
 * (`this.src;`, `this.weight;`), which evaluate and discard the
 * properties without ever creating them. They are now initialized
 * explicitly; `weight` defaults to 1.0 per the spec.
 */
var SpeechGrammar = function() {
    this.src = "";
    this.weight = 1.0;
};
5 |
6 | module.exports = SpeechGrammar;
7 |
--------------------------------------------------------------------------------
/www/SpeechGrammarList.js:
--------------------------------------------------------------------------------
/**
 * Minimal SpeechGrammarList implementation (W3C Web Speech API).
 *
 * Bug fix: `new SpeechGrammarList()` (or with a null argument) used to
 * throw on `undefined.length`; the backing list now defaults to empty.
 *
 * @param {Array} [data] - backing array of grammar entries.
 */
var SpeechGrammarList = function(data) {
    this._list = data || [];
    this.length = this._list.length;
};

/**
 * Array-like accessor: returns the grammar at the given index
 * (undefined when out of range).
 * @param {number} item - zero-based index.
 */
SpeechGrammarList.prototype.item = function(item) {
    return this._list[item];
};

// TODO: unimplemented stub. Note the spec names this addFromURI(src,
// weight); the browser shim aliases the two.
SpeechGrammarList.prototype.addFromUri = function(item) {
};

// TODO: unimplemented stub — should parse `item` as a grammar string
// and update length.
SpeechGrammarList.prototype.addFromString = function(item) {
};
15 |
16 | module.exports = SpeechGrammarList;
17 |
--------------------------------------------------------------------------------
/www/SpeechRecognition.js:
--------------------------------------------------------------------------------
1 | var exec = require("cordova/exec");
2 |
3 | /**
4 | attribute SpeechGrammarList grammars;
5 | attribute DOMString lang;
6 | attribute boolean continuous;
7 | attribute boolean interimResults;
8 | attribute unsigned long maxAlternatives;
9 | attribute DOMString serviceURI;
10 | */
/**
 * Cordova implementation of the W3C SpeechRecognition interface.
 * Constructing an instance immediately fires the native "init" action
 * through the exec bridge.
 */
var SpeechRecognition = function () {
    // Recognition parameters (see the Web Speech API attribute list above).
    this.grammars = null;
    this.lang = "en";
    this.continuous = false;
    this.interimResults = false;
    this.maxAlternatives = 1;
    this.serviceURI = "";

    // Event handler slots (on<type>), all initially unset.
    var self = this;
    [
        "audiostart", "soundstart", "speechstart", "speechend",
        "soundend", "audioend", "result", "nomatch",
        "error", "start", "end"
    ].forEach(function (name) {
        self["on" + name] = null;
    });

    exec(
        function () { console.log("initialized"); },
        function (e) { console.log("error: " + e); },
        "SpeechRecognition", "init", []
    );
};
38 |
/**
 * Starts a recognition session. Events coming back from the native
 * layer on the success channel are dispatched to the matching on<type>
 * handler; failures go to onerror.
 */
SpeechRecognition.prototype.start = function() {
    var that = this;

    // The event types that may legitimately arrive on the success
    // channel ("error" deliberately excluded — it uses errorCallback).
    var dispatchable = [
        "audiostart", "soundstart", "speechstart", "speechend",
        "soundend", "audioend", "result", "nomatch", "start", "end"
    ];

    var successCallback = function(event) {
        if (dispatchable.indexOf(event.type) === -1) {
            return;
        }
        var handler = that["on" + event.type];
        if (typeof handler === "function") {
            // call() keeps `this` bound to the recognizer, matching the
            // original `that.on<type>(event)` method-call form.
            handler.call(that, event);
        }
    };

    var errorCallback = function(err) {
        if (typeof that.onerror === "function") {
            that.onerror(err);
        }
    };

    exec(successCallback, errorCallback, "SpeechRecognition", "start", [this.lang, this.interimResults, this.maxAlternatives]);
};
72 |
// Invokes the native "stop" action; the native side may still deliver
// results for audio captured before the stop.
SpeechRecognition.prototype.stop = function() {
    exec(null, null, "SpeechRecognition", "stop", []);
};
76 |
// Invokes the native "abort" action to terminate recognition.
// NOTE(review): the iOS implementation currently treats stop and abort
// identically — verify on each platform.
SpeechRecognition.prototype.abort = function() {
    exec(null, null, "SpeechRecognition", "abort", []);
};
80 |
81 | module.exports = SpeechRecognition;
82 |
--------------------------------------------------------------------------------
/www/SpeechRecognitionAlternative.js:
--------------------------------------------------------------------------------
/**
 * One recognition hypothesis (W3C Web Speech API): a transcript plus
 * its confidence score. Fields are populated by the plugin when
 * results arrive.
 */
var SpeechRecognitionAlternative = function() {
    this.confidence = 0.0;
    this.transcript = null;
};
5 |
6 | module.exports = SpeechRecognitionAlternative;
7 |
--------------------------------------------------------------------------------
/www/SpeechRecognitionError.js:
--------------------------------------------------------------------------------
/**
 * Error event shape (W3C Web Speech API): a numeric error code and a
 * human-readable message, filled in by the plugin.
 */
var SpeechRecognitionError = function() {
    this.error = null;
    this.message = null;
};

// Numeric code for each spec-defined error name, attached as static
// properties on the constructor (e.g. SpeechRecognitionError['network']).
var SPEECH_ERROR_CODES = {
    "no-speech": 0,
    "aborted": 1,
    "audio-capture": 2,
    "network": 3,
    "not-allowed": 4,
    "service-not-allowed": 5,
    "bad-grammar": 6,
    "language-not-supported": 7
};
Object.keys(SPEECH_ERROR_CODES).forEach(function (name) {
    SpeechRecognitionError[name] = SPEECH_ERROR_CODES[name];
});
14 |
15 | module.exports = SpeechRecognitionError;
16 |
--------------------------------------------------------------------------------
/www/SpeechRecognitionEvent.js:
--------------------------------------------------------------------------------
/**
 * Event object delivered to onresult (W3C Web Speech API).
 *
 * Bug fix: the original body used bare expression statements
 * (`this.resultIndex;`), which never created the properties at all.
 * They are now initialized explicitly.
 */
var SpeechRecognitionEvent = function() {
    this.resultIndex = 0;
    this.results = null;
    this.interpretation = null;
    this.emma = null;
};
7 |
8 | module.exports = SpeechRecognitionEvent;
9 |
--------------------------------------------------------------------------------
/www/SpeechRecognitionResult.js:
--------------------------------------------------------------------------------
/**
 * A complete one-shot simple response (W3C Web Speech API): an
 * array-like list of SpeechRecognitionAlternative objects plus a
 * `final` flag.
 */
var SpeechRecognitionResult = function() {
    this._result = [];
    this.length = 0;
    this.final = false;
};

// Array-like accessor: returns the alternative at the given index
// (undefined when out of range).
SpeechRecognitionResult.prototype.item = function(index) {
    return this._result[index];
};
11 |
12 | module.exports = SpeechRecognitionResult;
13 |
--------------------------------------------------------------------------------
/www/SpeechRecognitionResultList.js:
--------------------------------------------------------------------------------
/**
 * A collection of responses (used in continuous mode): an array-like
 * list of SpeechRecognitionResult objects (W3C Web Speech API).
 */
var SpeechRecognitionResultList = function() {
    this._result = [];
    this.length = 0;
};

// Array-like accessor: returns the result at the given index
// (undefined when out of range).
SpeechRecognitionResultList.prototype.item = function(index) {
    return this._result[index];
};
12 |
--------------------------------------------------------------------------------
/www/browser/SpeechRecognition.js:
--------------------------------------------------------------------------------
// Browser platform shim: when only the webkit-prefixed Web Speech
// implementations exist, expose them under the standard names.
[
    "SpeechRecognition",
    "SpeechRecognitionError",
    "SpeechRecognitionEvent",
    "SpeechGrammar"
].forEach(function (name) {
    if (!window[name] && window["webkit" + name]) {
        window[name] = window["webkit" + name];
    }
});

if (!window.SpeechGrammarList && window.webkitSpeechGrammarList) {
    window.SpeechGrammarList = window.webkitSpeechGrammarList;
    // Expose the spec-named addFromURI as an alias for WebKit's
    // addFromUri on the newly-assigned constructor.
    SpeechGrammarList.prototype.addFromURI = window.SpeechGrammarList.prototype.addFromUri;
}