├── .cfignore ├── .gitignore ├── .travis.yml ├── .vscode └── settings.json ├── ACKNOWLEDGEMENTS.md ├── CONTRIBUTING.md ├── LICENSE ├── MAINTAINERS.md ├── README.md ├── client ├── .eslintrc ├── .gitignore ├── README.md ├── package.json ├── public │ ├── favicon.ico │ ├── index.html │ └── manifest.json └── src │ ├── App.css │ ├── App.js │ ├── App.test.js │ ├── Routes.js │ ├── components │ ├── AlertDismissable.js │ ├── AppliedRoute.js │ ├── AuthenticatedRoute.js │ ├── LoadButton.css │ ├── LoadButton.js │ └── UnauthenticatedRoute.js │ ├── config.js │ ├── index.css │ ├── index.js │ ├── pages │ ├── Audio.css │ ├── Audio.js │ ├── Corpora.css │ ├── Corpora.js │ ├── Home.css │ ├── Home.js │ ├── Login.css │ ├── Login.js │ ├── NotFound.css │ ├── NotFound.js │ ├── Train.css │ ├── Train.js │ ├── Transcribe.css │ ├── Transcribe.js │ ├── Words.css │ ├── Words.js │ └── util.js │ └── serviceWorker.js ├── cmd ├── README.md ├── add_audio.py ├── add_corpus.py ├── create_acoustic_model.py ├── create_language_model.py ├── delete_acoustic_model.py ├── delete_audio.py ├── delete_corpus.py ├── delete_language_model.py ├── env.py ├── list_acoustic_model.py ├── list_all_models.py ├── list_audio.py ├── list_corpus.py ├── list_language_model.py ├── requirements.txt ├── reset_language_model.py ├── train_acoustic_model.py ├── train_language_model.py └── transcribe.py ├── data ├── convert_rtf.py └── fixup.sed ├── doc ├── cloud-deploy.md └── source │ └── images │ ├── architecture.png │ ├── custom-word-list.png │ ├── main-page.png │ ├── re-train.png │ └── training-panel.png ├── manifest.yml ├── model └── user.json ├── package.json ├── server ├── controllers │ ├── api.ts │ └── user.ts ├── routes │ └── api.ts ├── server.ts ├── types │ └── cfenv.d.ts └── util.ts ├── services.sample.json ├── tsconfig.json └── tslint.json /.cfignore: -------------------------------------------------------------------------------- 1 | node_modules/ 2 | client/node_modules/ 3 | *.DS_Store 4 | README.md 5 | .github/ 6 | .git/ 7 | .gitignore 8 | logs 9 | *.log 10 | data/ 11 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Logs 2 | logs 3 | *.log 4 | npm-debug.log* 5 | yarn-debug.log* 6 | yarn-error.log* 7 | *.pyc 8 | 9 | # Runtime data 10 | pids 11 | *.pid 12 | *.seed 13 | *.pid.lock 14 | 15 | # Directory for instrumented libs generated by jscoverage/JSCover 16 | lib-cov 17 | 18 | # Coverage directory used by tools like istanbul 19 | coverage 20 | 21 | # nyc test coverage 22 | .nyc_output 23 | 24 | # Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files) 25 | .grunt 26 | 27 | # Bower dependency directory (https://bower.io/) 28 | bower_components 29 | 30 | # node-waf configuration 31 | .lock-wscript 32 | 33 | # Compiled binary addons (https://nodejs.org/api/addons.html) 34 | build/Release 35 | 36 | # Dependency directories 37 | node_modules/ 38 | jspm_packages/ 39 | 40 | # TypeScript v1 declaration files 41 | typings/ 42 | 43 | # Optional npm cache directory 44 | .npm 45 | 46 | # Optional eslint cache 47 | .eslintcache 48 | 49 | # Optional REPL history 50 | .node_repl_history 51 | 52 | # Output of 'npm pack' 53 | *.tgz 54 | 55 | # Yarn Integrity file 56 | .yarn-integrity 57 | 58 | # dotenv environment variables file 59 | .env 60 | 61 | # next.js build output 62 | .next 63 | 64 | .DS_Store 65 | dist/ 66 | .yalc/ 67 | yalc.lock 68 | package-lock.json 69 | services.json 70 | 71 | # data directories 72 | data/Audio 73 | data/Documents 74 | data/*.zip 75 | data/*.txt 76 | data/*.input 77 | data/*.corpus 78 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: node_js 2 | node_js: 3 | - "8" 4 | before_script: 5 | - npm run build 6 | script: 7 | - npm run lint 8 | cache: 9 | directories: 10 | - node_modules 11 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "editor.tabSize": 2, 3 | "editor.insertSpaces": true, 4 | "files.trimTrailingWhitespace": true, 5 | "files.insertFinalNewline": true, 6 | "editor.rulers": [80], 7 | "files.exclude": { 8 | "**/.git": true, 9 | "**/.DS_Store": true, 10 | "**/node_modules": true 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /ACKNOWLEDGEMENTS.md: -------------------------------------------------------------------------------- 1 | ## Acknowledgements 2 | 3 | * Credit for the development of this code pattern goes to:\ 4 | [Ton Ngo](https://github.com/tonanhngo)\ 5 | [Paul Van Eck](https://github.com/pvaneck)\ 6 | [Yihong Wang](https://github.com/yhwang)\ 7 | [Ted Chang](https://github.com/tedhtchang) 8 | 9 | * The audio and medical data used in this code pattern was provided by [ezDI](https://www.ezdi.com). 10 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | This is an open source project, and we appreciate your help! 4 | 5 | We use the GitHub issue tracker to discuss new features and non-trivial bugs. 6 | 7 | In addition to the issue tracker, [#journeys on 8 | Slack](https://dwopen.slack.com) is the best way to get into contact with the 9 | project's maintainers. 10 | 11 | To contribute code, documentation, or tests, please submit a pull request to 12 | the GitHub repository. Generally, we expect two maintainers to review your pull 13 | request before it is approved for merging. For more details, see the 14 | [MAINTAINERS](MAINTAINERS.md) page. 15 | 16 | Contributions are subject to the [Developer Certificate of Origin, Version 1.1](https://developercertificate.org/) and the [Apache License, Version 2](https://www.apache.org/licenses/LICENSE-2.0.txt). -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /MAINTAINERS.md: -------------------------------------------------------------------------------- 1 | # Maintainers Guide 2 | 3 | This guide is intended for maintainers - anybody with commit access to one or 4 | more Code Pattern repositories. 5 | 6 | ## Methodology 7 | 8 | This repository does not have a traditional release management cycle, but 9 | should instead be maintained as a useful, working, and polished reference at 10 | all times. While all work can therefore be focused on the master branch, the 11 | quality of this branch should never be compromised. 12 | 13 | The remainder of this document details how to merge pull requests to the 14 | repositories. 15 | 16 | ## Merge approval 17 | 18 | The project maintainers use LGTM (Looks Good To Me) in comments on the pull 19 | request to indicate acceptance prior to merging. A change requires LGTMs from 20 | two project maintainers. If the code is written by a maintainer, the change 21 | only requires one additional LGTM. 22 | 23 | ## Reviewing Pull Requests 24 | 25 | We recommend reviewing pull requests directly within GitHub. This allows a 26 | public commentary on changes, providing transparency for all users. When 27 | providing feedback be civil, courteous, and kind. Disagreement is fine, so long 28 | as the discourse is carried out politely. If we see a record of uncivil or 29 | abusive comments, we will revoke your commit privileges and invite you to leave 30 | the project. 31 | 32 | During your review, consider the following points: 33 | 34 | ### Does the change have positive impact? 35 | 36 | Some proposed changes may not represent a positive impact to the project. Ask 37 | whether or not the change will make understanding the code easier, or if it 38 | could simply be a personal preference on the part of the author (see 39 | [bikeshedding](https://en.wiktionary.org/wiki/bikeshedding)). 40 | 41 | Pull requests that do not have a clear positive impact should be closed without 42 | merging. 43 | 44 | ### Do the changes make sense? 45 | 46 | If you do not understand what the changes are or what they accomplish, ask the 47 | author for clarification. Ask the author to add comments and/or clarify test 48 | case names to make the intentions clear. 49 | 50 | At times, such clarification will reveal that the author may not be using the 51 | code correctly, or is unaware of features that accommodate their needs. If you 52 | feel this is the case, work up a code sample that would address the pull 53 | request for them, and feel free to close the pull request once they confirm. 54 | 55 | ### Does the change introduce a new feature? 56 | 57 | For any given pull request, ask yourself "is this a new feature?" If so, does 58 | the pull request (or associated issue) contain narrative indicating the need 59 | for the feature? If not, ask them to provide that information. 60 | 61 | Are new unit tests in place that test all new behaviors introduced? If not, do 62 | not merge the feature until they are! Is documentation in place for the new 63 | feature? (See the documentation guidelines). If not do not merge the feature 64 | until it is! Is the feature necessary for general use cases? Try and keep the 65 | scope of any given component narrow. If a proposed feature does not fit that 66 | scope, recommend to the user that they maintain the feature on their own, and 67 | close the request. You may also recommend that they see if the feature gains 68 | traction among other users, and suggest they re-submit when they can show such 69 | support. 70 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Build Status](https://api.travis-ci.org/IBM/Train-Custom-Speech-Model.svg?branch=master)](https://travis-ci.org/IBM/Train-Custom-Speech-Model) 2 | 3 | # Create a custom Watson Speech to Text model using specialized domain data 4 | 5 | In this code pattern, we will create a custom speech to text model. The `Watson Speech to Text` service is among the best in the industry. However, like other Cloud speech services, it was trained with general conversational speech for general use; therefore it may not perform well in specialized domains such as medicine, law, sports, etc. To improve the accuracy of the speech-to-text service, you can leverage transfer learning by training the existing AI model with new data from your domain. 6 | 7 | In this example, we will use a medical speech data set to illustrate the process. The data is provided by [ezDI](https://www.ezdi.com) and includes 16 hours of medical dictation in both audio and text files. 8 | 9 | When the reader has completed this code pattern, they will understand how to: 10 | 11 | * Prepare audio data and transcription text for training a speech-to-text model. 12 | * Work with the `Watson Speech to Text` service through API calls. 13 | * Train a custom speech-to-text model with a data set. 14 | * Enhance the model with continuous user feedback. 15 | 16 | ![architecture](doc/source/images/architecture.png) 17 | 18 | ## Flow 19 | 20 | 1. The user downloads the custom medical dictation data set from [ezDI](https://www.ezdi.com) and prepares the audio and text data for training. 21 | 1. The user interacts with the Watson Speech to Text service via the provided application UI or by executing command line Python scripts. 22 | 1. The user requests the custom data be used to create and train a language and acoustic Watson Speech to Text model. 23 | 1. The user interactively tests the new custom model by submitting audio files and verifying the text transcription returned from the model. 24 | 1. If the text transcription is not correct, the user can make corrections and resubmit the updated data for additional training. 25 | 1. Several users can work on the same custom model at the same time. 26 | 27 | ## Included components 28 | 29 | * [IBM Watson Speech to Text](https://www.ibm.com/watson/services/speech-to-text): easily convert audio and voice into written text for quick understanding of content. 30 | 31 | ## Featured technologies 32 | 33 | * [Node.js](https://nodejs.org/): An open-source JavaScript run-time environment for executing server-side JavaScript code. 34 | * [React](https://reactjs.org/): A JavaScript library for building User Interfaces. 35 | * [Watson Speech recognition](https://console.bluemix.net/docs/services/speech-to-text/getting-started.html#gettingStarted): Advanced models for processing audio signals and language context can accurately transcribe spoken voice into text. 36 | * [Watson Speech customization](https://console.bluemix.net/docs/services/speech-to-text/custom.html#customization): Ability to further train the model to improve the accuracy for your special domain. 37 | * [AI in medical services](https://www.ezdi.com): Save time for medical care providers by automating tasks such as entering data into Electronic Medical Record. 38 | 39 | # Watch the Video 40 | 41 | [![video](https://i.ytimg.com/vi/bBLu1Ap8c7c/hqdefault.jpg)](https://youtu.be/bBLu1Ap8c7c) 42 | 43 | # Steps 44 | 45 | 1. [Clone the repo](#1-clone-the-repo) 46 | 1. [Create IBM Cloud services](#2-create-ibm-cloud-services) 47 | 1. [Configure credentials](#3-configure-credentials) 48 | 1. [Download and prepare the data](#4-download-and-prepare-the-data) 49 | 1. [Train the models](#5-train-the-models) 50 | 1. [Transcribe your dictation](#6-transcribe-your-dictation) 51 | 1. [Correct the transcription](#7-correct-the-transcription) 52 | 53 | ## 1. Clone the repo 54 | 55 | ```bash 56 | git clone https://github.com/IBM/Train-Custom-Speech-Model 57 | ``` 58 | 59 | ## 2. Create IBM Cloud services 60 | 61 | Create the following services: 62 | 63 | * [**Watson Speech To Text**](https://cloud.ibm.com/catalog/services/speech-to-text) 64 | 65 | > Note: In order to perform customization, you will need to select the `Standard` paid plan. 66 | 67 | ## 3. Configure credentials 68 | 69 | From your **Watson Speech to Text** service instance, select the `Service Credentials` tab. 70 | 71 | If no credentials exist, select the `New Credential` button to create a new set of credentials. 72 | 73 | Save off the `apikey` and `url` values as they will be needed in future steps. 74 | 75 | ## 4. Download and prepare the data 76 | 77 | Download the [ezDI Medical Dictation Dataset](https://ezdi-datasets.s3.us.cloud-object-storage.appdomain.cloud/ezDI-Medical-Dictation-Dataset.zip) which is a zip file containing both the audio and text files. 78 | 79 | Extract the zip file, moving the `Documents` and `Audio` directories into the `data` directory located at the root of this project. 80 | 81 | The structure should look like: 82 | 83 | ``` 84 | Train-Custom-Speech-Model 85 | |__ data 86 | |__ Audio 87 | | |__ 1.wav 88 | | |__ ... 89 | |__ Documents 90 | |__ 1.rtf 91 | |__ ... 92 | ``` 93 | The transcription files stored in the `Documents` directory will be in **rtf** format, and need to be converted to plain text. You can use the `convert_rtf.py` Python script to convert them all to **txt** files. Run the following code block from the `data` directory to create a virtual environment, install dependencies, and run the conversion script. Note, you must have Python 3. 94 | 95 | ```bash 96 | python3 -m venv .venv 97 | source .venv/bin/activate 98 | pip install striprtf 99 | python convert_rtf.py 100 | ``` 101 | 102 | The data needs careful preparation since our deep learning model will only be as good as the data used in the training. Preparation may include steps such as removing erroneous words in the text, bad audio recordings, etc. These steps are typically very time-consuming when dealing with large datasets. 103 | 104 | Although the dataset from `ezDI` is already curated, a quick scan of the text transcription files will reveal some filler text that would not help the training. These unwanted text strings have been collected in the file [data/fixup.sed](data/fixup.sed) and can be removed from the text files by using the *sed* utility. 105 | 106 | Also, for the purpose of training, we will need to combine all text files into a single package, called a `corpus` file. 107 | 108 | To remove the unwanted text strings and to combine all of the text files into a single `corpus` file, perform the following command: 109 | 110 | ```bash 111 | sed -f fixup.sed Documents/*.txt > corpus-1.txt 112 | ``` 113 | 114 | For the audio files, we can archive them as zip or tar files. Since the `Watson Speech to Text` API has a limit of 100MB per archive file, we will need to split up the audio files into 3 zip files. We will also set aside the first 5 audio files for testing. 115 | 116 | ```bash 117 | zip audio-set1.zip -xi Audio/[6-9].wav Audio/[1-7][0-9].wav 118 | zip audio-set2.zip -xi Audio/[8-9][0-9].wav Audio/1[0-6][0-9].wav 119 | zip audio-set3.zip -xi Audio/1[7-9][0-9].wav Audio/2[0-4][0-9].wav 120 | ``` 121 | 122 | ## 5. Train the models 123 | 124 | To train the language and acoustic models, you can either run the application or use the command line interface. Or you can mix as desired, since both are working with the same data files and services. 125 | 126 | ### a. Run the application 127 | 128 | The application is a *nodejs* web service running locally with a GUI implemented in *React*. 129 | 130 | * Install [Node.js](https://nodejs.org/en/) runtime or NPM. 131 | 132 | To allow the web service to connect to your **Watson Speech to Text** service, create in the root directory a file named `services.json` by copying the sample file `services.sample.json`. Update the `apikey` and 'url' fields in the newly created file with your own values that were retrieved in [Step 3](#3-configure-credentials). 133 | 134 | ```json 135 | { 136 | "services": { 137 | "code-pattern-custom-language-model": [ 138 | { 139 | "credentials": { 140 | "apikey": "", 141 | "url": "" 142 | }, 143 | "label": "speech_to_text", 144 | "name": "code-pattern-custom-language-model" 145 | } 146 | ] 147 | } 148 | } 149 | ``` 150 | 151 | The application will require a local login. The local user accounts are defined in the file [model/user.json](model/user.json). The pre-defined user/passwords are `user1/user1` and `user2/user2`. The `langModel` and `acousticModel` fields are the names of your custom language and acoustic models which will be created upon logging in if they do not already exist. You can change the `baseModel` field if the base model you are working with is different from our default. Here is an example of user3 using Korean as base language for transcribing. See [Supported language models](https://cloud.ibm.com/docs/services/speech-to-text?topic=speech-to-text-models#modelsList). 152 | 153 | ```json 154 | { 155 | "user3": { 156 | "password": "user3", 157 | "langModel": "custom-korean-language", 158 | "acousticModel": "custom-korean-acoustic", 159 | "baseModel": "ko-KR_NarrowbandModel" 160 | } 161 | } 162 | ``` 163 | 164 | Install and start the application by running the following commands in the root directory: 165 | 166 | ```bash 167 | npm install 168 | npm run dev 169 | ``` 170 | 171 | The local nodejs web server will automatically open your browser to [http://localhost:3000](http://localhost:3000). 172 | 173 | ![main-page](doc/source/images/main-page.png) 174 | 175 | Before training the model, you must add the corpus and audio files. The files can be uploaded using the panels displayed in the `Corpora` and `Audio` tabs of the application UI. 176 | 177 | Then select the `Train` tab to show the training options. Train both the `Language Model` and `Acoustic Model`. 178 | 179 | ![training-status](doc/source/images/training-panel.png) 180 | 181 | > Note: Training the acoustic model can potentially take hours to complete. 182 | 183 | ### b. Use the Command Line interface 184 | 185 | If you prefer to use the command line, set the following environment variables. Update the `` and `` values with the values retrieved in [Step 3](#3-configure-credentials). 186 | 187 | ```bash 188 | export USERNAME=apikey 189 | export PASSWORD= 190 | export STT_ENDPOINT= 191 | ``` 192 | 193 | To keep all of the generated data files in the proper directory, set the current directory to `data` before executing any of the following commands: 194 | 195 | ```bash 196 | cd data 197 | ``` 198 | 199 | > Note: For a more detailed description of the available commands, see the [README](cmd/README.md) located in the `cmd` directory. 200 | 201 | #### Install dependencies 202 | 203 | The Python scripts use the package *requests*. If you don't have it already, install it with: 204 | 205 | ```bash 206 | pip install requests 207 | ``` 208 | 209 | #### Train the language model 210 | 211 | To create your custom language model and your corpus of medical dictation, run: 212 | 213 | ```bash 214 | python ../cmd/create_language_model.py "custom-model-1" 215 | ``` 216 | 217 | Note that we are naming our acoustic model "custom-model-1", just to be consistent with the default name that will be used by the application if logged in as `user1`. 218 | 219 | This script will return the ID of your custom model. Use it to set the following environment variable: 220 | 221 | ```bash 222 | export LANGUAGE_ID= 223 | ``` 224 | 225 | > Note: You can also obtain the ID by using the following command: 226 | > 227 | >```bash 228 | >python ../cmd/list_language_model.py 229 | >``` 230 | 231 | The custom model will stay in the *"pending"* state until a corpus of text is added. Add the medical transcription file we created in an earlier step. 232 | 233 | ```bash 234 | python ../cmd/add_corpus.py corpus-1.txt 235 | python ../cmd/list_corpus.py 236 | ``` 237 | 238 | This step will also save a new list of `Out-Of-Vocabulary` words in a file (the file will be created in current directory and will end in `OOVs.corpus`). `Out-Of-Vocabulary` words are words that are not a part of the basic Watson Speech-to-Text service, but will be added and used to train the language model. It may be useful to check the words in the file to see if there are any unexpected words that you don't want to train the model with. 239 | 240 | The status of the custom language model should now be set to *"ready"*. Now we can train the language model using the medical transcription. 241 | 242 | ```bash 243 | python ../cmd/train_language_model.py 244 | ``` 245 | 246 | Training is asynchronous and may take some time depending on the system workload. 247 | You can check for completion with `cmd/list_language_model.py`. When training is complete, the status will change from *"training"* to *"available"*. 248 | 249 | #### Train the acoustic model 250 | 251 | Create the custom acoustic model based on the custom language model. 252 | 253 | > Note: Since the audio files are sampled at the 8Khz rate, we will need to create a narrow band model, which is coded in the `create_acoustic_model.py` python script. 254 | 255 | ```bash 256 | python ../cmd/create_acoustic_model.py "acoustic-model-1" 257 | ``` 258 | 259 | Note that we are naming our acoustic model "acoustic-model-1", just to be consistent with the default name that will be used by the application if logged in as `user1`. 260 | 261 | This script will return the ID of your custom acoustic model. Use it to set the following environment variable: 262 | 263 | ```bash 264 | export ACOUSTIC_ID= 265 | ``` 266 | 267 | The custom acoustic model will be in the *"pending"* state until some audio data is added. Add the 3 zip files containing the audio clips with the following commands: 268 | 269 | ```bash 270 | python ../cmd/add_audio.py audio-set1.zip 271 | python ../cmd/add_audio.py audio-set2.zip 272 | python ../cmd/add_audio.py audio-set3.zip 273 | python ../cmd/list_audio.py 274 | ``` 275 | 276 | > Note: it may take some time to process each audio file. If processing is not completed yet, the command will return a *409* error message; in this case, simply retry later. 277 | 278 | When the status of the custom acoustic model is set to *"ready"*, you can start the training by running: 279 | 280 | ```bash 281 | python ../cmd/train_acoustic_model.py 282 | ``` 283 | 284 | Training the acoustic model is asynchronous and can potentially take hours to complete. To determine when training is completed, you can query the model and check if the status has changed from *"training"* to *"available"*. 285 | 286 | ```bash 287 | python ../cmd/list_acoustic_model.py 288 | ``` 289 | 290 | ## 6. Transcribe your dictation 291 | 292 | To try out the model, either create your own recorded medical dictation in wav format (use 8KHz sampling rate), or use one of the first 5 test wav files located in `/data/Audio` (remember, we left those out of the data set used to train the model). 293 | 294 | If running the application, click on the `Transcribe` tab and then browse to your wav file. You can select any combination of base or custom model for language and acoustic. Using custom model for both should give the best result. 295 | 296 | If using the command line, enter the following: 297 | 298 | ```bash 299 | python ../cmd/transcribe.py 300 | ``` 301 | 302 | Similarly to the application, you can set or unset the environment variables `LANGUAGE_ID` and `ACOUSTIC_ID` to select any combination of base or custom model for language and acoustic. If the corresponding variable is unset, the base model will be used. The transcription will be displayed on the terminal as well as written to a file with the same name as the audio file but with the file extension `.transcript`. 303 | 304 | ## 7. Correct the transcription 305 | 306 | If you detect errors in the transcribed text, you can re-train the models by submitting corrected transcriptions. 307 | 308 | If using the application, from the `Transcribe` panel, correct the transribed text. 309 | 310 | ![re-train](doc/source/images/re-train.png) 311 | 312 | If the audio file being transcribed is not already included in the acoustic model, check the `Add audio file to acoustic model` checkbox. 313 | 314 | Enter a corpus name, and hit `Submit`. 315 | 316 | The language and acoustic models will be re-trained with the new files. 317 | 318 | If using the command line, you can directly edit the transcription output file generated in the previous step. You can then add the corrected text as a new corpus, and add the audio file as a new audio source. 319 | 320 | > Note: If correcting multiple transcriptions, it will be more efficient to aggregate the corrected text files and audio clips before re-training the models. (See [Step #4](#4-download-and-prepare-the-data) for examples on how to aggregate the files, and [Step #5](#5-train-the-models) for how to re-train the models using the command line) 321 | 322 | # Sample output 323 | 324 | * The main GUI screen: 325 | 326 | ![main-page](doc/source/images/main-page.png) 327 | 328 | * Status for training of the models: 329 | 330 | ![training-status](doc/source/images/training-panel.png) 331 | 332 | * List of "Out of Vocabulary" words determined during training: 333 | 334 | ![custom-words](doc/source/images/custom-word-list.png) 335 | 336 | > Note: These are the words that are not a part of the base `Watson Speech to Text` service, but will be added to the language model. 337 | 338 | # Links 339 | 340 | * [Demo on Youtube](https://www.youtube.com/watch?v=bBLu1Ap8c7c): Watch the video 341 | 342 | # Troubleshooting 343 | 344 | * Error: Please set your username in the environment variable USERNAME. If you use IAM service credentials, set USERNAME set to the string "apikey" and set PASSWORD to the value of your IAM API key. 345 | 346 | > If you choose to use the command line, make sure you set up your environment variables. 347 | 348 | * *409* error message. 349 | 350 | > This indicates the service is busy. Try the command again later. 351 | 352 | * Error uploading the audio files: 353 | 354 | > Since the audio files are large (70-90MB), you may encounter error when uploading them because of unstable network connection. In this case, you can break up the files into smaller files and upload them. The training for the acoustic model will work the same way. 355 | 356 | For example, the command to zip the first audio file as described above: 357 | 358 | ```bash 359 | zip audio-set1.zip -xi Audio/[6-9].wav Audio/[1-7][0-9].wav 360 | ``` 361 | 362 | To break up into two smaller files, adjust the regular expression as appropriate: 363 | 364 | ```bash 365 | zip audio-set1a.zip -xi Audio/[6-9].wav Audio/[1-3][0-9].wav 366 | zip audio-set1b.zip -xi Audio/[4-7][0-9].wav 367 | ``` 368 | 369 | # Deploy on IBM Cloud 370 | 371 | Instructions for deploying the web application on Cloud Foundry can be found [here](doc/cloud-deploy.md). 372 | 373 | # Learn more 374 | 375 | * **Artificial Intelligence Code Patterns**: Enjoyed this Code Pattern? Check out our other [AI Code Patterns](https://developer.ibm.com/technologies/artificial-intelligence/) 376 | * **AI and Data Code Pattern Playlist**: Bookmark our [playlist](https://www.youtube.com/playlist?list=PLzUbsvIyrNfknNewObx5N7uGZ5FKH0Fde) with all of our Code Pattern videos 377 | * **With Watson**: Want to take your Watson app to the next level? Looking to utilize Watson Brand assets? [Join the With Watson program](https://www.ibm.com/watson/with-watson/) to leverage exclusive brand, marketing, and tech resources to amplify and accelerate your Watson embedded commercial solution. 378 | 379 | # License 380 | 381 | This code pattern is licensed under the Apache Software License, Version 2. Separate third party code objects invoked within this code pattern are licensed by their respective providers pursuant to their own separate licenses. Contributions are subject to the [Developer Certificate of Origin, Version 1.1 (DCO)](https://developercertificate.org/) and the [Apache Software License, Version 2](https://www.apache.org/licenses/LICENSE-2.0.txt). 382 | 383 | [Apache Software License (ASL) FAQ](https://www.apache.org/foundation/license-faq.html#WhatDoesItMEAN) 384 | -------------------------------------------------------------------------------- /client/.eslintrc: -------------------------------------------------------------------------------- 1 | { 2 | "plugins": [ 3 | "react" 4 | ], 5 | "parserOptions": { 6 | "ecmaFeatures": { 7 | "jsx": true 8 | } 9 | }, 10 | "extends": [ 11 | "eslint:recommended", 12 | "react-app" 13 | ], 14 | "rules": { 15 | "semi": 2, 16 | "no-trailing-spaces": 2, 17 | "max-len": ["error", { "code": 100 }], 18 | "no-console": 1, 19 | "quotes": ["error", "single"] 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /client/.gitignore: -------------------------------------------------------------------------------- 1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files. 2 | 3 | # dependencies 4 | /node_modules 5 | /.pnp 6 | .pnp.js 7 | 8 | # testing 9 | /coverage 10 | 11 | # production 12 | /build 13 | 14 | # misc 15 | .DS_Store 16 | .env.local 17 | .env.development.local 18 | .env.test.local 19 | .env.production.local 20 | 21 | npm-debug.log* 22 | yarn-debug.log* 23 | yarn-error.log* 24 | -------------------------------------------------------------------------------- /client/README.md: -------------------------------------------------------------------------------- 1 | This client was bootstrapped with [Create React App](https://github.com/facebook/create-react-app). 2 | 3 | ## Run React App with Express API Server 4 | 5 | Since this React app relies on the Express API backend server, it is best to run the following from 6 | the project's root directory (one directory up): 7 | 8 | ```bash 9 | npm run dev 10 | ``` 11 | 12 | This will start both the API server and the React app frontend. 13 | 14 | ## Project Structure 15 | 16 | All handled routes are listed in `src/Routers.js`. 17 | Each individual route has its own JavaScript and CSS file in the `src/pages` directory. 18 | Reusable components are stored in the `src/components` directory. These are often imported 19 | and used by the page scripts. 20 | 21 | Some basic config options are also listed in `src/config.js`. Currently the only two config 22 | options are: 23 | 24 | * `API_ENDPOINT`: The URL endpoint of the server hosting the API. 25 | * `MAX_AUDIO_SIZE`: The maximum size of audio files that users can upload for transcription. 26 | * `BASE_STT_MODEL`: The base model the custom models are built using. This option is used on 27 | the Transcribe page for users choosing not to use their customized models. 28 | 29 | 30 | ## Available Scripts 31 | 32 | In the project directory, you can run: 33 | 34 | ### `npm start` 35 | 36 | Runs the app in the development mode.
37 | Open [http://localhost:3000](http://localhost:3000) to view it in the browser. 38 | 39 | The page will reload if you make edits.
40 | You will also see any lint errors in the console. 41 | 42 | ### `npm test` 43 | 44 | Launches the test runner in the interactive watch mode.
45 | See the section about [running tests](https://facebook.github.io/create-react-app/docs/running-tests) for more information. 46 | 47 | ### `npm run build` 48 | 49 | Builds the app for production to the `build` folder.
50 | It correctly bundles React in production mode and optimizes the build for the best performance. 51 | 52 | The build is minified and the filenames include the hashes.
53 | Your app is ready to be deployed! 54 | 55 | See the section about [deployment](https://facebook.github.io/create-react-app/docs/deployment) for more information. 56 | 57 | ### `npm run eject` 58 | 59 | **Note: this is a one-way operation. Once you `eject`, you can’t go back!** 60 | 61 | If you aren’t satisfied with the build tool and configuration choices, you can `eject` at any time. This command will remove the single build dependency from your project. 62 | 63 | Instead, it will copy all the configuration files and the transitive dependencies (Webpack, Babel, ESLint, etc) right into your project so you have full control over them. All of the commands except `eject` will still work, but they will point to the copied scripts so you can tweak them. At this point you’re on your own. 64 | 65 | You don’t have to ever use `eject`. The curated feature set is suitable for small and middle deployments, and you shouldn’t feel obligated to use this feature. However we understand that this tool wouldn’t be useful if you couldn’t customize it when you are ready for it. 66 | 67 | ## Learn More 68 | 69 | You can learn more in the [Create React App documentation](https://facebook.github.io/create-react-app/docs/getting-started). 70 | 71 | To learn React, check out the [React documentation](https://reactjs.org/). 72 | -------------------------------------------------------------------------------- /client/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "speech-app", 3 | "version": "0.1.0", 4 | "private": true, 5 | "dependencies": { 6 | "react": "^16.6.0", 7 | "react-bootstrap": "^0.32.4", 8 | "react-dom": "^16.6.0", 9 | "react-router-bootstrap": "^0.24.4", 10 | "react-router-dom": "^4.3.1", 11 | "react-scripts": "2.1.1" 12 | }, 13 | "scripts": { 14 | "start": "react-scripts start", 15 | "build": "react-scripts build", 16 | "test": "react-scripts test", 17 | "eject": "react-scripts eject", 18 | "lint": "eslint ./src" 19 | }, 20 | "browserslist": [ 21 | ">0.2%", 22 | "not dead", 23 | "not ie <= 11", 24 | "not op_mini all" 25 | ] 26 | } 27 | -------------------------------------------------------------------------------- /client/public/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IBM/Train-Custom-Speech-Model/b58173699b336fe29273daace9a0041a7bcf80cf/client/public/favicon.ico -------------------------------------------------------------------------------- /client/public/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 15 | 16 | 25 | Watson Speech to Text Customizer 26 | 27 | 28 | 31 |
32 | 42 | 43 | 44 | -------------------------------------------------------------------------------- /client/public/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "short_name": "STT Customizer", 3 | "name": "Watson Speech to Text Customizer", 4 | "icons": [ 5 | { 6 | "src": "favicon.ico", 7 | "sizes": "64x64 32x32 24x24 16x16", 8 | "type": "image/x-icon" 9 | } 10 | ], 11 | "start_url": ".", 12 | "display": "standalone", 13 | "theme_color": "#000000", 14 | "background_color": "#ffffff" 15 | } 16 | -------------------------------------------------------------------------------- /client/src/App.css: -------------------------------------------------------------------------------- 1 | .App { 2 | margin-top: 15px; 3 | } 4 | 5 | .App .navbar-brand { 6 | font-weight: bold; 7 | } 8 | -------------------------------------------------------------------------------- /client/src/App.js: -------------------------------------------------------------------------------- 1 | import React, { Component, Fragment } from 'react'; 2 | import { Link, withRouter } from 'react-router-dom'; 3 | import { Nav, Navbar, NavItem } from 'react-bootstrap'; 4 | import { LinkContainer } from 'react-router-bootstrap'; 5 | import Routes from './Routes'; 6 | import config from './config'; 7 | import './App.css'; 8 | import { handleFetchNonOK } from './pages/util'; 9 | 10 | 11 | class App extends Component { 12 | constructor(props) { 13 | super(props); 14 | 15 | this.state = { 16 | isAuthenticated: false, 17 | isAuthenticating: true, 18 | }; 19 | } 20 | 21 | async componentDidMount() { 22 | fetch(`${config.API_ENDPOINT}/user`, { 23 | method: 'GET', 24 | credentials: 'include', 25 | }) 26 | .then(handleFetchNonOK) 27 | .then((response) => { 28 | this.userHasAuthenticated(true); 29 | response.json().then((data) => { 30 | localStorage.setItem('username', data.user.username); 31 | localStorage.setItem('customLanguageModel', data.user.langModel); 32 | localStorage.setItem('customAcousticModel', data.user.acousticModel); 33 | localStorage.setItem('baseModel', data.user.baseModel); 34 | }); 35 | this.setState({ isAuthenticating: false }); 36 | }) 37 | .catch((err) => { 38 | console.log('Not logged in.', err.message); 39 | this.setState({ isAuthenticating: false }); 40 | }); 41 | } 42 | 43 | userHasAuthenticated = (authenticated) => { 44 | this.setState({ isAuthenticated: authenticated }); 45 | } 46 | 47 | handleLogout = (event) => { 48 | fetch(`${config.API_ENDPOINT}/logout`, { 49 | method: 'POST', 50 | credentials: 'include', 51 | }) 52 | .then(handleFetchNonOK) 53 | .then((response) => { 54 | this.userHasAuthenticated(false); 55 | localStorage.clear(); 56 | const { history } = this.props; 57 | history.push('/'); 58 | }) 59 | .catch((err) => { 60 | console.log('Error logging out user.', err.message); 61 | }); 62 | } 63 | 64 | render() { 65 | const { isAuthenticated } = this.state; 66 | const childProps = { 67 | isAuthenticated: isAuthenticated, 68 | userHasAuthenticated: this.userHasAuthenticated, 69 | }; 70 | 71 | return ( 72 | !this.state.isAuthenticating && 73 |
74 | 75 | 76 | 77 | Watson STT Customizer 78 | 79 | 80 | 81 | 82 | 99 | 114 | 115 | 116 | 117 |
118 | ); 119 | } 120 | } 121 | 122 | export default withRouter(App); 123 | -------------------------------------------------------------------------------- /client/src/App.test.js: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import ReactDOM from 'react-dom'; 3 | import App from './App'; 4 | 5 | it('renders without crashing', () => { 6 | const div = document.createElement('div'); 7 | ReactDOM.render(, div); 8 | ReactDOM.unmountComponentAtNode(div); 9 | }); 10 | -------------------------------------------------------------------------------- /client/src/Routes.js: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import { Route, Switch } from 'react-router-dom'; 3 | import AppliedRoute from './components/AppliedRoute'; 4 | import AuthenticatedRoute from './components/AuthenticatedRoute'; 5 | import UnauthenticatedRoute from './components/UnauthenticatedRoute'; 6 | import Home from './pages/Home'; 7 | import NotFound from './pages/NotFound'; 8 | import Login from './pages/Login'; 9 | import Transcribe from './pages/Transcribe.js'; 10 | import Corpora from './pages/Corpora.js'; 11 | import Words from './pages/Words.js'; 12 | import Audio from './pages/Audio.js'; 13 | import Train from './pages/Train.js'; 14 | 15 | export default ({ childProps }) => 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | ; 26 | -------------------------------------------------------------------------------- /client/src/components/AlertDismissable.js: -------------------------------------------------------------------------------- 1 | import React, { Component } from 'react'; 2 | import { Alert } from 'react-bootstrap'; 3 | 4 | /** 5 | * Class to handle the rendering of a dismissiable alert to use for things like errors. 6 | * @extends React.Component 7 | */ 8 | class AlertDismissable extends Component { 9 | 10 | render() { 11 | if (this.props.show) { 12 | return ( 13 | 14 |

{this.props.title}

15 |

16 | {this.props.message} 17 |

18 |
19 | ); 20 | } 21 | return null; 22 | } 23 | } 24 | 25 | export default AlertDismissable; 26 | -------------------------------------------------------------------------------- /client/src/components/AppliedRoute.js: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import { Route } from 'react-router-dom'; 3 | 4 | /** 5 | * This handles routes where it doesn't matter if the user is logged in or not. 6 | */ 7 | export default ({ component: C, props: cProps, ...rest }) => 8 | } />; 9 | -------------------------------------------------------------------------------- /client/src/components/AuthenticatedRoute.js: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import { Route, Redirect } from 'react-router-dom'; 3 | 4 | /** 5 | * This handles routes/pages where the user must be logged in to view. 6 | */ 7 | export default ({ component: C, props: cProps, ...rest }) => 8 | 11 | cProps.isAuthenticated 12 | ? 13 | : } 17 | />; 18 | -------------------------------------------------------------------------------- /client/src/components/LoadButton.css: -------------------------------------------------------------------------------- 1 | .LoadButton .spinning.glyphicon { 2 | margin-right: 7px; 3 | top: 2px; 4 | animation: spin 1s infinite linear; 5 | } 6 | @keyframes spin { 7 | from { transform: scale(1) rotate(0deg); } 8 | to { transform: scale(1) rotate(360deg); } 9 | } 10 | -------------------------------------------------------------------------------- /client/src/components/LoadButton.js: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import { Button, Glyphicon } from 'react-bootstrap'; 3 | import './LoadButton.css'; 4 | 5 | /** 6 | * This produces a button that will have a loading animation while the isLoading property is true. 7 | */ 8 | export default ({ 9 | isLoading, 10 | text, 11 | loadingText, 12 | className = '', 13 | disabled = false, 14 | ...props 15 | }) => 16 | ; 24 | -------------------------------------------------------------------------------- /client/src/components/UnauthenticatedRoute.js: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import { Route, Redirect } from 'react-router-dom'; 3 | 4 | function querystring(name, url = window.location.href) { 5 | name = name.replace(/[[]]/g, '\\$&'); 6 | 7 | const regex = new RegExp('[?&]' + name + '(=([^&#]*)|&|#|$)', 'i'); 8 | const results = regex.exec(url); 9 | 10 | if (!results) { 11 | return null; 12 | } 13 | if (!results[2]) { 14 | return ''; 15 | } 16 | 17 | return decodeURIComponent(results[2].replace(/\+/g, ' ')); 18 | } 19 | 20 | /** 21 | * This handles routes/pages where the user must NOT be logged in to view. 22 | */ 23 | export default ({ component: C, props: cProps, ...rest }) => { 24 | const redirect = querystring('redirect'); 25 | return ( 26 | 29 | !cProps.isAuthenticated 30 | ? 31 | : } 34 | /> 35 | ); 36 | }; 37 | -------------------------------------------------------------------------------- /client/src/config.js: -------------------------------------------------------------------------------- 1 | export default { 2 | API_ENDPOINT: 'http://localhost:5000/api', 3 | WS_ENDPOINT: 'ws://localhost:5000/', 4 | MAX_AUDIO_SIZE: 15000000, 5 | }; 6 | -------------------------------------------------------------------------------- /client/src/index.css: -------------------------------------------------------------------------------- 1 | body { 2 | margin: 0; 3 | padding: 0; 4 | font-family: "IBM Plx Sans", "Segoe UI", sans-serif; 5 | font-size: 16px; 6 | color: #333; 7 | -webkit-font-smoothing: antialiased; 8 | -moz-osx-font-smoothing: grayscale; 9 | } 10 | 11 | code { 12 | font-family: source-code-pro, Menlo, Monaco, Consolas, "Courier New", 13 | monospace; 14 | } 15 | 16 | select.form-control, 17 | textarea.form-control, 18 | input.form-control { 19 | font-size: 16px; 20 | } 21 | 22 | input[type=file] { 23 | width: 100%; 24 | } 25 | -------------------------------------------------------------------------------- /client/src/index.js: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import ReactDOM from 'react-dom'; 3 | import { BrowserRouter as Router } from 'react-router-dom'; 4 | import './index.css'; 5 | import App from './App'; 6 | import * as serviceWorker from './serviceWorker'; 7 | 8 | ReactDOM.render( 9 | 10 | 11 | , 12 | document.getElementById('root') 13 | ); 14 | 15 | // If you want your app to work offline and load faster, you can change 16 | // unregister() to register() below. Note this comes with some pitfalls. 17 | // Learn more about service workers: http://bit.ly/CRA-PWA 18 | serviceWorker.unregister(); 19 | -------------------------------------------------------------------------------- /client/src/pages/Audio.css: -------------------------------------------------------------------------------- 1 | .Audio { 2 | text-align: center; 3 | margin-top: 20px; 4 | display: block; 5 | margin: 0 auto; 6 | max-width: 900px; 7 | min-width: 350px; 8 | } 9 | 10 | .Audio table { 11 | margin-top: 20px; 12 | } 13 | 14 | .Audio td, th { 15 | text-align: center; 16 | } 17 | 18 | .Audio .tableload.glyphicon { 19 | margin-right: 7px; 20 | margin-top: 20px; 21 | animation: spin 1s infinite linear; 22 | font-size: 42px; 23 | color: #888; 24 | } 25 | 26 | .Audio .processing.glyphicon { 27 | margin-right: 7px; 28 | animation: spin 1s infinite linear; 29 | } 30 | 31 | @keyframes spin { 32 | from { transform: scale(1) rotate(0deg); } 33 | to { transform: scale(1) rotate(360deg); } 34 | } 35 | 36 | .Audio .audiofile { 37 | width: 0.1px; 38 | height: 0.1px; 39 | opacity: 0; 40 | overflow: hidden; 41 | position: absolute; 42 | z-index: -1; 43 | } 44 | 45 | .Audio .audiolabel { 46 | font-size: 16px; 47 | color: #555; 48 | font-weight: 500; 49 | height: 34px; 50 | padding: 6px 12px; 51 | background-color: #fff; 52 | display: inline-block; 53 | cursor: pointer; 54 | border: 1px solid #ccc; 55 | border-radius: 4px; 56 | margin-bottom: 0px; 57 | } 58 | 59 | .Audio .audiolabel:focus, 60 | .Audio .audiolabel:hover { 61 | background-color: #dedede; 62 | } 63 | 64 | .Audio .LoadButton { 65 | width: 200px; 66 | margin: 0 auto; 67 | margin-bottom: 15px; 68 | } 69 | -------------------------------------------------------------------------------- /client/src/pages/Audio.js: -------------------------------------------------------------------------------- 1 | import React, { Component } from 'react'; 2 | import { 3 | Button, Table, Glyphicon, FormGroup, FormControl, ControlLabel, HelpBlock, Panel 4 | } from 'react-bootstrap'; 5 | import LoadButton from '../components/LoadButton'; 6 | import AlertDismissable from '../components/AlertDismissable'; 7 | import config from '../config'; 8 | import './Audio.css'; 9 | import { handleFetchNonOK } from './util'; 10 | 11 | /** 12 | * Class to handle the rendering of the Audio page where a list of all audio resources 13 | * for the user's custom acoustic model is displayed. 14 | * @extends React.Component 15 | */ 16 | export default class Audio extends Component { 17 | constructor(props) { 18 | super(props); 19 | 20 | this.file = null; 21 | 22 | this.state = { 23 | isLoading: false, 24 | audio: [], 25 | uploadError: '', 26 | listError: '', 27 | isDeleting: false, 28 | isUploading: false, 29 | fileUploadOpen: false, 30 | filename: '' 31 | }; 32 | } 33 | 34 | async componentDidMount() { 35 | this.handleGetList(); 36 | this.interval = setInterval(this.pollAudio, 3000); 37 | } 38 | 39 | componentWillUnmount() { 40 | clearInterval(this.interval); 41 | } 42 | 43 | handlePanelToggle = event => { 44 | this.setState({ 'fileUploadOpen': !this.state.fileUploadOpen }); 45 | } 46 | 47 | handleFileChange = event => { 48 | if (event.target.files.length) { 49 | this.file = event.target.files[0]; 50 | this.setState({ 'filename': this.file.name }); 51 | } 52 | } 53 | 54 | handleDismiss = errorType => { 55 | this.setState({ [errorType]: '' }); 56 | } 57 | 58 | uploadAudio = event => { 59 | event.preventDefault(); 60 | 61 | this.setState({ isUploading: true }); 62 | this.setState({ uploadError: '' }); 63 | let formData = new FormData(); 64 | formData.append('audio', this.file); 65 | formData.append('audioName', this.state.filename); 66 | fetch(`${config.API_ENDPOINT}/audio`, { 67 | method: 'POST', 68 | body: formData, 69 | credentials: 'include', 70 | }) 71 | .then(handleFetchNonOK) 72 | .then((response) => { 73 | response.json().then((data) => { 74 | // Start polling audio 75 | this.file = null; 76 | this.handlePanelToggle(); 77 | this.setState({ 'filename': '' }); 78 | this.handleGetList(); 79 | this.interval = setInterval(this.pollAudio, 3000); 80 | this.setState({ isUploading: false }); 81 | }); 82 | }) 83 | .catch((err) => { 84 | this.setState({ uploadError: 85 | `Could not add audio resource: ${err.message}` }); 86 | this.setState({ isUploading: false }); 87 | }); 88 | } 89 | 90 | checkAudioProcessing = () => { 91 | let being_processed = function(element) { 92 | return element.status === 'being_processed'; 93 | }; 94 | return this.state.audio.some(being_processed); 95 | } 96 | 97 | /** 98 | * Sort the given list of audio, first by status, then by name. We sort by status first 99 | * to make sure the audio being processed are listed at the top. 100 | */ 101 | sortAudio = audio => { 102 | audio.sort((a, b) => { 103 | if (a.status === b.status) { 104 | return (a.name < b.name) ? -1 : ((a.name > b.name) ? 1 : 0); 105 | } 106 | return (a.status < b.status) ? -1 : ((a.status > b.status) ? 1 : 0); 107 | }); 108 | return audio; 109 | } 110 | 111 | 112 | pollAudio = async () => { 113 | fetch(`${config.API_ENDPOINT}/audio`, { 114 | method: 'GET', 115 | credentials: 'include' 116 | }) 117 | .then(handleFetchNonOK) 118 | .then((response) => { 119 | response.json().then((data) => { 120 | let sortedAudio = this.sortAudio(data.audio); 121 | this.setState({ audio: sortedAudio }); 122 | if (!this.checkAudioProcessing()) { 123 | clearInterval(this.interval); 124 | } 125 | }); 126 | }) 127 | .catch((err) => { 128 | this.setState({ listError: err.message }); 129 | this.setState({ isLoading: false }); 130 | clearInterval(this.interval); 131 | }); 132 | } 133 | 134 | handleGetList = async () => { 135 | this.setState({ listError: '' }); 136 | this.setState({ isLoading: true }); 137 | fetch(`${config.API_ENDPOINT}/audio`, { 138 | method: 'GET', 139 | credentials: 'include' 140 | }) 141 | .then(handleFetchNonOK) 142 | .then((response) => { 143 | response.json().then((data) => { 144 | let sortedAudio = this.sortAudio(data.audio); 145 | this.setState({ audio: sortedAudio }); 146 | this.setState({ isLoading: false }); 147 | }); 148 | }) 149 | .catch((err) => { 150 | this.setState({ listError: err.message }); 151 | this.setState({ isLoading: false }); 152 | }); 153 | } 154 | 155 | handleDelete = async audioName => { 156 | this.setState({ isDeleting: true }); 157 | fetch(`${config.API_ENDPOINT}/audio/` + audioName, { 158 | method: 'DELETE', 159 | credentials: 'include' 160 | }) 161 | .then(handleFetchNonOK) 162 | .then((response) => { 163 | this.handleGetList(); 164 | this.setState({ isDeleting: false }); 165 | }) 166 | .catch((err) => { 167 | this.setState({ error: err.message }); 168 | this.setState({ isDeleting: false }); 169 | }); 170 | } 171 | 172 | render() { 173 | return ( 174 |
175 |

Audio Resource List

176 |

After a new audio resource has been added and processed, you must initialize 177 | a training session for the custom acoustic model 178 | with the new data. 179 |

180 | 185 | 186 | 187 | Upload Audio Resource{' '} 188 | 189 | { this.state.fileUploadOpen 190 | ? 191 | : 192 | } 193 | 194 | 195 | 196 | 197 | 198 |
199 | 200 | Upload Audio File or Archive
201 | 202 | {' '} 203 | {this.state.filename ? this.state.filename : 'Browse...'} 204 | 205 | 210 | 211 | Accepted file types: .wav, .mp3, .flac, .zip, .tar.gz
212 | Adding multiple audio files via a single archive file is significantly more 213 | efficient than adding each file individually. 214 |
215 |
216 | 225 | 226 | this.handleDismiss('uploadError')} /> 231 |
232 |
233 |
234 | 235 | { this.state.isLoading && } 236 | { !this.state.isLoading && this.state.audio.length <= 0 && !this.state.listError && 237 |


No audio resources

238 | } 239 | { !this.state.isLoading && this.state.audio.length > 0 && 240 | 241 | 242 | 243 | 244 | 245 | 246 | 247 | 248 | 249 | 250 | 251 | {this.state.audio.map((audio, index) => { 252 | return ( 253 | 254 | 255 | 256 | 257 | 267 | 278 | 279 | ); 280 | })} 281 | 282 |
NameTypeDuration (s)StatusActions
{audio.name}{audio.details.type || '-' }{audio.duration || '-'} 258 | 261 | {audio.status}{' '} 262 | {audio.status ==='being_processed' && 263 | 264 | } 265 | 266 | 268 | 277 |
283 | } 284 | this.handleDismiss('listError')} /> 289 |
290 | ); 291 | } 292 | } 293 | -------------------------------------------------------------------------------- /client/src/pages/Corpora.css: -------------------------------------------------------------------------------- 1 | .Corpora { 2 | text-align: center; 3 | margin-top: 20px; 4 | display: block; 5 | margin: 0 auto; 6 | max-width: 900px; 7 | min-width: 350px; 8 | } 9 | 10 | .Corpora table { 11 | margin-top: 20px; 12 | } 13 | 14 | .Corpora td, th { 15 | text-align: center; 16 | } 17 | 18 | .Corpora .tableload.glyphicon { 19 | margin-right: 7px; 20 | margin-top: 20px; 21 | animation: spin 1s infinite linear; 22 | font-size: 42px; 23 | color: #888; 24 | } 25 | 26 | .Corpora .processing.glyphicon { 27 | margin-right: 7px; 28 | animation: spin 1s infinite linear; 29 | } 30 | 31 | @keyframes spin { 32 | from { transform: scale(1) rotate(0deg); } 33 | to { transform: scale(1) rotate(360deg); } 34 | } 35 | 36 | .Corpora .corpusfile { 37 | width: 0.1px; 38 | height: 0.1px; 39 | opacity: 0; 40 | overflow: hidden; 41 | position: absolute; 42 | z-index: -1; 43 | } 44 | 45 | .Corpora .corpuslabel { 46 | font-size: 16px; 47 | color: #555; 48 | font-weight: 500; 49 | height: 34px; 50 | padding: 6px 12px; 51 | background-color: #fff; 52 | display: inline-block; 53 | cursor: pointer; 54 | border: 1px solid #ccc; 55 | border-radius: 4px; 56 | margin-bottom: 0px; 57 | } 58 | 59 | .Corpora .audiolabel:focus, 60 | .Corpora .audiolabel:hover { 61 | background-color: #dedede; 62 | } 63 | 64 | .Corpora .LoadButton { 65 | width: 200px; 66 | margin: 0 auto; 67 | margin-bottom: 15px; 68 | } 69 | -------------------------------------------------------------------------------- /client/src/pages/Corpora.js: -------------------------------------------------------------------------------- 1 | import React, { Component } from 'react'; 2 | import { 3 | Button, Table, Glyphicon, FormGroup, FormControl, ControlLabel, HelpBlock, Panel 4 | } from 'react-bootstrap'; 5 | import LoadButton from '../components/LoadButton'; 6 | import AlertDismissable from '../components/AlertDismissable'; 7 | import config from '../config'; 8 | import './Corpora.css'; 9 | import { handleFetchNonOK } from './util'; 10 | 11 | /** 12 | * Class to handle the rendering of the Corpora page where a list of all corpora for the user's 13 | * custom model is displayed. 14 | * @extends React.Component 15 | */ 16 | export default class Corpora extends Component { 17 | constructor(props) { 18 | super(props); 19 | 20 | this.fileContents = ''; 21 | 22 | this.state = { 23 | isLoading: false, 24 | corpora: [], 25 | uploadError: '', 26 | listError: '', 27 | isDeleting: false, 28 | isUploading: false, 29 | fileUploadOpen: false, 30 | filename: '' 31 | }; 32 | } 33 | 34 | async componentDidMount() { 35 | this.handleGetList(); 36 | this.interval = setInterval(this.pollCorpora, 3000); 37 | } 38 | 39 | componentWillUnmount() { 40 | clearInterval(this.interval); 41 | } 42 | 43 | handlePanelToggle = event => { 44 | this.setState({ 'fileUploadOpen': !this.state.fileUploadOpen }); 45 | } 46 | 47 | handleFileChange = event => { 48 | if (event.target.files.length) { 49 | this.fileReader = new FileReader(); 50 | this.fileReader.onloadend = () => { this.fileContents = this.fileReader.result; }; 51 | this.fileReader.readAsText(event.target.files[0]); 52 | this.setState({ 'filename': event.target.files[0].name }); 53 | } 54 | } 55 | 56 | handleDismiss = errorType => { 57 | this.setState({ [errorType]: '' }); 58 | } 59 | 60 | uploadCorpora = event => { 61 | event.preventDefault(); 62 | this.setState({ isUploading: true }); 63 | this.setState({ uploadError: '' }); 64 | fetch(`${config.API_ENDPOINT}/corpora`, { 65 | method: 'POST', 66 | body: JSON.stringify({'corpusName': this.state.filename, 67 | 'corpus': this.fileContents}), 68 | credentials: 'include', 69 | headers: { 70 | 'Content-Type': 'application/json' 71 | }, 72 | }) 73 | .then(handleFetchNonOK) 74 | .then((response) => { 75 | response.json().then((data) => { 76 | // Start polling corpora 77 | this.fileContents = ''; 78 | this.handlePanelToggle(); 79 | this.setState({ 'filename': '' }); 80 | this.handleGetList(); 81 | this.interval = setInterval(this.pollCorpora, 3000); 82 | this.setState({ isUploading: false }); 83 | }); 84 | }) 85 | .catch((err) => { 86 | this.setState({ uploadError: `Could not add corpus: ${err.message}` }); 87 | this.setState({ isUploading: false }); 88 | }); 89 | } 90 | 91 | checkCorporaProcessing = () => { 92 | let being_processed = function(element) { 93 | return element.status === 'being_processed'; 94 | }; 95 | return this.state.corpora.some(being_processed); 96 | } 97 | 98 | /** 99 | * Sort the given list of corpora, first by status, then by name. We sort by 100 | * status first to make sure the corpora being processed are listed at the 101 | * top. 102 | */ 103 | sortCorpora = corpora => { 104 | corpora.sort((a, b) => { 105 | if (a.status === b.status) { 106 | return (a.name < b.name) ? -1 : ((a.name > b.name) ? 1 : 0); 107 | } 108 | return (a.status > b.status) ? -1 : ((a.status < b.status) ? 1 : 0); 109 | }); 110 | return corpora; 111 | } 112 | 113 | 114 | pollCorpora = async () => { 115 | fetch(`${config.API_ENDPOINT}/corpora`, { 116 | method: 'GET', 117 | credentials: 'include' 118 | }) 119 | .then(handleFetchNonOK) 120 | .then((response) => { 121 | response.json().then((data) => { 122 | let sortedCorpora = this.sortCorpora(data.corpora); 123 | this.setState({ corpora: sortedCorpora }); 124 | if (!this.checkCorporaProcessing()) { 125 | clearInterval(this.interval); 126 | } 127 | }); 128 | }) 129 | .catch((err) => { 130 | this.setState({ listError: err.message }); 131 | this.setState({ isLoading: false }); 132 | clearInterval(this.interval); 133 | }); 134 | } 135 | 136 | handleGetList = async () => { 137 | this.setState({ listError: '' }); 138 | this.setState({ isLoading: true }); 139 | fetch(`${config.API_ENDPOINT}/corpora`, { 140 | method: 'GET', 141 | credentials: 'include' 142 | }) 143 | .then(handleFetchNonOK) 144 | .then((response) => { 145 | response.json().then((data) => { 146 | let sortedCorpora = this.sortCorpora(data.corpora); 147 | this.setState({ corpora: sortedCorpora }); 148 | this.setState({ isLoading: false }); 149 | }); 150 | }) 151 | .catch((err) => { 152 | this.setState({ listError: err.message }); 153 | this.setState({ isLoading: false }); 154 | }); 155 | } 156 | 157 | handleDelete = async corpusName => { 158 | this.setState({ isDeleting: true }); 159 | fetch(`${config.API_ENDPOINT}/corpora/` + corpusName, { 160 | method: 'DELETE', 161 | credentials: 'include' 162 | }) 163 | .then(handleFetchNonOK) 164 | .then((response) => { 165 | this.handleGetList(); 166 | this.setState({ isDeleting: false }); 167 | }) 168 | .catch((err) => { 169 | this.setState({ error: err.message }); 170 | this.setState({ isDeleting: false }); 171 | }); 172 | } 173 | 174 | render() { 175 | return ( 176 |
177 |

Corpus List

178 |

After new corpora have been analyzed, you must initialize 179 | a training session for the custom language model 180 | with the new data. 181 |

182 | 187 | 188 | 189 | Upload Corpus{' '} 190 | 191 | { this.state.fileUploadOpen 192 | ? 193 | : 194 | } 195 | 196 | 197 | 198 | 199 | 200 |
201 | 202 | Upload Corpus Text File
203 | 204 | {' '} 205 | {this.state.filename ? this.state.filename : 'Browse...'} 206 | 207 | 212 | 213 | Accepted file types: .txt 214 | 215 |
216 | 225 | 226 | this.handleDismiss('uploadError')} /> 231 |
232 |
233 |
234 | { this.state.isLoading && } 235 | { !this.state.isLoading && this.state.corpora.length <= 0 && !this.state.listError && 236 |


No corpora added

237 | } 238 | { !this.state.isLoading && this.state.corpora.length > 0 && 239 | 240 | 241 | 242 | 243 | 244 | 245 | 246 | 247 | 248 | 249 | 250 | {this.state.corpora.map((corpus, index) => { 251 | return ( 252 | 253 | 254 | 255 | 256 | 266 | 277 | 278 | ); 279 | })} 280 | 281 |
NameOut of Vocab WordsTotal WordsStatusActions
{corpus.name}{corpus.out_of_vocabulary_words}{corpus.total_words} 257 | 260 | {corpus.status}{' '} 261 | {corpus.status ==='being_processed' && 262 | 263 | } 264 | 265 | 267 | 276 |
282 | } 283 | this.handleDismiss('listError')} /> 288 |
289 | ); 290 | } 291 | } 292 | -------------------------------------------------------------------------------- /client/src/pages/Home.css: -------------------------------------------------------------------------------- 1 | .Home .lander { 2 | text-align: center; 3 | } 4 | 5 | .Home .lander h1 { 6 | font-weight: 600; 7 | } 8 | 9 | .Home .lander p { 10 | color: #999; 11 | } 12 | 13 | .Home .tilecontainer { 14 | max-width: 600px; 15 | } 16 | 17 | .Home .menubutton { 18 | display: block; 19 | max-width: 300px; 20 | min-height: 150px; 21 | background: #444; 22 | padding: 10px; 23 | text-align: center; 24 | border-radius: 5px; 25 | color: white; 26 | font-weight: bold; 27 | margin: 10px auto; 28 | text-decoration: none; 29 | font-size: 24px; 30 | } 31 | 32 | .Home .menubutton:hover { 33 | background: #6e6e6e; 34 | } 35 | 36 | .Home .menubutton:active { 37 | background: #8e8e8e; 38 | } 39 | 40 | .Home .menubutton .glyphicon { 41 | padding-top: 20px; 42 | font-size: 42px; 43 | } 44 | -------------------------------------------------------------------------------- /client/src/pages/Home.js: -------------------------------------------------------------------------------- 1 | import React, { Component } from 'react'; 2 | import { Grid, Row, Col, Glyphicon } from 'react-bootstrap'; 3 | import './Home.css'; 4 | 5 | /** 6 | * Class to handle the rendering of the Home page. 7 | * @extends React.Component 8 | */ 9 | export default class Home extends Component { 10 | render() { 11 | return ( 12 |
13 |
14 |

Watson Speech to Text Customizer

15 |

Use a medical transcription dataset to train a customized speech to text model.

16 |
17 | 18 | 19 | 20 | 21 | Transcribe
22 | 23 |
24 | 25 | 26 | 27 | View Corpora
28 | 29 |
30 | 31 |
32 | 33 | 34 | 35 | View Words
36 | 37 |
38 | 39 | 40 | 41 | Train
42 | 43 |
44 | 45 |
46 |
47 |
48 | ); 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /client/src/pages/Login.css: -------------------------------------------------------------------------------- 1 | @media all and (min-width: 480px) { 2 | .Login { 3 | padding: 50px 0; 4 | } 5 | 6 | .Login form { 7 | margin: 0 auto; 8 | max-width: 360px; 9 | } 10 | 11 | .Login .alert { 12 | margin-top: 15px; 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /client/src/pages/Login.js: -------------------------------------------------------------------------------- 1 | import React, { Component } from 'react'; 2 | import { 3 | Button, FormGroup, FormControl, ControlLabel, InputGroup, Glyphicon 4 | } from 'react-bootstrap'; 5 | import AlertDismissable from '../components/AlertDismissable'; 6 | import config from '../config'; 7 | import './Login.css'; 8 | import { handleFetchNonOK } from './util'; 9 | 10 | /** 11 | * Class to handle the rendering of the Login page. 12 | * @extends React.Component 13 | */ 14 | export default class Login extends Component { 15 | constructor(props) { 16 | super(props); 17 | 18 | this.state = { 19 | username: '', 20 | password: '', 21 | error: '' 22 | }; 23 | } 24 | 25 | validateForm() { 26 | return this.state.username.length > 0 && this.state.password.length > 0; 27 | } 28 | 29 | handleChange = event => { 30 | this.setState({ 31 | [event.target.id]: event.target.value 32 | }); 33 | } 34 | 35 | handleSubmit = async event => { 36 | event.preventDefault(); 37 | fetch(`${config.API_ENDPOINT}/login`, { 38 | method: 'POST', 39 | body: JSON.stringify({'username': this.state.username, 'password': this.state.password}), 40 | credentials: 'include', 41 | headers: { 42 | 'Content-Type': 'application/json' 43 | }, 44 | }) 45 | .then(handleFetchNonOK) 46 | .then((response) => { 47 | response.json().then((data) => { 48 | localStorage.setItem('username', data.user.username); 49 | localStorage.setItem('customLanguageModel', data.user.langModel); 50 | localStorage.setItem('customAcousticModel', data.user.acousticModel); 51 | localStorage.setItem('baseModel', data.user.baseModel); 52 | this.props.userHasAuthenticated(true); 53 | }); 54 | }) 55 | .catch((err) => { 56 | this.setState({ error: `Could not authenticate: ${err.message}` }); 57 | }); 58 | } 59 | 60 | handleDismiss = event => { 61 | this.setState({ error: '' }); 62 | } 63 | 64 | render() { 65 | return ( 66 |
67 |
68 | 69 | Username 70 | 71 | 72 | 80 | 81 | 82 | 83 | Password 84 | 85 | 86 | 93 | 94 | 95 | 103 | 108 | 109 |
110 | ); 111 | } 112 | } 113 | -------------------------------------------------------------------------------- /client/src/pages/NotFound.css: -------------------------------------------------------------------------------- 1 | .NotFound { 2 | padding-top: 75px; 3 | text-align: center; 4 | } 5 | 6 | .NotFound h1 { 7 | font-size: 72px; 8 | } 9 | -------------------------------------------------------------------------------- /client/src/pages/NotFound.js: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import './NotFound.css'; 3 | 4 | /** 5 | * This is rendered when a route is not found (404). 6 | */ 7 | export default () => 8 |
9 |

404

10 |

Uh-oh! Page not found. Are you in the right place?

11 |
; 12 | -------------------------------------------------------------------------------- /client/src/pages/Train.css: -------------------------------------------------------------------------------- 1 | .Train { 2 | padding: 20px 0; 3 | text-align: center; 4 | } 5 | 6 | .Train .LoadButton { 7 | width: 200px; 8 | margin: 0 auto; 9 | margin-bottom: 15px; 10 | } 11 | 12 | .Train .modelstatus { 13 | padding: 10px 5px 20px 5px; 14 | } 15 | 16 | .Train .loadingstatus.glyphicon { 17 | margin-top: 20px; 18 | margin-bottom: 20px; 19 | animation: spin 1s infinite linear; 20 | font-size: 42px; 21 | color: #888; 22 | } 23 | 24 | .Train .training.glyphicon { 25 | margin-right: 7px; 26 | animation: spin 1s infinite linear; 27 | } 28 | 29 | @keyframes spin { 30 | from { transform: scale(1) rotate(0deg); } 31 | to { transform: scale(1) rotate(360deg); } 32 | } 33 | -------------------------------------------------------------------------------- /client/src/pages/Train.js: -------------------------------------------------------------------------------- 1 | import React, { Component } from 'react'; 2 | import { Grid, Row, Col, Well, Glyphicon } from 'react-bootstrap'; 3 | import LoadButton from '../components/LoadButton'; 4 | import AlertDismissable from '../components/AlertDismissable'; 5 | import config from '../config'; 6 | import './Train.css'; 7 | import { handleFetchNonOK } from './util'; 8 | 9 | /** 10 | * Class to handle the rendering of the Train page where users can initialize 11 | * the training for their custom models. 12 | * @extends React.Component 13 | */ 14 | export default class Train extends Component { 15 | constructor(props) { 16 | super(props); 17 | 18 | this.state = { 19 | isLanguageSubmitting: false, 20 | isLanguageStatusLoading: false, 21 | isAcousticSubmitting: false, 22 | isAcousticStatusLoading: false, 23 | languageModelData: null, 24 | acousticModelData: null, 25 | languageModelError: '', 26 | acousticModelError: '' 27 | }; 28 | } 29 | 30 | async componentDidMount() { 31 | this.getStatusLanguageModel(); 32 | this.getStatusAcousticModel(); 33 | } 34 | 35 | componentWillUnmount() { 36 | clearInterval(this.interval); 37 | } 38 | 39 | handleDismiss = errorType => { 40 | this.setState({ [errorType]: '' }); 41 | } 42 | 43 | trainLanguageModel = async event => { 44 | event.preventDefault(); 45 | this.setState({ isLanguageSubmitting: true }); 46 | this.setState({ languageModelError: '' }); 47 | fetch(`${config.API_ENDPOINT}/train`, { 48 | method: 'POST', 49 | credentials: 'include', 50 | }) 51 | .then(handleFetchNonOK) 52 | .then((response) => { 53 | response.json().then((data) => { 54 | this.getStatusLanguageModel(); 55 | this.setState({ isLanguageSubmitting: false }); 56 | }); 57 | }) 58 | .catch((err) => { 59 | this.setState({ languageModelError: 60 | `Error initializing the training: ${err.message}`}); 61 | this.setState({ isLanguageSubmitting: false }); 62 | }); 63 | } 64 | 65 | trainAcousticModel = async event => { 66 | event.preventDefault(); 67 | this.setState({ isAcousticSubmitting: true }); 68 | fetch(`${config.API_ENDPOINT}/train-acoustic`, { 69 | method: 'POST', 70 | credentials: 'include', 71 | }) 72 | .then(handleFetchNonOK) 73 | .then((response) => { 74 | response.json().then((data) => { 75 | this.getStatusAcousticModel(); 76 | this.setState({ isAcousticSubmitting: false }); 77 | }); 78 | }) 79 | .catch((err) => { 80 | this.setState({ acousticModelError: 81 | `Error initializing the training: ${err.message}`}); 82 | this.setState({ isAcousticSubmitting: false }); 83 | }); 84 | } 85 | 86 | 87 | /** 88 | * Check if model is in a state that needs continuous polling to check for 89 | * updates. 90 | */ 91 | checkModelStatusDone = status => { 92 | let nonPollStatuses = ['ready', 'available', 'failed']; 93 | return nonPollStatuses.includes(status); 94 | 95 | } 96 | 97 | /** 98 | * This function will check if the model is in a state from which you can kick 99 | * off a training session from (i.e. not updating, pending, or training). 100 | */ 101 | checkModelTrainable = data => { 102 | if (!data) { 103 | return false; 104 | } 105 | else if (['ready', 'failed'].includes(data.status)) { 106 | return true; 107 | } 108 | return false; 109 | } 110 | 111 | /** 112 | * This function will give the appropriate CSS class to color the given 113 | * status. 114 | */ 115 | getStatusColor = status => { 116 | if (status === 'ready') { 117 | return 'text-info'; 118 | } 119 | else if (status === 'available') { 120 | return 'text-success'; 121 | } 122 | else if (status === 'training') { 123 | return 'text-warning'; 124 | } 125 | else if (status === 'failed') { 126 | return 'text-danger'; 127 | } 128 | else { 129 | return 'text-secondary'; 130 | } 131 | } 132 | 133 | pollLanguageModelStatus = async () => { 134 | this.getStatusLanguageModel(true); 135 | } 136 | 137 | pollAcousticModelStatus = async () => { 138 | this.getStatusAcousticModel(true); 139 | } 140 | 141 | getStatusLanguageModel = async (poll = false) => { 142 | if (!poll) this.setState({ isLanguageStatusLoading: true }); 143 | fetch(`${config.API_ENDPOINT}/model`, { 144 | method: 'GET', 145 | credentials: 'include' 146 | }) 147 | .then(handleFetchNonOK) 148 | .then((response) => { 149 | response.json().then((data) => { 150 | this.setState({ languageModelData: data.data }); 151 | let isNotActive = this.checkModelStatusDone(data.data.status); 152 | // If polling and if the model is no longer in an active state, stop 153 | // polling. 154 | if (isNotActive && poll) { 155 | clearInterval(this.interval); 156 | } 157 | // If it is in an active state, initiate the polling. 158 | else if (!isNotActive && !poll) { 159 | this.interval = setInterval(this.pollLanguageModelStatus, 5000); 160 | } 161 | if (!poll) this.setState({ isLanguageStatusLoading: false }); 162 | }); 163 | }) 164 | .catch((err) => { 165 | this.setState({ languageModelError: 166 | `Error getting language model data: ${err.message}`}); 167 | if (!poll) this.setState({ isLanguageStatusLoading: false }); 168 | }); 169 | } 170 | 171 | getStatusAcousticModel = async (poll = false) => { 172 | if (!poll) this.setState({ isAcousticStatusLoading: true }); 173 | fetch(`${config.API_ENDPOINT}/acoustic-model`, { 174 | method: 'GET', 175 | credentials: 'include' 176 | }) 177 | .then((response) => { 178 | response.json().then((data) => { 179 | this.setState({ acousticModelData: data.data }); 180 | let isNotActive = this.checkModelStatusDone(data.data.status); 181 | // If polling and if the model is no longer in an active state, stop 182 | // polling. 183 | if (isNotActive && poll) { 184 | clearInterval(this.interval); 185 | } 186 | // If it is in an active state, initiate the polling. 187 | else if (!isNotActive && !poll) { 188 | this.interval = setInterval(this.pollAcousticModelStatus, 5000); 189 | } 190 | if (!poll) this.setState({ isAcousticStatusLoading: false }); 191 | }); 192 | }) 193 | .catch((err) => { 194 | this.setState({ acousticModelError: 195 | `Error getting acoustic model data: ${err.message}` }); 196 | if (!poll) this.setState({ isAcousticStatusLoading: false }); 197 | }); 198 | } 199 | 200 | render() { 201 | return ( 202 |
203 |

Train Custom Models

204 |

If you have recently added language or audio resources, the model 205 | needs to be trained to account for the new data. Kick off a training session here. If a 206 | model's status is ready, then this indicates that the model contains data and 207 | is ready to be trained. A status of available indicates that the model is 208 | trained and ready to use.

209 | 210 | 211 | 212 | 213 |

Language Model Status

214 | 215 | {this.state.isLanguageStatusLoading && 216 | 217 | } 218 | {this.state.languageModelData && !this.state.isLanguageStatusLoading && 219 |
220 | Name: {this.state.languageModelData.name}
221 | Status:{' '} 222 | 223 | {this.state.languageModelData.status}{' '} 224 | {this.state.languageModelData.status ==='training' && 225 | 226 | } 227 | 228 |
229 | } 230 |
231 | 244 | this.handleDismiss('languageModelError')} /> 249 | 250 | 251 |

Acoustic Model Status

252 | 253 | {this.state.isAcousticStatusLoading && 254 | 255 | } 256 | {this.state.acousticModelData && !this.state.isAcousticStatusLoading && 257 |
258 | Name: {this.state.acousticModelData.name}
259 | Status:{' '} 260 | 261 | {this.state.acousticModelData.status}{' '} 262 | {this.state.acousticModelData.status ==='training' && 263 | 264 | } 265 | 266 |
267 | } 268 |
269 | 282 | this.handleDismiss('acousticModelError')} /> 287 | 288 |
289 |
290 |
291 | ); 292 | } 293 | } 294 | -------------------------------------------------------------------------------- /client/src/pages/Transcribe.css: -------------------------------------------------------------------------------- 1 | .STTForm { 2 | text-align: center; 3 | margin-top: 20px; 4 | display: block; 5 | margin: 0 auto; 6 | max-width: 700px; 7 | min-width: 350px; 8 | } 9 | 10 | .STTForm form { 11 | margin-top: 20px; 12 | padding-bottom: 15px; 13 | } 14 | 15 | .STTForm select { 16 | min-width: 320px; 17 | max-width: 480px; 18 | display: block; 19 | margin: 0 auto; 20 | } 21 | 22 | .STTForm .panel:hover { 23 | cursor: pointer; 24 | } 25 | 26 | .STTForm .panel-arrow { 27 | float:right; 28 | } 29 | 30 | .STTForm form textarea { 31 | width: 100%; 32 | height: 100px; 33 | font-size: 14px; 34 | 35 | } 36 | 37 | .STTForm form textarea:disabled { 38 | background: #e9e9e9; 39 | } 40 | 41 | .STTForm .audiofile { 42 | width: 0.1px; 43 | height: 0.1px; 44 | opacity: 0; 45 | overflow: hidden; 46 | position: absolute; 47 | z-index: -1; 48 | } 49 | 50 | .STTForm .audiolabel { 51 | font-size: 16px; 52 | color: #555; 53 | font-weight: 500; 54 | height: 34px; 55 | padding: 6px 12px; 56 | background-color: #fff; 57 | display: inline-block; 58 | cursor: pointer; 59 | border: 1px solid #ccc; 60 | border-radius: 4px; 61 | margin-bottom: 0px; 62 | } 63 | 64 | .STTForm .audiolabel:focus, 65 | .STTForm .audiolabel:hover { 66 | background-color: #dedede; 67 | } 68 | 69 | .STTForm .help-block { 70 | font-size: 12px; 71 | } 72 | 73 | .STTForm .LoadButton { 74 | width: 200px; 75 | margin: 0 auto; 76 | margin-bottom: 15px; 77 | } 78 | 79 | .STTForm .adjustmentinfo { 80 | font-size: 14px; 81 | } 82 | 83 | .STTForm .adjustmentinfo .LoadButton { 84 | margin-top: 12px; 85 | } 86 | 87 | .STTForm #corpusName { 88 | max-width: 300px; 89 | margin: 0 auto; 90 | font-size: 12px; 91 | } 92 | 93 | .STTForm .btn-circle { 94 | width: 30px; 95 | height: 30px; 96 | padding: 6px 0px; 97 | border-radius: 15px; 98 | text-align: center; 99 | font-size: 12px; 100 | line-height: 1.42857; 101 | } 102 | -------------------------------------------------------------------------------- /client/src/pages/Transcribe.js: -------------------------------------------------------------------------------- 1 | import React, { Component, Fragment } from 'react'; 2 | import ReactDOM from 'react-dom'; 3 | import { 4 | Checkbox, Panel, Form, FormGroup, FormControl, ControlLabel, 5 | Glyphicon, HelpBlock, OverlayTrigger, Tooltip, ToggleButtonGroup, 6 | ToggleButton 7 | } from 'react-bootstrap'; 8 | import LoadButton from '../components/LoadButton'; 9 | import AlertDismissable from '../components/AlertDismissable'; 10 | import config from '../config'; 11 | import './Transcribe.css'; 12 | import { handleFetchNonOK } from './util'; 13 | 14 | const HIDE = {display: 'none'}; 15 | const SHOW = {display: 'inline-block'}; 16 | /** 17 | * Class to handle the rendering of the Transcribe page where users can submit audio files to have 18 | * transcribed. 19 | * @extends React.Component 20 | */ 21 | 22 | export default class Transcribe extends Component { 23 | constructor(props) { 24 | super(props); 25 | 26 | this.file = null; 27 | this.socket = null; 28 | this.textArea = null; 29 | this.audioPlayer = null; 30 | this.playQueue = []; 31 | this.audioOp = null; 32 | this.audioOpDisplay = HIDE; 33 | 34 | this.state = { 35 | isTranscribing: false, 36 | isSubmitting: false, 37 | content: '', 38 | filename: null, 39 | hasTranscribed: false, 40 | improveAcousticChecked: false, 41 | transcribeError: '', 42 | submitError: '', 43 | fileSettingsOpen: true, 44 | corpusName: '', 45 | userStopAudio: false, 46 | audioOpSelection: 'play' 47 | }; 48 | } 49 | 50 | componentDidMount() { 51 | this.socket = new WebSocket(config.WS_ENDPOINT); 52 | this.socket.onopen = () => { 53 | // console.log('Socket open.'); 54 | }; 55 | this.socket.onmessage = (message) => { 56 | const data = JSON.parse(message.data); 57 | if(data.transcript) { 58 | if (this.state.userStopAudio) { 59 | // user end the audio, directly render the transcript 60 | this.setState( 61 | { content: `${this.state.content}${data.transcript}\r\n` }); 62 | } else { 63 | this.playQueue.push(data); 64 | } 65 | } 66 | if (data.finished) { 67 | let newState = {isTranscribing: false, content: this.state.content}; 68 | let oldState = {...this.state, ...newState}; 69 | this.handleTranscriptQueue(oldState, newState); 70 | this.setState(newState); 71 | } 72 | if (data.error) { 73 | if (this.audioPlayer.played) { 74 | this.handleAudioOp('stop'); 75 | } 76 | this.setState( 77 | { transcribeError: data.error, 78 | isTranscribing: false 79 | }); 80 | } 81 | }; 82 | } 83 | 84 | getRemainingTranscripts() { 85 | if (this.playQueue.length === 0) return undefined; 86 | const remaining = this.playQueue.map( (data) => { 87 | return data.transcript; 88 | }).join('\r\n'); 89 | this.playQueue.length = 0; 90 | return remaining; 91 | } 92 | 93 | handleTranscriptQueue = (oldState, newState) => { 94 | if (this.audioPlayer.ended || oldState.userStopAudio) { 95 | // audio ends or user ends the audio 96 | if (!oldState.isTranscribing) { 97 | Object.assign(newState, { 98 | hasTranscribed: true, 99 | fileSettingsOpen: false 100 | }); 101 | } 102 | let remaining = this.getRemainingTranscripts(); 103 | if (remaining !== undefined) { 104 | newState.content = `${oldState.content}${remaining}\r\n`; 105 | } 106 | } 107 | } 108 | 109 | handleAudioOp = (event) => { 110 | switch (event) { 111 | case ('stop'): 112 | { 113 | this.audioPlayer.pause(); 114 | this.audioOpDisplay = HIDE; 115 | let newState = { userStopAudio: true, audioOpSelection: 'play' }; 116 | let oldState = {...this.state, userStopAudio: true}; 117 | this.handleTranscriptQueue(oldState, newState); 118 | this.setState(newState); 119 | break; 120 | } 121 | case ('play'): 122 | this.audioPlayer.play(); 123 | this.setState({audioOpSelection: 'play'}); 124 | break; 125 | case ('pause'): 126 | this.audioPlayer.pause(); 127 | this.setState({audioOpSelection: 'pause'}); 128 | break; 129 | default: 130 | break; 131 | } 132 | } 133 | 134 | playAudio() { 135 | if (this.state.isTranscribing === true) { 136 | let reader = new FileReader(); 137 | // console.log(`transcriptQueue shall be empty: ${this.playQueue.length}`); 138 | this.playQueue.length = 0; 139 | reader.onload = (e) => { 140 | this.audioPlayer.src = e.target.result; 141 | this.audioPlayer.play(); 142 | this.audioPlayer.onended = () => { 143 | const newState = {}; 144 | this.audioPlayer.ontimeupdate = null; 145 | this.audioOpDisplay = HIDE; 146 | newState.audioOpSelection = 'play'; 147 | this.handleTranscriptQueue(this.state, newState); 148 | this.setState(newState); 149 | }; 150 | }; 151 | this.audioPlayer.ontimeupdate = (e) => { 152 | if (this.playQueue.length > 0) { 153 | if (this.audioPlayer.currentTime > this.playQueue[0].start) { 154 | const transcript = this.playQueue.shift().transcript; 155 | this.setState( 156 | { content: `${this.state.content}${transcript}\r\n` }); 157 | } 158 | } 159 | }; 160 | reader.readAsDataURL(this.file); 161 | } 162 | } 163 | 164 | componentDidUpdate() { 165 | if (this.state.isTranscribing || this.audioPlayer.played) { 166 | // can't find the way to set the scrollTop for this.textArea.current 167 | // using ReactDOM is the last option 168 | let textArea = ReactDOM.findDOMNode(this.textArea); 169 | textArea.scrollTop = textArea.scrollHeight; 170 | } 171 | } 172 | 173 | componentWillUnmount() { 174 | if (this.socket) { 175 | this.socket.onmessage = null; 176 | this.socket.close(); 177 | // console.log('close socket'); 178 | } 179 | if (this.audioPlayer) { 180 | this.audioPlayer.ontimeupdate = null; 181 | this.audioPlayer.onended = null; 182 | } 183 | } 184 | 185 | handleChange = event => { 186 | this.setState({ 187 | [event.target.id]: event.target.value 188 | }); 189 | } 190 | 191 | handleFileChange = event => { 192 | if (event.target.files.length) { 193 | this.file = event.target.files[0]; 194 | this.setState({ 'filename': this.file.name }); 195 | } 196 | } 197 | 198 | handlePanelToggle = event => { 199 | this.setState({ 'fileSettingsOpen': !this.state.fileSettingsOpen }); 200 | } 201 | 202 | handleAcousticChange = event => { 203 | this.setState({ 'improveAcousticChecked': !this.state.improveAcousticChecked }); 204 | } 205 | 206 | handleTranscribe = async event => { 207 | event.preventDefault(); 208 | 209 | this.setState({ transcribeError: '' }); 210 | if (this.file && this.file.size > config.MAX_AUDIO_SIZE) { 211 | this.setState({ 212 | transcribeError: 213 | `Please pick a file smaller than ${config.MAX_AUDIO_SIZE/1000000} MB.` 214 | }); 215 | return; 216 | } 217 | 218 | let formData = new FormData(); 219 | formData.append('audio', this.file); 220 | formData.append('languageModel', this.languageModelType.value); 221 | formData.append('acousticModel', this.acousticModelType.value); 222 | 223 | fetch(`${config.API_ENDPOINT}/transcribe`, { 224 | method: 'POST', 225 | body: formData, 226 | credentials: 'include' 227 | }) 228 | .then(handleFetchNonOK) 229 | .then((response) => { 230 | response.json().then((data) => { 231 | // the transcription is received via WebSocket 232 | this.audioOpDisplay = SHOW; 233 | this.setState( 234 | { content: '', hasTranscribed: false, 235 | isTranscribing: true, userStopAudio: false 236 | }); 237 | this.socket.send(JSON.stringify({tid: data.tid})); 238 | this.playAudio(); 239 | }); 240 | }) 241 | .catch((err) => { 242 | this.setState({ transcribeError: `Could not transcribe: ${err.message}`}); 243 | this.setState({ isTranscribing: false }); 244 | }); 245 | } 246 | 247 | handleDismiss = errorType => { 248 | this.setState({ [errorType]: '' }); 249 | } 250 | 251 | validateCorpusName = () => { 252 | let invalidChars = /\s|\/|\\/; 253 | return ( 254 | this.state.corpusName.length > 0 && 255 | this.state.corpusName.length <= 128 && 256 | !invalidChars.test(this.state.corpusName) 257 | ); 258 | } 259 | 260 | handleSubmit = async event => { 261 | event.preventDefault(); 262 | 263 | this.setState({ isSubmitting: true }); 264 | this.setState({ submitError: '' }); 265 | 266 | 267 | // Upload corpora. 268 | fetch(`${config.API_ENDPOINT}/corpora`, { 269 | method: 'POST', 270 | body: JSON.stringify({'corpusName': this.state.corpusName, 271 | 'corpus': this.state.content}), 272 | credentials: 'include', 273 | headers: { 274 | 'Content-Type': 'application/json' 275 | }, 276 | }) 277 | .then(handleFetchNonOK) 278 | .then((response) => { 279 | response.json().then((data) => { 280 | // Corpora uploaded successfully, so upload audio resource if option 281 | // was selected. 282 | if (this.state.improveAcousticChecked) { 283 | 284 | let formData = new FormData(); 285 | formData.append('audio', this.file); 286 | formData.append('audioName', this.state.corpusName + '-audio'); 287 | fetch(`${config.API_ENDPOINT}/audio`, { 288 | method: 'POST', 289 | body: formData, 290 | credentials: 'include', 291 | }) 292 | .then(handleFetchNonOK) 293 | .then((response) => { 294 | response.json().then((data) => { 295 | // Redirect to corpora page to see status. 296 | this.props.history.push('/corpora'); 297 | this.setState({ isSubmitting: false }); 298 | }); 299 | }) 300 | .catch((err) => { 301 | this.setState({ submitError: 302 | `Could not add audio: ${err.message}`}); 303 | this.setState({ isSubmitting: false }); 304 | }); 305 | } 306 | // User chose not to upload audio. 307 | else { 308 | // Redirect to corpora page to see status. 309 | this.props.history.push('/corpora'); 310 | this.setState({ isSubmitting: false }); 311 | } 312 | }); 313 | }) 314 | .catch((err) => { 315 | this.setState({ submitError: `Could not add corpus: ${err.message}`}); 316 | this.setState({ isSubmitting: false }); 317 | }); 318 | } 319 | 320 | render() { 321 | const nodisplay = {display: 'none'}; 322 | return ( 323 |
324 |

Custom Speech Transcriber

325 |

Convert audio to text using the customized models.

326 | 331 | 332 | 333 | Select models and audio file{' '} 334 | 335 | { this.state.fileSettingsOpen 336 | ? 337 | : 338 | } 339 | 340 | 341 | 342 | 343 | 344 | 409 | 410 | 411 |
412 | {this.audioOp = audioOp;}} 415 | value={this.state.audioOpSelection} onChange={this.handleAudioOp} 416 | defaultValue={this.state.audioOpSelection}> 417 | { 418 | ['play', 'pause', 'stop'].map((item) => { 419 | return ( 420 | 423 | 424 | 425 | ); 426 | }) 427 | } 428 | 429 |

Your Transcription

430 | 431 | {this.textArea = textArea;}} 436 | /> 437 | 438 |
439 | { this.state.hasTranscribed && 440 | 441 |
442 | {' '} 443 | Not quite correct? Make adjustments in the text box above and submit 444 | it as a corpus to help improve the language model. Just give the corpus a name and 445 | click the submit button below.

446 | 447 | 448 | Add audio file to acoustic model{' '} 452 | 454 | Additionally, you can improve the quality of the acoustic model by uploading 455 | your audio with the transcription. 456 | 457 | }> 458 | 459 | 460 | 461 | 462 | 463 | Corpus Name 464 | 470 | 471 | Add a name to this corpus to help identify it. 472 | The name must be no more than 128 characters with no spaces or slashes. 473 | 474 | 475 | 485 | this.handleDismiss('submitError')} /> 490 |
491 |
492 | } 493 |
494 | ); 495 | } 496 | } 497 | -------------------------------------------------------------------------------- /client/src/pages/Words.css: -------------------------------------------------------------------------------- 1 | .Words { 2 | text-align: center; 3 | margin-top: 20px; 4 | display: block; 5 | margin: 0 auto; 6 | max-width: 900px; 7 | min-width: 350px; 8 | } 9 | 10 | .Words table { 11 | margin-top: 20px; 12 | } 13 | 14 | 15 | .Words td, th { 16 | text-align: center; 17 | } 18 | 19 | .Words .tableload.glyphicon { 20 | margin-right: 7px; 21 | margin-top: 20px; 22 | animation: spin 1s infinite linear; 23 | font-size: 42px; 24 | color: #888; 25 | } 26 | 27 | @keyframes spin { 28 | from { transform: scale(1) rotate(0deg); } 29 | to { transform: scale(1) rotate(360deg); } 30 | } 31 | 32 | .Words .panel-heading .badge { 33 | background-color: #aaa; 34 | } 35 | 36 | .Words .panel { 37 | max-width: 400px; 38 | margin: 5px auto; 39 | } 40 | 41 | .Words .panel-body { 42 | padding: 10px; 43 | } 44 | 45 | .Words .panel-option { 46 | float: right; 47 | } 48 | 49 | .Words .panel-option:hover { 50 | color: #0765D0; 51 | } 52 | 53 | .Words .panel-trash { 54 | float: right; 55 | margin-left: 2px; 56 | cursor: pointer; 57 | } 58 | 59 | .Words .panel-trash:hover { 60 | color: #950005; 61 | } 62 | 63 | .Words .panel-trash:active { 64 | color: #5A0003; 65 | } 66 | 67 | .Words .panel .panel-title { 68 | font-size: 14px; 69 | font-weight: 700; 70 | color: #444; 71 | } 72 | 73 | .Words .soundslike { 74 | font-size: 12px; 75 | } 76 | 77 | .Words .soundslike textarea { 78 | font-size: 12px; 79 | } 80 | -------------------------------------------------------------------------------- /client/src/pages/Words.js: -------------------------------------------------------------------------------- 1 | import React, { Component } from 'react'; 2 | import { FormGroup, FormControl, Glyphicon, HelpBlock, Panel } from 'react-bootstrap'; 3 | import LoadButton from '../components/LoadButton'; 4 | import AlertDismissable from '../components/AlertDismissable'; 5 | import config from '../config'; 6 | import './Words.css'; 7 | import { handleFetchNonOK } from './util'; 8 | 9 | /** 10 | * Class to handle the rendering of the Words page where users can view and manage custom words. 11 | * @extends React.Component 12 | */ 13 | export default class Words extends Component { 14 | constructor(props) { 15 | super(props); 16 | 17 | this.words = []; 18 | 19 | this.state = { 20 | isLoading: false, 21 | words: [], 22 | listError: '', 23 | error: '', 24 | }; 25 | } 26 | 27 | async componentDidMount() { 28 | this.handleGetList(); 29 | } 30 | 31 | handleDismiss = errorType => { 32 | this.setState({ [errorType]: '' }); 33 | } 34 | 35 | handleGetList = async () => { 36 | this.setState({ listError: '' }); 37 | this.setState({ isLoading: true }); 38 | fetch(`${config.API_ENDPOINT}/words`, { 39 | method: 'GET', 40 | credentials: 'include' 41 | }) 42 | .then(handleFetchNonOK) 43 | .then((response) => { 44 | response.json().then((data) => { 45 | let sortedWords = data.words.sort( 46 | (a, b) => (a.word < b.word) ? -1 : ((a.word > b.word) ? 1 : 0) 47 | ); 48 | this.setState({ words: sortedWords }); 49 | this.setState({ isLoading: false }); 50 | }); 51 | }) 52 | .catch((err) => { 53 | this.setState({ listError: err.message }); 54 | this.setState({ isLoading: false }); 55 | }); 56 | } 57 | 58 | handleDelete = async wordIndex => { 59 | let word = this.state.words[wordIndex].word; 60 | this.setState({ isLoading: true }); 61 | fetch(`${config.API_ENDPOINT}/words/` + word.replace(' ', '-'), { 62 | method: 'DELETE', 63 | credentials: 'include' 64 | }) 65 | .then(handleFetchNonOK) 66 | .then((response) => { 67 | this.handleGetList(); 68 | }) 69 | .catch((err) => { 70 | this.setState({ error: err.message }); 71 | this.setState({ isLoading: false }); 72 | }); 73 | } 74 | 75 | handleAdd = async wordIndex => { 76 | let loadingKey = 'isLoading' + wordIndex; 77 | this.setState({ [loadingKey]: true }); 78 | fetch(`${config.API_ENDPOINT}/words`, { 79 | method: 'POST', 80 | body: JSON.stringify(this.state.words[wordIndex]), 81 | credentials: 'include', 82 | headers: { 83 | 'Content-Type': 'application/json' 84 | }, 85 | }) 86 | .then(handleFetchNonOK) 87 | .then((response) => { 88 | response.json().then((data) => { 89 | console.log(data); 90 | }); 91 | this.setState({ [loadingKey]: false }); 92 | }) 93 | .catch((err) => { 94 | this.setState({ [loadingKey]: false }); 95 | }); 96 | } 97 | 98 | handleSoundsLike = wordIndex => event => { 99 | event.preventDefault(); 100 | let words = [...this.state.words]; 101 | words[wordIndex].sounds_like = event.target.value.split(',').slice(0, 5); 102 | this.setState({ words: words }); 103 | } 104 | 105 | submitWord = wordIndex => event => { 106 | event.preventDefault(); 107 | this.handleAdd(wordIndex); 108 | } 109 | 110 | render() { 111 | return ( 112 |
113 |

Custom Word List ({this.state.words.length})

114 |

These are the out-of-vocabulary words extracted from all the submitted corpora.

115 | { this.state.isLoading && } 116 | { !this.state.isLoading && this.state.words.length <= 0 && !this.state.listError && 117 |


No out-of-vocabulary words

118 | } 119 | { !this.state.isLoading && this.state.words.length > 0 && 120 | this.state.words.map((word, index) => { 121 | return ( 122 | 123 | 124 | 125 | {word.word} 126 | 127 | { 128 | if (window.confirm('Delete this word?')) { 129 | this.handleDelete(index); 130 | }}} /> 131 | 132 | 133 | 134 | 135 | 136 | 137 | 138 | 139 | 140 | 141 |
142 | Sounds Like:
143 |
144 | 145 | 150 | 151 | Add up to five comma-separated 'sounds-like' strings. For example, 152 | you might specify that the word 'IEEE' can sound like 'i triple e'. 153 | 154 | 155 | 165 | 166 |
167 |
168 |
169 |
170 | ); 171 | }) 172 | } 173 | this.handleDismiss('listError')} /> 178 |
179 | ); 180 | } 181 | } 182 | -------------------------------------------------------------------------------- /client/src/pages/util.js: -------------------------------------------------------------------------------- 1 | 2 | /** 3 | * Check if the status code of the response is 200 or not 4 | * @param response express response object 5 | */ 6 | 7 | const JSON_HEADER = /application\/json/i; 8 | 9 | export let handleFetchNonOK = async ( response ) => { 10 | if (!response.ok) { 11 | // Get error message from the 'error' property, if the payload is JSON 12 | if (JSON_HEADER.test(response.headers.get('Content-Type'))) { 13 | const json = await response.json(); 14 | throw Error(json.error || response.statusText); 15 | } 16 | throw Error(response.statusText); 17 | } 18 | return response; 19 | }; 20 | -------------------------------------------------------------------------------- /client/src/serviceWorker.js: -------------------------------------------------------------------------------- 1 | // This optional code is used to register a service worker. 2 | // register() is not called by default. 3 | 4 | // This lets the app load faster on subsequent visits in production, and gives 5 | // it offline capabilities. However, it also means that developers (and users) 6 | // will only see deployed updates on subsequent visits to a page, after all the 7 | // existing tabs open on the page have been closed, since previously cached 8 | // resources are updated in the background. 9 | 10 | // To learn more about the benefits of this model and instructions on how to 11 | // opt-in, read http://bit.ly/CRA-PWA 12 | 13 | const isLocalhost = Boolean( 14 | window.location.hostname === 'localhost' || 15 | // [::1] is the IPv6 localhost address. 16 | window.location.hostname === '[::1]' || 17 | // 127.0.0.1/8 is considered localhost for IPv4. 18 | window.location.hostname.match( 19 | /^127(?:\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$/ 20 | ) 21 | ); 22 | 23 | export function register(config) { 24 | if (process.env.NODE_ENV === 'production' && 'serviceWorker' in navigator) { 25 | // The URL constructor is available in all browsers that support SW. 26 | const publicUrl = new URL(process.env.PUBLIC_URL, window.location.href); 27 | if (publicUrl.origin !== window.location.origin) { 28 | // Our service worker won't work if PUBLIC_URL is on a different origin 29 | // from what our page is served on. This might happen if a CDN is used to 30 | // serve assets; see https://github.com/facebook/create-react-app/issues/2374 31 | return; 32 | } 33 | 34 | window.addEventListener('load', () => { 35 | const swUrl = `${process.env.PUBLIC_URL}/service-worker.js`; 36 | 37 | if (isLocalhost) { 38 | // This is running on localhost. Let's check if a service worker still exists or not. 39 | checkValidServiceWorker(swUrl, config); 40 | 41 | // Add some additional logging to localhost, pointing developers to the 42 | // service worker/PWA documentation. 43 | navigator.serviceWorker.ready.then(() => { 44 | console.log( 45 | 'This web app is being served cache-first by a service ' + 46 | 'worker. To learn more, visit http://bit.ly/CRA-PWA' 47 | ); 48 | }); 49 | } else { 50 | // Is not localhost. Just register service worker 51 | registerValidSW(swUrl, config); 52 | } 53 | }); 54 | } 55 | } 56 | 57 | function registerValidSW(swUrl, config) { 58 | navigator.serviceWorker 59 | .register(swUrl) 60 | .then(registration => { 61 | registration.onupdatefound = () => { 62 | const installingWorker = registration.installing; 63 | if (installingWorker == null) { 64 | return; 65 | } 66 | installingWorker.onstatechange = () => { 67 | if (installingWorker.state === 'installed') { 68 | if (navigator.serviceWorker.controller) { 69 | // At this point, the updated precached content has been fetched, 70 | // but the previous service worker will still serve the older 71 | // content until all client tabs are closed. 72 | console.log( 73 | 'New content is available and will be used when all ' + 74 | 'tabs for this page are closed. See http://bit.ly/CRA-PWA.' 75 | ); 76 | 77 | // Execute callback 78 | if (config && config.onUpdate) { 79 | config.onUpdate(registration); 80 | } 81 | } else { 82 | // At this point, everything has been precached. 83 | // It's the perfect time to display a 84 | // "Content is cached for offline use." message. 85 | console.log('Content is cached for offline use.'); 86 | 87 | // Execute callback 88 | if (config && config.onSuccess) { 89 | config.onSuccess(registration); 90 | } 91 | } 92 | } 93 | }; 94 | }; 95 | }) 96 | .catch(error => { 97 | console.error('Error during service worker registration:', error); 98 | }); 99 | } 100 | 101 | function checkValidServiceWorker(swUrl, config) { 102 | // Check if the service worker can be found. If it can't reload the page. 103 | fetch(swUrl) 104 | .then(response => { 105 | // Ensure service worker exists, and that we really are getting a JS file. 106 | const contentType = response.headers.get('content-type'); 107 | if ( 108 | response.status === 404 || 109 | (contentType != null && contentType.indexOf('javascript') === -1) 110 | ) { 111 | // No service worker found. Probably a different app. Reload the page. 112 | navigator.serviceWorker.ready.then(registration => { 113 | registration.unregister().then(() => { 114 | window.location.reload(); 115 | }); 116 | }); 117 | } else { 118 | // Service worker found. Proceed as normal. 119 | registerValidSW(swUrl, config); 120 | } 121 | }) 122 | .catch(() => { 123 | console.log( 124 | 'No internet connection found. App is running in offline mode.' 125 | ); 126 | }); 127 | } 128 | 129 | export function unregister() { 130 | if ('serviceWorker' in navigator) { 131 | navigator.serviceWorker.ready.then(registration => { 132 | registration.unregister(); 133 | }); 134 | } 135 | } 136 | -------------------------------------------------------------------------------- /cmd/README.md: -------------------------------------------------------------------------------- 1 | # Manual interaction with Watson Speech service 2 | 3 | This directory contains a number of short Python programs that invoke the API call to the `Watson Speech to Text` service. 4 | 5 | These programs are derived from the [API description](https://cloud.ibm.com/apidocs/speech-to-text#introduction). 6 | 7 | They can be used as simple command line tools to interact with the speech service, complementing the UI tool to illustrate the concept and operation of the speech service while allowing more flexibility. They are intended as working code examples and not a full featured command line interface. 8 | 9 | The workflow is as follows: 10 | 11 | 1. Create a custom language model based on an existing base model 12 | - create_language_model.py 13 | - list_language_model.py 14 | - delete_language_model.py 15 | 16 | 2. Add one or more corpus to the custom language model. The corpora are plain text files consisting of sentences used in your particular domain, such as medical transcription. 17 | - add_corpus.py 18 | - list_corpus.py 19 | - delete_corpus.py 20 | 21 | 3. Train the custom language model 22 | - train_language.py 23 | 24 | 4. Create a custom acoustic model 25 | - create_acoustic_model.py 26 | - list_acoustic_model.py 27 | - delete_acoustic_model.py 28 | 29 | 5. Add one or more audio sources to the custom acoustic model. These are voice recordings in your particular domain, such as medical dictation. 30 | - add_audio.py 31 | - list_audio.py 32 | - delete_audio.py 33 | 34 | 6. Train the custom acoustic model 35 | - train_acoustic.py 36 | 37 | 7. Submit a new voice recording to transcribe to text, using both of your custom language and acoustic models. 38 | - transcribe.py 39 | 40 | The python programs use the package *requests*. You can install by: 41 | 42 | ```bash 43 | pip install requests 44 | ``` 45 | 46 | To run the Python programs, please set the following environment variables: 47 | 48 | ```bash 49 | export USERNAME=apikey 50 | export PASSWORD= 51 | export STT_ENDPOINT= 52 | export LANGUAGE_ID= 53 | export ACOUSTIC_ID= 54 | ``` 55 | 56 | You can obtain your apikey credentials from the [IBM Cloud page](https://console.bluemix.net/docs/services/watson/getting-started-credentials.html): 57 | 58 | You can obtain the ID of your custom language or acoustic models by listing the models and using the *customization_id* attribute. 59 | -------------------------------------------------------------------------------- /cmd/add_audio.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import requests 3 | import json 4 | import codecs 5 | import sys, time 6 | from requests.packages.urllib3.exceptions import InsecureRequestWarning 7 | import env 8 | 9 | requests.packages.urllib3.disable_warnings(InsecureRequestWarning) 10 | 11 | ########################################################################## 12 | # Add an archive of audio files (wav files) 13 | # You can add multiple audio sources to an acousic model 14 | ########################################################################## 15 | 16 | audio_filename = env.get_arg("audio filename") 17 | print("\nAdding audio source ...") 18 | 19 | headers = {'Content-Type' : "application/zip"} 20 | uri = env.get_endpoint() + "/v1/acoustic_customizations/"+env.get_acoustic_id()+"/audio/"+audio_filename 21 | 22 | with open(audio_filename, 'rb') as f: 23 | r = requests.post(uri, auth=(env.get_username(),env.get_password()), verify=False, headers=headers, data=f) 24 | 25 | print("Adding audio source returns: ", r.status_code) 26 | if r.status_code != 201: 27 | print("Failed to add audio source") 28 | print(r.text) 29 | sys.exit(-1) 30 | else: 31 | sys.exit(0) 32 | -------------------------------------------------------------------------------- /cmd/add_corpus.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import requests 3 | import json 4 | import codecs 5 | import sys, time 6 | from requests.packages.urllib3.exceptions import InsecureRequestWarning 7 | import env 8 | 9 | requests.packages.urllib3.disable_warnings(InsecureRequestWarning) 10 | 11 | ########################################################################## 12 | # Step 1: Add a corpus file (plain text file) 13 | ########################################################################## 14 | # 'dictation_fixed.txt' is name of local file containing the corpus to be uploaded 15 | # 'dictation-1' is the name of the new corpus 16 | # >>>> REPLACE THE VALUES BELOW WITH YOUR OWN CORPUS FILE AND NAME 17 | #corpus_file = "dictation_fixed.txt" 18 | #corpus_name = "dictation-1" 19 | corpus_file = env.get_arg("corpus filename") 20 | print("\nAdding corpus file: ", corpus_file) 21 | 22 | headers = {'Content-Type' : "application/json"} 23 | uri = env.get_endpoint() + "/v1/customizations/"+env.get_language_id()+"/corpora/"+corpus_file 24 | with open(corpus_file, 'rb') as f: 25 | r = requests.post(uri, auth=(env.get_username(),env.get_password()), verify=False, headers=headers, data=f) 26 | 27 | print("Adding corpus file returns: ", r.status_code) 28 | if r.status_code != 201: 29 | print("Failed to add corpus file") 30 | print(r.text) 31 | sys.exit(-1) 32 | 33 | ########################################################################## 34 | # Step 2: Get status of corpus file just added. 35 | # After corpus is uploaded, there is some analysis done to extract OOVs. 36 | # You cannot upload a new corpus or words while this analysis is on-going so 37 | # we need to loop until the status becomes 'analyzed' for this corpus. 38 | ########################################################################## 39 | print("Checking status of corpus analysis...") 40 | 41 | uri = env.get_endpoint() + "/v1/customizations/"+env.get_language_id()+"/corpora/"+corpus_file 42 | r = requests.get(uri, auth=(env.get_username(),env.get_password()), verify=False, headers=headers) 43 | respJson = r.json() 44 | status = respJson['status'] 45 | time_to_run = 10 46 | while (status != 'analyzed'): 47 | time.sleep(10) 48 | r = requests.get(uri, auth=(env.get_username(),env.get_password()), verify=False, headers=headers) 49 | respJson = r.json() 50 | status = respJson['status'] 51 | print("status: ", status, "(", time_to_run, ")") 52 | time_to_run += 10 53 | 54 | print("Corpus analysis done!") 55 | 56 | ########################################################################## 57 | # Step 3: get the list of all OOVs found 58 | # This step is only necessary if user wants to look at the OOVs and 59 | # validate the auto-added sounds-like field. Probably a good thing to do though. 60 | ########################################################################## 61 | print("\nListing words...") 62 | 63 | uri = env.get_endpoint() + "/v1/customizations/"+env.get_language_id()+"/words?sort=count" 64 | r = requests.get(uri, auth=(env.get_username(),env.get_password()), verify=False, headers=headers) 65 | 66 | print("Listing words returns: ", r.status_code) 67 | file=codecs.open(env.get_language_id()+".OOVs.corpus", 'wb', 'utf-8') 68 | file.write(r.text) 69 | print("Words list from added corpus saved in file: ", env.get_language_id(), ".OOVs.corpus") 70 | 71 | sys.exit(0) 72 | -------------------------------------------------------------------------------- /cmd/create_acoustic_model.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import requests 3 | import json 4 | import codecs 5 | import sys, time 6 | from requests.packages.urllib3.exceptions import InsecureRequestWarning 7 | import env 8 | 9 | requests.packages.urllib3.disable_warnings(InsecureRequestWarning) 10 | 11 | ########################################################################## 12 | # Create a new custom acoustic model 13 | ########################################################################## 14 | 15 | model_name = env.get_arg("acoustic model name") 16 | print("\nCreate a new acoustic custom model: "+model_name) 17 | 18 | headers = {'Content-Type' : "application/json"} 19 | data = {"name" : model_name, "base_model_name" : "en-US_NarrowbandModel", "description" : "My narrowband acoustic model"} 20 | uri = env.get_endpoint() + "/v1/acoustic_customizations" 21 | jsonObject = json.dumps(data).encode('utf-8') 22 | resp = requests.post(uri, auth=(env.get_username(),env.get_password()), verify=False, headers=headers, data=jsonObject) 23 | 24 | print("Create acoustic models returns: ", resp.status_code) 25 | if resp.status_code != 201: 26 | print("Failed to create acoustic model") 27 | print(resp.text) 28 | sys.exit(-1) 29 | else: 30 | print(resp.text) 31 | sys.exit(0) 32 | -------------------------------------------------------------------------------- /cmd/create_language_model.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import requests 3 | import json 4 | import codecs 5 | import sys, time 6 | from requests.packages.urllib3.exceptions import InsecureRequestWarning 7 | import env 8 | 9 | requests.packages.urllib3.disable_warnings(InsecureRequestWarning) 10 | 11 | ########################################################################## 12 | # Create a custom language model 13 | ########################################################################## 14 | 15 | model_name = env.get_arg("language model name") 16 | print("\nCreate a new language custom model: "+model_name) 17 | 18 | headers = {'Content-Type' : "application/json"} 19 | data = {"name" : model_name, "base_model_name" : "en-US_NarrowbandModel", "description" : "My narrowband language model"} 20 | uri = env.get_endpoint() + "/v1/customizations/" 21 | jsonObject = json.dumps(data).encode('utf-8') 22 | r = requests.post(uri, auth=(env.get_username(),env.get_password()), verify=False, headers=headers, data=jsonObject) 23 | 24 | print("Create model returns: ", r.status_code) 25 | print(r.text) 26 | 27 | sys.exit(0) 28 | -------------------------------------------------------------------------------- /cmd/delete_acoustic_model.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import requests 3 | import json 4 | import codecs 5 | import sys, time 6 | from requests.packages.urllib3.exceptions import InsecureRequestWarning 7 | import env 8 | 9 | requests.packages.urllib3.disable_warnings(InsecureRequestWarning) 10 | 11 | ########################################################################## 12 | # Delete a custom acoustic model 13 | ########################################################################## 14 | 15 | print("\nDeleting custom acoustic models...") 16 | 17 | headers = {'Content-Type' : "application/json"} 18 | uri = env.get_endpoint() + "/v1/acoustic_customizations/"+env.get_acoustic_id() 19 | resp = requests.delete(uri, auth=(env.get_username(),env.get_password()), verify=False, headers=headers) 20 | 21 | print("Delete acoustic models returns: ", resp.status_code) 22 | if resp.status_code != 200: 23 | print("Failed to delete acoustic model") 24 | print(resp.text) 25 | sys.exit(-1) 26 | 27 | sys.exit(0) 28 | -------------------------------------------------------------------------------- /cmd/delete_audio.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import requests 3 | import json 4 | import codecs 5 | import sys, time 6 | from requests.packages.urllib3.exceptions import InsecureRequestWarning 7 | import env 8 | 9 | requests.packages.urllib3.disable_warnings(InsecureRequestWarning) 10 | 11 | 12 | ########################################################################## 13 | # Delete an audio source file from the acoustic model 14 | ########################################################################## 15 | 16 | print("\nDeleting audio source ...") 17 | 18 | audio_name = env.get_arg("name of audio source") 19 | headers = {'Content-Type' : "application/json"} 20 | uri = env.get_endpoint() + "/v1/acoustic_customizations/"+env.get_acoustic_id()+"/audio/"+audio_name 21 | r = requests.delete(uri, auth=(env.get_username(),env.get_password()), verify=False, headers=headers) 22 | 23 | print("Delete audio source returns: ", r.status_code) 24 | if r.status_code != 200: 25 | print("Failed to delete audio source") 26 | print(r.text) 27 | sys.exit(-1) 28 | else: 29 | sys.exit(0) 30 | -------------------------------------------------------------------------------- /cmd/delete_corpus.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import requests 3 | import json 4 | import codecs 5 | import sys, time 6 | from requests.packages.urllib3.exceptions import InsecureRequestWarning 7 | import env 8 | 9 | requests.packages.urllib3.disable_warnings(InsecureRequestWarning) 10 | 11 | ########################################################################## 12 | # Delete a corpus file from a custom language model 13 | ########################################################################## 14 | 15 | corpusName = env.get_arg("corpus filename") 16 | 17 | print("\nDeleting corpus ...") 18 | 19 | headers = {'Content-Type' : "application/json"} 20 | uri = env.get_endpoint() + "/v1/customizations/"+env.get_language_id()+"/corpora/"+corpusName 21 | r = requests.delete(uri, auth=(env.get_username(),env.get_password()), verify=False, headers=headers) 22 | 23 | print("Delete corpus returns: ", r.status_code) 24 | if r.status_code != 200: 25 | print("Failed to get corpus") 26 | print(r.text) 27 | sys.exit(-1) 28 | else: 29 | sys.exit(0) 30 | -------------------------------------------------------------------------------- /cmd/delete_language_model.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import requests 3 | import json 4 | import codecs 5 | import sys, time 6 | from requests.packages.urllib3.exceptions import InsecureRequestWarning 7 | import env 8 | 9 | requests.packages.urllib3.disable_warnings(InsecureRequestWarning) 10 | 11 | ########################################################################## 12 | # Delete a custom language model 13 | ########################################################################## 14 | 15 | print("\nDeleting custom language model: ") 16 | 17 | headers = {'Content-Type' : "application/json"} 18 | uri = env.get_endpoint() + "/v1/customizations/"+env.get_language_id() 19 | resp = requests.delete(uri, auth=(env.get_username(),env.get_password()), verify=False, headers=headers) 20 | 21 | print("Delete language models returns: ", resp.status_code) 22 | if resp.status_code != 200: 23 | print("Failed to delete language model") 24 | print(resp.text) 25 | sys.exit(-1) 26 | 27 | sys.exit(0) 28 | -------------------------------------------------------------------------------- /cmd/env.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import os, sys 3 | 4 | ########################################################################## 5 | # Set your IBM Cloud credentials in the environment variables USERNAME 6 | # and PASSWORD 7 | # o If you use IAM service credentials, leave USERNAME set to "apikey" 8 | # and set PASSWORD to the value of your IAM API key. 9 | # o If you use pre-IAM service credentials, set the values to your USERNAME 10 | # and PASSWORD. 11 | # See the following instructions for getting your own credentials: 12 | # https://console.bluemix.net/docs/services/watson/getting-started-credentials.html 13 | ########################################################################## 14 | 15 | def get_username(): 16 | try: 17 | return os.environ['USERNAME'] 18 | except: 19 | print("Please set your username in the environment variable USERNAME.") 20 | print("If you use IAM service credentials, set USERNAME set to the string \"apikey\"") 21 | print("and set PASSWORD to the value of your IAM API key.") 22 | sys.exit(-1) 23 | 24 | 25 | def get_password(): 26 | try: 27 | return os.environ['PASSWORD'] 28 | except: 29 | print("Please set your password in the environment variable PASSWORD") 30 | print("If you use IAM service credentials, set USERNAME set to the string \"apikey\"") 31 | print("and set PASSWORD to the value of your IAM API key.") 32 | sys.exit(-1) 33 | 34 | 35 | def get_endpoint(): 36 | try: 37 | return os.environ['STT_ENDPOINT'] 38 | except: 39 | print("Please set the environment variable STT_ENDPOINT to the " 40 | "URL specified in your service credentials.") 41 | sys.exit(-1) 42 | 43 | 44 | def get_language_id (): 45 | try: 46 | return os.environ['LANGUAGE_ID'] 47 | except: 48 | print("Please set the id for your custom language model in the environment variable LANGUAGE_ID") 49 | sys.exit(-1) 50 | 51 | 52 | def get_acoustic_id (): 53 | try: 54 | return os.environ['ACOUSTIC_ID'] 55 | except: 56 | print("Please set the id for your custom acoustic model in the environment variable ACOUSTIC_ID") 57 | sys.exit(-1) 58 | 59 | 60 | def get_arg(help_string): 61 | if len(sys.argv)==1: 62 | print("Please specify ", help_string) 63 | sys.exit(-1) 64 | else: 65 | return str(sys.argv[1]) 66 | 67 | 68 | -------------------------------------------------------------------------------- /cmd/list_acoustic_model.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import requests 3 | import json 4 | import codecs 5 | import sys, time 6 | from requests.packages.urllib3.exceptions import InsecureRequestWarning 7 | import env 8 | 9 | requests.packages.urllib3.disable_warnings(InsecureRequestWarning) 10 | 11 | ########################################################################## 12 | # Get the list of custom acoustice models 13 | ########################################################################## 14 | 15 | print("\nGetting custom acoustic models...") 16 | 17 | headers = {'Content-Type' : "application/json"} 18 | uri = env.get_endpoint() + "/v1/acoustic_customizations" 19 | r = requests.get(uri, auth=(env.get_username(),env.get_password()), verify=False, headers=headers) 20 | 21 | print("Get acoustice models returns: ", r.status_code) 22 | print(r.text) 23 | 24 | sys.exit(0) 25 | -------------------------------------------------------------------------------- /cmd/list_all_models.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import requests 3 | import json 4 | import codecs 5 | import sys, time 6 | from requests.packages.urllib3.exceptions import InsecureRequestWarning 7 | import env 8 | 9 | requests.packages.urllib3.disable_warnings(InsecureRequestWarning) 10 | 11 | ########################################################################## 12 | # Get list of all custom models 13 | ########################################################################## 14 | 15 | print("\nGetting all models...") 16 | 17 | headers = {'Content-Type' : "application/json"} 18 | uri = env.get_endpoint() + "/v1/models" 19 | r = requests.get(uri, auth=(env.get_username(),env.get_password()), verify=False, headers=headers) 20 | 21 | print("Get all models returns: ", r.status_code) 22 | print(r.text) 23 | 24 | sys.exit(0) 25 | -------------------------------------------------------------------------------- /cmd/list_audio.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import requests 3 | import json 4 | import codecs 5 | import sys, time 6 | from requests.packages.urllib3.exceptions import InsecureRequestWarning 7 | import env 8 | 9 | requests.packages.urllib3.disable_warnings(InsecureRequestWarning) 10 | 11 | ########################################################################## 12 | # List the audio sources for an acoustic model 13 | ########################################################################## 14 | 15 | print("\nGetting audio sources ...") 16 | 17 | headers = {'Content-Type' : "application/json"} 18 | uri = env.get_endpoint() + "/v1/acoustic_customizations/"+env.get_acoustic_id()+"/audio/" 19 | r = requests.get(uri, auth=(env.get_username(),env.get_password()), verify=False, headers=headers) 20 | 21 | print("Get audio sources returns: ", r.status_code) 22 | if r.status_code != 200: 23 | print("Failed to get corpus") 24 | sys.exit(-1) 25 | else: 26 | print(r.text) 27 | sys.exit(0) 28 | -------------------------------------------------------------------------------- /cmd/list_corpus.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import requests 3 | import json 4 | import codecs 5 | import sys, time 6 | from requests.packages.urllib3.exceptions import InsecureRequestWarning 7 | import env 8 | 9 | requests.packages.urllib3.disable_warnings(InsecureRequestWarning) 10 | 11 | ########################################################################## 12 | # List the corpus for a custom langage model 13 | ########################################################################## 14 | 15 | print("\nGetting corpus ...") 16 | 17 | headers = {'Content-Type' : "application/json"} 18 | uri = env.get_endpoint() + "/v1/customizations/"+env.get_language_id()+"/corpora/" 19 | r = requests.get(uri, auth=(env.get_username(),env.get_password()), verify=False, headers=headers) 20 | 21 | print("Get corpus returns: ", r.status_code) 22 | if r.status_code != 200: 23 | print("Failed to get corpus") 24 | sys.exit(-1) 25 | else: 26 | print(r.text) 27 | sys.exit(0) 28 | -------------------------------------------------------------------------------- /cmd/list_language_model.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import requests 3 | import json 4 | import codecs 5 | import sys, time 6 | from requests.packages.urllib3.exceptions import InsecureRequestWarning 7 | import env 8 | 9 | requests.packages.urllib3.disable_warnings(InsecureRequestWarning) 10 | 11 | ########################################################################## 12 | # Get list of custom lanugage models 13 | ########################################################################## 14 | 15 | print("\nGetting custom language models...") 16 | 17 | headers = {'Content-Type' : "application/json"} 18 | uri = env.get_endpoint() + "/v1/customizations" 19 | r = requests.get(uri, auth=(env.get_username(),env.get_password()), verify=False, headers=headers) 20 | 21 | print("Get models returns: ", r.status_code) 22 | print(r.text) 23 | 24 | sys.exit(0) 25 | -------------------------------------------------------------------------------- /cmd/requirements.txt: -------------------------------------------------------------------------------- 1 | striprtf 2 | requests 3 | -------------------------------------------------------------------------------- /cmd/reset_language_model.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import requests 3 | import json 4 | import codecs 5 | import sys, time 6 | from requests.packages.urllib3.exceptions import InsecureRequestWarning 7 | import env 8 | 9 | requests.packages.urllib3.disable_warnings(InsecureRequestWarning) 10 | 11 | ########################################################################## 12 | # Resets a custom language model by removing all corpora, grammars, and words from 13 | # the model. Resetting a custom language model initializes the model to its state 14 | # when it was first created. Metadata such as the name and language of the model 15 | # are preserved, but the model's words resource is removed and must be re-created. 16 | # You must use credentials for the instance of the service that owns a model to reset it. 17 | ########################################################################## 18 | 19 | print("\nResetting custom language model...") 20 | 21 | headers = {'Content-Type' : "application/json"} 22 | uri = env.get_endpoint() + "/v1/customizations/"+env.get_language_id()+"/reset" 23 | r = requests.post(uri, auth=(env.get_username(),env.get_password()), verify=False, headers=headers) 24 | 25 | print("Reset model returns: ", r.status_code) 26 | print(r.text) 27 | 28 | sys.exit(0) 29 | -------------------------------------------------------------------------------- /cmd/train_acoustic_model.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import requests 3 | import json 4 | import codecs 5 | import sys, time 6 | from requests.packages.urllib3.exceptions import InsecureRequestWarning 7 | import env 8 | 9 | requests.packages.urllib3.disable_warnings(InsecureRequestWarning) 10 | 11 | ########################################################################## 12 | # Initiates the training of a custom acoustic model with new audio resources 13 | # using a language model as the base 14 | # A status of available means that the custom model is trained and ready to use. 15 | ########################################################################## 16 | 17 | print("\nTrain custom acoustic model...") 18 | 19 | headers = {'Content-Type' : "application/json"} 20 | uri = env.get_endpoint() + "/v1/acoustic_customizations/"+env.get_acoustic_id()+"/train?custom_language_model_id="+env.get_language_id() 21 | r = requests.post(uri, auth=(env.get_username(),env.get_password()), verify=False, headers=headers) 22 | 23 | print("Train acoustic model returns: ", r.status_code) 24 | 25 | sys.exit(0) 26 | -------------------------------------------------------------------------------- /cmd/train_language_model.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import requests 3 | import json 4 | import codecs 5 | import sys, time 6 | from requests.packages.urllib3.exceptions import InsecureRequestWarning 7 | import env 8 | 9 | requests.packages.urllib3.disable_warnings(InsecureRequestWarning) 10 | 11 | ########################################################################## 12 | # Initiate the training of a custom language model with new resources such as 13 | # corpora, grammars, and custom words 14 | # A status of available means that the custom model is trained and ready to use. 15 | ########################################################################## 16 | 17 | print("\nTrain custom language model...") 18 | 19 | headers = {'Content-Type' : "application/json"} 20 | uri = env.get_endpoint() + "/v1/customizations/"+env.get_language_id()+"/train" 21 | r = requests.post(uri, auth=(env.get_username(),env.get_password()), verify=False, headers=headers) 22 | 23 | print("Train language model returns: ", r.status_code) 24 | 25 | sys.exit(0) 26 | -------------------------------------------------------------------------------- /cmd/transcribe.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import requests 3 | import json 4 | import codecs 5 | import os, sys, time 6 | from requests.packages.urllib3.exceptions import InsecureRequestWarning 7 | import env 8 | 9 | requests.packages.urllib3.disable_warnings(InsecureRequestWarning) 10 | 11 | headers = {'Content-Type' : "audio/wav"} 12 | 13 | ########################################################################## 14 | # Transcribe an audio file using a custom language and acoustic model 15 | # Need to specify in the API call: 16 | # - the base model 17 | # - the language_customization_id 18 | # - the acoustic_customization_id 19 | ########################################################################## 20 | 21 | print("\nTranscribe an audio using: ") 22 | 23 | try: 24 | language_id = "&language_customization_id="+os.environ['LANGUAGE_ID'] 25 | print(" - custom language model (id: %s)" % os.environ['LANGUAGE_ID']) 26 | except: 27 | language_id = "" 28 | print(" - base language model") 29 | 30 | try: 31 | acoustic_id = "&acoustic_customization_id="+os.environ['ACOUSTIC_ID'] 32 | print(" - custom acoustic model (id: %s)" % os.environ['ACOUSTIC_ID']) 33 | except: 34 | acoustic_id = "" 35 | print(" - base acoustic model") 36 | 37 | 38 | audio_file = env.get_arg("audio file to transcribe") 39 | uri = env.get_endpoint() + "/v1/recognize?model=en-US_NarrowbandModel"+language_id+acoustic_id 40 | with open(audio_file, 'rb') as f: 41 | r = requests.post(uri, auth=(env.get_username(),env.get_password()), verify=False, headers=headers, data=f) 42 | 43 | output_file = open(audio_file.replace('.wav','') + '.transcript','w') 44 | transcript = "" 45 | print(r.json()) 46 | for result in r.json()['results']: 47 | for alternative in result['alternatives']: 48 | transcript += alternative['transcript'] 49 | 50 | print("Transcription: ") 51 | print(transcript) 52 | 53 | output_file.write(transcript) 54 | output_file.close() 55 | 56 | sys.exit(0) 57 | -------------------------------------------------------------------------------- /data/convert_rtf.py: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # Simple program to convert .rtf text files in the Documents directory to 3 | # plain text format. 4 | ########################################################################### 5 | 6 | import glob 7 | from striprtf.striprtf import rtf_to_text 8 | 9 | if __name__ == '__main__': 10 | rtf_files = glob.glob('Documents/*.rtf') 11 | for rtf_name in rtf_files: 12 | txt_name = rtf_name.replace('.rtf','.txt') 13 | txt_file = open(txt_name, "x") 14 | content_txt = rtf_to_text(open(rtf_name).read()) 15 | txt_file.write(content_txt) 16 | txt_file.close() 17 | -------------------------------------------------------------------------------- /data/fixup.sed: -------------------------------------------------------------------------------- 1 | s/__*[a-zA-Z0-9:-]*// 2 | s/{period}/ . /g 3 | s/{comma}/ , /g 4 | s/YYYY//g 5 | s/yyyy//g 6 | s/xxx//g 7 | s/xx//g 8 | s/XXXXX//g 9 | s/xxxx//g 10 | s/.Next/. Next/g 11 | s/HEENT:/HEENT :/g 12 | s/\[skip\]//g 13 | s/ dication/ dictation/g 14 | s/ dicharge/ discharge/g 15 | s/ wsould/ would/g 16 | s/\([.]\) \([[:upper:]]\)/\1\ 17 | \2/g 18 | -------------------------------------------------------------------------------- /doc/cloud-deploy.md: -------------------------------------------------------------------------------- 1 | # Deploy web app on IBM Cloud using Cloud Foundry 2 | 3 | These are instructions for a quick deployment of the web app on IBM Cloud. 4 | 5 | ## Prerequisites 6 | 7 | * Ensure that the [IBM Cloud CLI](https://cloud.ibm.com/docs/cli/index.html?locale=en-US#overview) 8 | tool is installed locally. Follow the instructions in the linked documentation to 9 | configure your environment. 10 | 11 | ## Update manifest.yml 12 | 13 | In the `manifest.yml` file found in the root of the project, change the `route` value to a 14 | unique route of your choosing by replacing the `your-host` placeholder. For example, route could be 15 | `- route: my-stt-customizer.mybluemix.net`. Just make sure that the subdomain is not already taken. 16 | This route corresponds to the URL for which the app will be accessible from. Also, feel free 17 | to change the value for `name` which will correspond to the name of your Cloud Foundry app. 18 | 19 | ## Update client config.js 20 | 21 | In `client/src/config.js`, update the params `API_ENDPOINT` and `WS_ENDPOINT` to use the route you 22 | previously specified in the `manifest.yml` file. Using the above example route, this would be: 23 | 24 | ``` 25 | API_ENDPOINT: 'https://my-stt-customizer.mybluemix.net/api', 26 | WS_ENDPOINT: 'wss://my-stt-customizer.mybluemix.net/', 27 | ``` 28 | 29 | > **NOTE**: Both `https` and `wss` are used because Cloud Foundry apps on the IBM Cloud are hosted using HTTPS. 30 | 31 | ## Make sure build files are up to date 32 | 33 | From the root of the project, run: 34 | 35 | ```bash 36 | npm run build 37 | cd client && npm run build 38 | ``` 39 | 40 | If changes are made to the code, and the app needs to be updated, these will have to be run again. 41 | 42 | ## Watson credentials configuration 43 | 44 | There are two ways to get your Watson STT credentials into your app. 45 | 46 | **Option 1** 47 | 48 | Use the `services.json` file for which a sample was provided. You only have to fill it out and 49 | ensure it exists in the root of the project. 50 | 51 | **Option 2** 52 | 53 | Deploy the app, then connect your instance of the Watson STT service to the app. 54 | 1) This can be done in the app dashboard after you initially deploy the app by 55 | selecting `Connections` in the left menu, then clicking on the `Create connection` 56 | button. After clicking on this button, you can select an existing 57 | `Speech to Text` service that you have access to. Your app can be found from the IBM Cloud 58 | [resource list](https://cloud.ibm.com/resources). 59 | 2) After connecting the service, go to `Runtime` from the left menu, and select 60 | the `Environment variables` tab. You should now see the STT service in the 61 | `VCAP_SERVICES` section. Take note of the `name` of the service. 62 | 3) Scroll down to the `User defined` environment variables section, and click `Add`, then 63 | add the following key-value pair, replacing the placeholder with your service name: 64 | - `STT_SERVICE_NAME: ` 65 | 66 | --- 67 | 68 | The application first checks for the service in `VCAP_SERVICES` by searching for a name 69 | corresponding to the environment variable `STT_SERVICE_NAME`. If these don't exist, 70 | then the app will check for credentials in `services.json`. Some might find it preferable 71 | just to use the `services.json` file, and push it with the app. 72 | 73 | ## Deploy the app 74 | 75 | From the root of the project run: 76 | 77 | ```bash 78 | ibmcloud cf push 79 | ``` 80 | 81 | This will push a new app or update the app on the IBM Cloud. You can view your apps online from the 82 | IBM Cloud [resource list](https://cloud.ibm.com/resources). 83 | 84 | ## Visit deployed app 85 | 86 | In a browser, navigate to `https://`. 87 | -------------------------------------------------------------------------------- /doc/source/images/architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IBM/Train-Custom-Speech-Model/b58173699b336fe29273daace9a0041a7bcf80cf/doc/source/images/architecture.png -------------------------------------------------------------------------------- /doc/source/images/custom-word-list.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IBM/Train-Custom-Speech-Model/b58173699b336fe29273daace9a0041a7bcf80cf/doc/source/images/custom-word-list.png -------------------------------------------------------------------------------- /doc/source/images/main-page.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IBM/Train-Custom-Speech-Model/b58173699b336fe29273daace9a0041a7bcf80cf/doc/source/images/main-page.png -------------------------------------------------------------------------------- /doc/source/images/re-train.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IBM/Train-Custom-Speech-Model/b58173699b336fe29273daace9a0041a7bcf80cf/doc/source/images/re-train.png -------------------------------------------------------------------------------- /doc/source/images/training-panel.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IBM/Train-Custom-Speech-Model/b58173699b336fe29273daace9a0041a7bcf80cf/doc/source/images/training-panel.png -------------------------------------------------------------------------------- /manifest.yml: -------------------------------------------------------------------------------- 1 | --- 2 | applications: 3 | - name: watson-stt-customizer 4 | memory: 512M 5 | routes: 6 | - route: your-host.mybluemix.net 7 | -------------------------------------------------------------------------------- /model/user.json: -------------------------------------------------------------------------------- 1 | { 2 | "user1": { 3 | "password": "user1", 4 | "langModel": "custom-model-1", 5 | "acousticModel": "acoustic-model-1", 6 | "baseModel": "en-US_NarrowbandModel" 7 | }, 8 | "user2": { 9 | "password": "user2", 10 | "langModel": "custom-model-2", 11 | "acousticModel": "acoustic-model-2", 12 | "baseModel": "en-US_NarrowbandModel" 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "train-custom-speech-model", 3 | "version": "0.1.0", 4 | "description": "Train a Watson Speech model for a specialized domain to improve the speech to text performance", 5 | "main": "npm run-script start", 6 | "scripts": { 7 | "start": "npm run serve", 8 | "build": "npm run build-client && tsc", 9 | "build-client": "cd client && npm install", 10 | "serve": "node dist/server.js | bunyan", 11 | "test": "echo \"Error: no test specified\" && exit 1", 12 | "client": "cd client && npm run start", 13 | "server": "node dist/server.js", 14 | "dev": "npm run build && concurrently --kill-others-on-fail \"npm run start\" \"npm run client\"", 15 | "lint": "tslint --project tsconfig.json -c tslint.json 'server/**/*.ts' && npm run lint-client", 16 | "lint-client": "cd client && npm run lint" 17 | }, 18 | "repository": { 19 | "type": "git", 20 | "url": "git+https://github.com/IBM/Train-Custom-Speech-Model.git" 21 | }, 22 | "keywords": [ 23 | "Watson", 24 | "Speech-to-Text" 25 | ], 26 | "author": "htchang@us.ibm.com, ton@us.ibm.com, pvaneck@us.ibm.com, yh.wang@ibm.com", 27 | "license": "Apache-2.0", 28 | "bugs": { 29 | "url": "https://github.com/IBM/Train-Custom-Speech-Model/issues" 30 | }, 31 | "homepage": "https://github.com/IBM/Train-Custom-Speech-Model#readme", 32 | "devDependencies": { 33 | "@types/compression": "0.0.36", 34 | "@types/cors": "^2.8.4", 35 | "@types/express": "^4.16.0", 36 | "@types/express-bunyan-logger": "^1.3.0", 37 | "@types/express-session": "^1.15.6", 38 | "@types/multer": "^1.3.7", 39 | "@types/passport": "^0.4.6", 40 | "@types/passport-local": "^1.0.33", 41 | "@types/ws": "^6.0.1", 42 | "shelljs": "^0.8.2", 43 | "ts-node": "^7.0.1", 44 | "tslint": "^5.13.1", 45 | "typescript": "^3.0.1" 46 | }, 47 | "dependencies": { 48 | "cfenv": "^1.1.0", 49 | "compression": "^1.7.3", 50 | "concurrently": "^4.0.1", 51 | "cors": "^2.8.5", 52 | "express": "^4.16.3", 53 | "express-bunyan-logger": "^1.3.3", 54 | "express-session": "^1.15.6", 55 | "express-validator": "^5.3.0", 56 | "ibm-watson": "^4.2.1", 57 | "multer": "^1.3.1", 58 | "passport": "^0.4.0", 59 | "passport-local": "^1.0.0", 60 | "ws": "^6.2.0" 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /server/controllers/api.ts: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | import * as multer from 'multer'; 4 | import * as stream from 'stream'; 5 | import {WatsonSTT} from '../util'; 6 | import { Request, Response, RequestHandler } from 'express'; 7 | import { NextFunction } from 'connect'; 8 | 9 | declare global { 10 | namespace Express { 11 | interface Request { 12 | // Let's attach session scoped WatsonSTT 13 | // to Request directly 14 | watsonSTT?: WatsonSTT; 15 | log?: Console; 16 | } 17 | } 18 | } 19 | 20 | const upload = multer({ storage: multer.memoryStorage() }); 21 | 22 | /** 23 | * Handle the audio file upload. 24 | */ 25 | const uploadAudio: RequestHandler = upload.single('audio'); 26 | 27 | /** 28 | * POST /api/transcribe 29 | */ 30 | async function postTranscribe (req: Request, res: Response) { 31 | const bufferStream = new stream.PassThrough(); 32 | bufferStream.end( req.file.buffer ); 33 | const type = req.file.originalname.split('.').pop(); 34 | 35 | const result = await req.watsonSTT.transcribe(req.file.buffer, type, 36 | req.file.originalname, req.body.languageModel, req.body.acousticModel); 37 | 38 | if (result[0]) { 39 | req.log.error( 40 | `recognize call failed: ${JSON.stringify(result[0], null, 2)}`); 41 | return res.status(500).json({ 42 | error: result[0].code || 'failed to translate the file' 43 | }); 44 | } else { 45 | return res.status(200).json({tid: result[1]}); 46 | } 47 | } 48 | 49 | async function getModel(req: Request, res: Response) { 50 | const result = await req.watsonSTT.getLanguageModel(); 51 | if (result[0]) { 52 | req.log.error(`can not get model: ${JSON.stringify(result[0], null, 2)}`); 53 | return res.status(500).json({ 54 | error: result[0].error || 'can not get model info' 55 | }); 56 | } else { 57 | return res.status(200).json({ 58 | data: result[1] 59 | }); 60 | } 61 | } 62 | 63 | async function getAcousticModel(req: Request, res: Response) { 64 | const result = await req.watsonSTT.getAcousticModel(); 65 | if (result[0]) { 66 | req.log.error( 67 | `can not get acosutic model: ${JSON.stringify(result[0], null, 2)}`); 68 | return res.status(500).json({ 69 | error: result[0].error || 'can not get acoustic model info' 70 | }); 71 | } else { 72 | return res.status(200).json({ 73 | data: result[1] 74 | }); 75 | } 76 | } 77 | 78 | async function postAudio(req: Request, res: Response) { 79 | const bufferStream = new stream.PassThrough(); 80 | bufferStream.end( req.file.buffer ); 81 | 82 | const type = req.file.originalname.split('.').pop(); 83 | let contentType; 84 | if (type === 'zip') { 85 | contentType = 'application/zip'; 86 | } 87 | else if (['tgz', 'gz'].indexOf(type) >= 0) { 88 | contentType = 'application/gzip'; 89 | } 90 | // Else assume it is audio type. 91 | else { 92 | contentType = 'audio/' + type; 93 | } 94 | 95 | const params = { 96 | customization_id: req.watsonSTT.acousticModelId, 97 | content_type: contentType, 98 | audio_resource: bufferStream, 99 | audio_name: req.body.audioName 100 | }; 101 | const result = await req.watsonSTT.addAudio(params); 102 | 103 | if (result[0]) { 104 | req.log.error( 105 | `failed to add/update audio: ${JSON.stringify(result[0], null, 2)}`); 106 | return res.status(500).json({ 107 | error: result[0].error || 'failed to add/update audio' 108 | }); 109 | } else { 110 | return res.status(200).json({ 111 | status: 'added' 112 | }); 113 | } 114 | } 115 | 116 | async function listAudio(req: Request, res: Response) { 117 | const audioResources = await req.watsonSTT.listAudio(); 118 | if (audioResources[0]) { 119 | req.log.error( 120 | `failed to list audio: ${JSON.stringify(audioResources[0], null, 2)}`); 121 | return res.status(500).json({ 122 | error: audioResources[0].error || 'failed to list audio' 123 | }); 124 | } else { 125 | return res.status(200).json({ 126 | audio: audioResources[1].audio 127 | }); 128 | } 129 | } 130 | 131 | async function deleteAudio(req: Request, res: Response) { 132 | if (req.params.name) { 133 | const result = await req.watsonSTT.deleteAudio(req.params.name); 134 | if (result[0]) { 135 | req.log.error( 136 | `failed to delete audio: ${JSON.stringify(result[0], null, 2)}`); 137 | return res.status(500).json({ 138 | error: result[0].error || 'failed to delete audio' 139 | }); 140 | } else { 141 | return res.status(200).json({ 142 | audioName: result[1], 143 | status: 'deleted' 144 | }); 145 | } 146 | } else { 147 | return res.status(400).json({ 148 | error: 'No audio name specified.' 149 | }); 150 | } 151 | } 152 | 153 | async function postCorpus(req: Request, res: Response) { 154 | const result = await req.watsonSTT.addCorpus( 155 | req.body.corpusName, 156 | req.body.corpus); 157 | 158 | if (result[0]) { 159 | req.log.error( 160 | `failed to add/update corpus: ${JSON.stringify(result[0], null, 2)}`); 161 | return res.status(500).json({ 162 | error: result[0].error || 'failed to add/update corpus' 163 | }); 164 | } else { 165 | return res.status(200).json({ 166 | status: 'added' 167 | }); 168 | } 169 | } 170 | 171 | async function deleteCorpus(req: Request, res: Response) { 172 | if (req.params.name) { 173 | const result = await req.watsonSTT.deleteCorpus(req.params.name); 174 | if (result[0]) { 175 | req.log.error( 176 | `failed to delete corpus: ${JSON.stringify(result[0], null, 2)}`); 177 | return res.status(500).json({ 178 | error: result[0].error || 'failed to delete corpus' 179 | }); 180 | } else { 181 | return res.status(200).json({ 182 | corpusName: result[1], 183 | status: 'deleted' 184 | }); 185 | } 186 | } else { 187 | return res.status(400).json({ 188 | error: 'No corpus name specified.' 189 | }); 190 | } 191 | } 192 | 193 | async function getCorpora(req: Request, res: Response) { 194 | const corpora = await req.watsonSTT.getCorpora(); 195 | if (corpora[0]) { 196 | req.log.error( 197 | `failed to get corpora: ${JSON.stringify(corpora[0], null , 2)}`); 198 | return res.status(500).json({ 199 | error: corpora[0].error || 'failed to get corpora' 200 | }); 201 | } else { 202 | return res.status(200).json({ 203 | corpora: corpora[1].corpora 204 | }); 205 | } 206 | } 207 | 208 | async function getWords(req: Request, res: Response) { 209 | const words = await req.watsonSTT.listWords(); 210 | if (words[0]) { 211 | req.log.error(`failed to get words: ${JSON.stringify(words[0], null, 2)}`); 212 | return res.status(500).json({ 213 | error: words[0].error || 'failed to get words' 214 | }); 215 | } else { 216 | return res.status(200).json({ 217 | words: words[1].words 218 | }); 219 | } 220 | } 221 | 222 | async function addWord(req: Request, res: Response) { 223 | const result = await req.watsonSTT.addWord( 224 | req.body.word, req.body.sounds_like, req.body.display_as 225 | ); 226 | if (result[0]) { 227 | req.log.error(`failed to add word: ${JSON.stringify(result[0], null, 2)}`); 228 | return res.status(500).json({ 229 | error: result[0].error || 'failed to add the specified word' 230 | }); 231 | } else { 232 | return res.status(200).json({ 233 | word: req.body.word, 234 | status: 'added' 235 | }); 236 | } 237 | } 238 | 239 | async function deleteWord(req: Request, res: Response) { 240 | if (req.params.name) { 241 | const result = await req.watsonSTT.deleteWord(req.params.name); 242 | if (result[0]) { 243 | req.log.error( 244 | `failed to delete word: ${JSON.stringify(result[0], null, 2)}`); 245 | return res.status(500).json({ 246 | error: result[0].error || 'failed to delete the specified word' 247 | }); 248 | } else { 249 | return res.status(200).json({ 250 | word: req.params.name, 251 | status: 'deleted' 252 | }); 253 | } 254 | } else { 255 | return res.status(400).json({ 256 | error: 'No word name specified.' 257 | }); 258 | } 259 | } 260 | 261 | async function trainModel(req: Request, res: Response) { 262 | const result = await req.watsonSTT.trainModel(); 263 | if (result[0]) { 264 | req.log.error( 265 | `failed to train the model: ${JSON.stringify(result[0], null, 2)}`); 266 | return res.status(500).json({ 267 | error: result[0].error || 'failed to train the model' 268 | }); 269 | } else { 270 | return res.status(200).json({ 271 | status: 'started' 272 | }); 273 | } 274 | } 275 | 276 | async function trainAcousticModel(req: Request, res: Response) { 277 | // Get the customization ID of the custom language model to pass in for 278 | // training. 279 | const result = await req.watsonSTT.trainAcousticModel(); 280 | if (result[0]) { 281 | req.log.error( 282 | `failed to train acoustic model: ${JSON.stringify(result[0], null, 2)}`); 283 | return res.status(500).json({ 284 | error: result[0].error || 'failed to train acoustic model' 285 | }); 286 | } else { 287 | return res.status(200).json({ 288 | status: 'started' 289 | }); 290 | } 291 | } 292 | 293 | async function checkWatsonCredential( 294 | req: Request, res: Response, next: NextFunction) { 295 | const watsonSTT: WatsonSTT = await WatsonSTT.getInstance(req); 296 | if (watsonSTT === undefined) { 297 | req.log.error('Can not connect to Watson service'); 298 | next({ 299 | error: 'Can not connect to Watson service, please check server logs'}); 300 | } 301 | req.watsonSTT = watsonSTT; 302 | next(); 303 | } 304 | export { 305 | uploadAudio, postTranscribe, getModel, getAcousticModel, deleteCorpus, 306 | postCorpus, postAudio, listAudio, deleteAudio, getCorpora, getWords, 307 | addWord, deleteWord, trainModel, trainAcousticModel, checkWatsonCredential 308 | }; 309 | -------------------------------------------------------------------------------- /server/controllers/user.ts: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | import { Request, Response } from 'express'; 3 | import * as passport from 'passport'; 4 | import { User } from '../util'; 5 | 6 | /** 7 | * POST /login 8 | * Sign in using username and password. 9 | */ 10 | export let postLogin = (req: Request, res: Response) => { 11 | req.assert('username', 'Username cannot be blank').notEmpty(); 12 | req.assert('password', 'Password cannot be blank').notEmpty(); 13 | 14 | const errors = req.validationErrors(); 15 | 16 | if (errors) { 17 | return res.status(401).json({ 18 | error: 'There was an error logging in.', 19 | authenticated: false 20 | }); 21 | } 22 | passport.authenticate('local', (err: Error, user: User) => { 23 | if (err) { 24 | return res.status(401).json({ 25 | error: `There was an error authenticating: ${err}`, 26 | authenticated: false 27 | }); 28 | } 29 | if (!user) { 30 | return res.status(401).json({ 31 | error: 'Invalid credentials. Please try again.', 32 | authenticated: false 33 | }); 34 | } 35 | req.logIn(user, (err) => { 36 | if (err) { 37 | return res.status(401).json({ 38 | error: `There was an error authenticating: ${err}`, 39 | authenticated: false 40 | }); 41 | } 42 | return res.status(200).json({ 43 | user: req.user, 44 | authenticated: true 45 | }); 46 | }); 47 | return null; 48 | })(req, res); 49 | return null; 50 | }; 51 | 52 | /** 53 | * GET /user 54 | * Check authentication status of current user. 55 | */ 56 | export let getUser = (req: Request, res: Response) => { 57 | if (req.isAuthenticated()) { 58 | return res.status(200).json({ 59 | user: req.user, 60 | authenticated: true 61 | }); 62 | } 63 | else { 64 | return res.status(401).json({ 65 | error: 'User is not authenticated', 66 | authenticated: false 67 | }); 68 | } 69 | }; 70 | 71 | /** 72 | * POST /logout 73 | * Log out. 74 | */ 75 | export let postLogout = (req: Request, res: Response) => { 76 | req.logout(); 77 | res.status(200).send('OK'); 78 | }; 79 | -------------------------------------------------------------------------------- /server/routes/api.ts: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | import { Router } from 'express'; 3 | 4 | import * as apiController from '../controllers/api'; 5 | import * as userController from '../controllers/user'; 6 | 7 | class Api { 8 | public router: Router; 9 | public constructor() { 10 | this.router = Router(); 11 | this.init(); 12 | } 13 | private init() { 14 | // General STT API endpoints. 15 | this.router.get('/model', 16 | apiController.checkWatsonCredential, 17 | apiController.getModel); 18 | 19 | this.router.post('/transcribe', 20 | apiController.checkWatsonCredential, 21 | apiController.uploadAudio, 22 | apiController.postTranscribe); 23 | 24 | this.router.post('/train', 25 | apiController.checkWatsonCredential, 26 | apiController.trainModel); 27 | 28 | this.router.post('/train-acoustic', 29 | apiController.checkWatsonCredential, 30 | apiController.trainAcousticModel); 31 | 32 | this.router.get('/acoustic-model', 33 | apiController.checkWatsonCredential, 34 | apiController.getAcousticModel); 35 | 36 | this.router.get('/audio', 37 | apiController.checkWatsonCredential, 38 | apiController.listAudio); 39 | 40 | this.router.delete('/audio/:name', 41 | apiController.checkWatsonCredential, 42 | apiController.deleteAudio); 43 | 44 | this.router.post('/audio', 45 | apiController.checkWatsonCredential, 46 | apiController.uploadAudio, 47 | apiController.postAudio); 48 | 49 | this.router.get('/corpora', 50 | apiController.checkWatsonCredential, 51 | apiController.getCorpora); 52 | 53 | this.router.delete('/corpora/:name', 54 | apiController.checkWatsonCredential, 55 | apiController.deleteCorpus); 56 | 57 | this.router.post('/corpora', 58 | apiController.checkWatsonCredential, 59 | apiController.postCorpus); 60 | 61 | this.router.get('/words', 62 | apiController.checkWatsonCredential, 63 | apiController.getWords); 64 | 65 | this.router.post('/words', 66 | apiController.checkWatsonCredential, 67 | apiController.addWord); 68 | 69 | this.router.delete('/words/:name', 70 | apiController.checkWatsonCredential, 71 | apiController.deleteWord); 72 | 73 | // User endpoints 74 | this.router.post('/login', userController.postLogin); 75 | this.router.post('/logout', userController.postLogout); 76 | this.router.get('/user', userController.getUser); 77 | } 78 | } 79 | 80 | export let router = (new Api()).router; 81 | -------------------------------------------------------------------------------- /server/server.ts: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | /** 3 | * Module dependencies. 4 | */ 5 | import * as fs from 'fs'; 6 | import * as path from 'path'; 7 | import * as bodyParser from 'body-parser'; 8 | import * as compression from 'compression'; // compresses requests 9 | import * as cors from 'cors'; 10 | import * as express from 'express'; 11 | import * as session from 'express-session'; 12 | import {User, getCfenv, wsHandler } from './util'; 13 | import * as crypto from 'crypto'; 14 | import * as passport from 'passport'; 15 | import { Strategy as LocalStrategy } from 'passport-local'; 16 | import * as expressValidator from 'express-validator'; 17 | import * as bunyanFactory from 'express-bunyan-logger'; 18 | import { Server as WSServer } from 'ws'; 19 | import * as http from 'http'; 20 | 21 | /** 22 | * Routes 23 | */ 24 | import {router} from './routes/api'; 25 | 26 | /** 27 | * API keys and Passport configuration. 28 | */ 29 | class App { 30 | 31 | // ref to Express instance 32 | public express: express.Application; 33 | 34 | constructor() { 35 | this.express = express(); 36 | this.middleware(); 37 | this.routes(); 38 | this.launchConf(); 39 | } 40 | 41 | private middleware(): void { 42 | initPassport(); 43 | this.express.set('port', process.env.PORT || 5000); 44 | this.express.set('stt_service', getCfenv()); 45 | this.express.use(cors( 46 | { origin: 'http://localhost:3000', credentials: true } 47 | )); 48 | this.express.use(bunyanFactory({ 49 | excludes: ['req', 'res', 50 | 'req-headers', 'res-headers', 51 | 'response-hrtime', 'user-agent'], 52 | obfuscate: ['body.password'] 53 | })); 54 | this.express.use(compression()); 55 | this.express.use(expressValidator()); 56 | this.express.use(session({ 57 | resave: true, 58 | saveUninitialized: true, 59 | secret: crypto.randomBytes(64).toString('hex'), 60 | })); 61 | this.express.use(bodyParser.json({limit: '2mb'})); 62 | this.express.use(bodyParser.urlencoded({ extended: true })); 63 | this.express.use(passport.initialize()); 64 | this.express.use(passport.session()); 65 | this.express.use(isAuthenticated); 66 | } 67 | 68 | /** 69 | * Primary app routes. 70 | */ 71 | private routes(): void { 72 | this.express.use('/api', router); 73 | this.express.use( 74 | express.static(path.join(__dirname, '..', 'client', 'build')) 75 | ); 76 | this.express.get('/*', (req, res) => { 77 | res.sendFile( 78 | path.join(__dirname, '..', 'client' , 'build', 'index.html') 79 | ); 80 | }); 81 | } 82 | 83 | private launchConf() { 84 | 85 | /** 86 | * Start Express server. 87 | */ 88 | 89 | const server = http.createServer(); 90 | const wss = new WSServer({server}); 91 | server.on('request', this.express); 92 | wss.on('connection', wsHandler); 93 | server.listen(this.express.get('port'), () => { 94 | // tslint:disable-next-line:no-console 95 | console.log((' App is running at http://localhost:%d \ 96 | in %s mode'), this.express.get('port'), this.express.get('env')); 97 | // tslint:disable-next-line:no-console 98 | console.log(' Press CTRL-C to stop'); 99 | }) 100 | .setTimeout(600000); 101 | } 102 | } 103 | 104 | function initPassport() { 105 | /* 106 | * Sign in using Username and Password. 107 | */ 108 | 109 | const users = JSON.parse(fs.readFileSync( 110 | path.join(__dirname, '..', 'model', 'user.json')).toString()); 111 | 112 | passport.serializeUser((user: User, done) => { 113 | done(undefined, user.username); 114 | }); 115 | 116 | passport.deserializeUser((username: string, done) => { 117 | done(undefined, Object.assign( {username}, users[username])); 118 | }); 119 | 120 | passport.use(new LocalStrategy( 121 | { usernameField: 'username' }, 122 | (username: string, password: string, done: Function) => { 123 | if (users[username]) { 124 | if (users[username].password === password) { 125 | return done(undefined, 126 | Object.assign({username}, users[username])); 127 | } 128 | return done(undefined, 129 | false, 130 | { message: 'Invalid username or password.' }); 131 | } else { 132 | return done(undefined, 133 | false, 134 | { message: `user: ${username} doesn't exist` }); 135 | } 136 | })); 137 | } 138 | 139 | /** 140 | * Login Required middleware. 141 | */ 142 | function isAuthenticated (req: express.Request, res: express.Response, 143 | next: express.NextFunction) { 144 | if (req.isAuthenticated() || req.path === '/api/login' || 145 | !(req.path.includes('api'))) { 146 | return next(); 147 | } 148 | return res.status(401).json({ 149 | error: 'Not authorized to view this resource.' 150 | }); 151 | } 152 | 153 | export let server = (new App()).express; 154 | -------------------------------------------------------------------------------- /server/types/cfenv.d.ts: -------------------------------------------------------------------------------- 1 | declare module 'cfenv'; 2 | -------------------------------------------------------------------------------- /server/util.ts: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | import { Request } from 'express'; 4 | import * as cfenv from 'cfenv'; 5 | import * as fs from 'fs'; 6 | import * as path from 'path'; 7 | import SpeechToTextV1 = require('ibm-watson/speech-to-text/v1'); 8 | import * as STTDef from 'ibm-watson/speech-to-text/v1-generated'; 9 | import * as WebSocket from 'ws'; 10 | 11 | /** 12 | * Need the final property in SpeechToTextV1.SpeechRecognitionResult 13 | */ 14 | interface STTStreamResult { 15 | final?: boolean; 16 | } 17 | 18 | interface CfenvOpt { 19 | vcapFile?: string; 20 | } 21 | 22 | export interface User { 23 | username: string; 24 | langModel: string; 25 | acousticModel: string; 26 | baseModel: string; 27 | } 28 | 29 | export function getCfenv () { 30 | const cfenvOpt: CfenvOpt = {}; 31 | const serviceName = process.env.STT_SERVICE_NAME || 32 | 'code-pattern-custom-language-model'; 33 | 34 | const service = cfenv.getAppEnv(cfenvOpt).getService(serviceName); 35 | 36 | // If service was not found, fall back to services.json if it exists. 37 | const servicesFile = path.join(__dirname, '..', 'services.json'); 38 | if (!service && fs.existsSync(servicesFile)) { 39 | const creds = require(servicesFile); 40 | return creds.services[serviceName][0]; 41 | } 42 | // Just return the service. 43 | else { 44 | return service; 45 | } 46 | 47 | } 48 | 49 | export interface STTError { 50 | code?: string; 51 | error?: string; 52 | msg?: string; 53 | } 54 | 55 | interface RecognizeParams { 56 | objectMode?: boolean; 57 | interim_results?: boolean; 58 | content_type: string; 59 | model: string; 60 | language_customization_id?: string; 61 | acoustic_customization_id?: string; 62 | smart_formatting?: boolean; 63 | redaction?: boolean; 64 | word_alternatives_threshold?: number; 65 | timestamps?: boolean; 66 | } 67 | /** 68 | * A class that wraps Watson STTV1 api and mainly focus on custom 69 | * language/acoustic model management. 70 | */ 71 | export class WatsonSTT { 72 | readonly speech: SpeechToTextV1; 73 | readonly username: string; 74 | readonly langModelName: string; 75 | readonly langModelId: string; 76 | readonly acousticModelName: string; 77 | readonly acousticModelId: string; 78 | readonly baseModel: string; 79 | readonly speechModels: STTDef.SpeechModels; 80 | 81 | private constructor(speech: SpeechToTextV1, username: string, 82 | langModelName: string, langModelId: string, 83 | acousticModelName: string, acousticModelId: string, 84 | baseModel: string, speechModels: STTDef.SpeechModels) { 85 | this.speech = speech; 86 | this.username = username; 87 | this.langModelName = langModelName; 88 | this.langModelId = langModelId; 89 | this.acousticModelName = acousticModelName; 90 | this.acousticModelId = acousticModelId; 91 | this.baseModel = baseModel; 92 | this.speechModels = speechModels; 93 | 94 | } 95 | 96 | /** 97 | * Create a new instance of WatsonSTT or get it from user session. 98 | * @param req Request object from express middleware 99 | */ 100 | static async getInstance(req: Request): Promise { 101 | const rev: WatsonSTT = req.user._watsonSTT; 102 | if (rev && rev instanceof WatsonSTT) { 103 | return Promise.resolve(rev); 104 | } 105 | if (!req.app.get('stt_service') || 106 | !req.app.get('stt_service').credentials) { 107 | req.log.error('Can not get credentials for Watson service'); 108 | return Promise.resolve(undefined); 109 | } 110 | 111 | const speech = getSTTV1(req.app.get('stt_service').credentials); 112 | const langModelId = await getCustomLangModelId(speech, req.user); 113 | const speechModels = await getBaseModels(speech); 114 | if (langModelId[0]) { 115 | req.log.error(`Custom language model error: ${langModelId[0]}`); 116 | return Promise.resolve(undefined); 117 | } 118 | const acousticModelId = await getCustomAcousticModelId(speech, req.user); 119 | if (acousticModelId[0]) { 120 | req.log.error(`Custom acoustic model error: ${acousticModelId[0]}`); 121 | return Promise.resolve(undefined); 122 | } 123 | 124 | return Promise.resolve( 125 | new WatsonSTT(speech, req.user.username, 126 | req.user.langModel, langModelId[1], 127 | req.user.acousticModel, acousticModelId[1], 128 | req.user.baseModel, speechModels[1])); 129 | } 130 | 131 | async addCorpus(corpusName: string, corpus: string): Promise<[STTError]> { 132 | 133 | const addCorpusParams = { 134 | customization_id: this.langModelId, 135 | corpus_file: Buffer.from(corpus), 136 | corpus_name: corpusName, 137 | allow_overwrite: true 138 | }; 139 | 140 | return new Promise<[STTError]>( (resolve, reject) => { 141 | this.speech.addCorpus(addCorpusParams, (error: STTError) => { 142 | if (error) { 143 | resolve([error]); 144 | } else { 145 | resolve([undefined]); 146 | } 147 | }); 148 | }); 149 | } 150 | 151 | /** 152 | * Add the specified word into the custom language model 153 | * @param word new word 154 | * @param soundsLike sounds like string array 155 | * @param displayAs display as caption 156 | */ 157 | async addWord(word: string, soundsLike?: string[], displayAs?: string ) 158 | : Promise<[STTError]> { 159 | 160 | const addWordParams = { 161 | customization_id: this.langModelId, 162 | word_name: word, 163 | word, 164 | sounds_like: soundsLike, 165 | display_as: displayAs 166 | }; 167 | return new Promise<[STTError]>((resolve, reject) => { 168 | this.speech.addWord(addWordParams, (error: STTError) => { 169 | if (error) { 170 | resolve([error]); 171 | } else { 172 | resolve([undefined]); 173 | } 174 | }); 175 | }); 176 | } 177 | 178 | async transcribe(buff: Buffer, fileType: string, name: string, 179 | languageModel: string, acousticModel: string ): 180 | Promise<[STTError, number?]> { 181 | const recognizeParams: RecognizeParams = { 182 | objectMode: true, 183 | interim_results: true, 184 | content_type: `audio/${fileType}`, 185 | model: this.baseModel, 186 | smart_formatting: true, 187 | timestamps: true, 188 | word_alternatives_threshold: 0.9 189 | }; 190 | 191 | const langModelisBaseModel = this.isBaseModel(languageModel); 192 | const acousticModelisBaseModel = this.isBaseModel(acousticModel); 193 | 194 | // Enable client to pass in base model. 195 | if (langModelisBaseModel){ 196 | recognizeParams.model = languageModel; 197 | } 198 | 199 | if (! langModelisBaseModel) { 200 | recognizeParams.language_customization_id = this.langModelId; 201 | } 202 | 203 | if (! acousticModelisBaseModel) { 204 | recognizeParams.acoustic_customization_id = this.acousticModelId; 205 | } 206 | 207 | const tf: TranscribeFile = { 208 | tid: tid++, 209 | name, 210 | languageModel, 211 | acousticModel, 212 | ws: null 213 | }; 214 | // add TranscribeFile to queue and wait for client's response 215 | // then a corresponding WebSocket will be added 216 | addQueue(tf); 217 | // Create the stream. 218 | const sstream = this.speech.recognizeUsingWebSocket(recognizeParams); 219 | sstream.on('data', (event: STTDef.SpeechRecognitionResults) => { 220 | if(event.results[0] && tf.ws && 221 | (event.results[0] as STTStreamResult).final === true) { 222 | 223 | const result = event.results[0].alternatives[0]; 224 | const timestamps = result.timestamps; 225 | tf.ws.send(JSON.stringify( 226 | { transcript: result.transcript.trim(), 227 | start: timestamps[0][1], 228 | stop: timestamps[timestamps.length - 1][2] 229 | })); 230 | } 231 | }); 232 | sstream.on('error', (event) => { 233 | if(tf.ws) { 234 | tf.ws.send(JSON.stringify({error: event.message})); 235 | } 236 | delQueue(tf); 237 | }); 238 | sstream.on('close', () => { 239 | if (tf.ws) { 240 | tf.ws.send(JSON.stringify({finished: true})); 241 | } 242 | delQueue(tf); 243 | }); 244 | 245 | return new Promise<[STTError, number?]>( (resolve, reject) => { 246 | let cursor = 0; 247 | const threeMB = 1024 * 1024 * 3; 248 | while (true) { 249 | let end = cursor + threeMB; 250 | if (end > buff.byteLength) { 251 | end = buff.byteLength; 252 | sstream.end(buff.slice(cursor, end), () => { 253 | resolve([undefined, tf.tid]); 254 | }); 255 | break; 256 | } 257 | sstream.write(buff.slice(cursor, end)); 258 | cursor += threeMB; 259 | } 260 | }); 261 | } 262 | /** 263 | * Get corpus information by the corpus name 264 | * @param corpusName corpus name 265 | */ 266 | async getCorpus(corpusName: string): Promise<[STTError, STTDef.Corpus?]> { 267 | const getCorpusParams = { 268 | customization_id: this.langModelId, 269 | corpus_name: corpusName 270 | }; 271 | 272 | return new Promise<[STTError, STTDef.Corpus?]>((resolve, reject) => { 273 | this.speech.getCorpus(getCorpusParams, 274 | (error: STTError, corpus: STTDef.Corpus)=> { 275 | if (error) { 276 | resolve([error]); 277 | } else { 278 | resolve([undefined, corpus]); 279 | } 280 | }); 281 | }); 282 | } 283 | 284 | /** 285 | * Delete a corpus by the corpus name 286 | * @param corpusName corpus name 287 | */ 288 | async deleteCorpus(corpusName: string): Promise<[STTError, string?]> { 289 | const deleteCorpusParams = { 290 | customization_id: this.langModelId, 291 | corpus_name: corpusName 292 | }; 293 | 294 | return new Promise<[STTError, string?]>((resolve, reject) => { 295 | this.speech.deleteCorpus(deleteCorpusParams, 296 | (error: STTError, corpusName: string) => { 297 | if (error) { 298 | resolve([error]); 299 | } else { 300 | resolve([undefined, corpusName]); 301 | } 302 | }); 303 | }); 304 | } 305 | 306 | /** 307 | * Get Corpora information of the custom language model 308 | */ 309 | async getCorpora(): Promise<[STTError, STTDef.Corpora?]> { 310 | const getCorporaParams = { 311 | customization_id: this.langModelId, 312 | }; 313 | 314 | return new Promise<[STTError, STTDef.Corpora?]>((resolve, reject) => { 315 | this.speech.listCorpora(getCorporaParams, 316 | (error: STTError, corpora: STTDef.Corpora) => { 317 | if (error) { 318 | resolve([error]); 319 | } else { 320 | resolve([undefined, corpora]); 321 | } 322 | }); 323 | }); 324 | } 325 | 326 | /** 327 | * Kick of the custom language model train 328 | */ 329 | async trainModel(): Promise<[STTError]>{ 330 | return new Promise<[STTError]>((resolve, reject) => { 331 | this.speech.trainLanguageModel({customization_id: this.langModelId }, 332 | (error: STTError) => { 333 | if (error) { 334 | resolve([error]); 335 | } else { 336 | resolve([undefined]); 337 | } 338 | }); 339 | }); 340 | } 341 | 342 | /** 343 | * Get detailed information of the custom language model 344 | */ 345 | async getLanguageModel(): Promise<[STTError, STTDef.LanguageModel?]> { 346 | return new Promise<[STTError, STTDef.LanguageModel?]>( 347 | (resolve, reject) => { 348 | this.speech.getLanguageModel({customization_id: this.langModelId }, 349 | (error: STTError, languageModel: STTDef.LanguageModel) => { 350 | if (error) { 351 | resolve([error]); 352 | } else { 353 | resolve([undefined, languageModel]); 354 | } 355 | }); 356 | }); 357 | } 358 | 359 | /** 360 | * List words of the custom language model 361 | */ 362 | async listWords(): Promise<[STTError, STTDef.Words?]> { 363 | return new Promise<[STTError, STTDef.Words?]>((resolve, reject) => { 364 | this.speech.listWords({customization_id: this.langModelId }, 365 | (error: STTError, results: STTDef.Words) => { 366 | if (error) { 367 | resolve([error]); 368 | } else { 369 | resolve([undefined, results]); 370 | } 371 | }); 372 | }); 373 | } 374 | 375 | /** 376 | * Delete a specific word from the custom language model 377 | * @param word word 378 | */ 379 | async deleteWord(word: string): Promise<[STTError]> { 380 | const deleteWordParams = { 381 | customization_id: this.langModelId, 382 | word_name: word 383 | }; 384 | 385 | return new Promise<[STTError]>((resolve, reject) => { 386 | this.speech.deleteWord(deleteWordParams, (error: STTError) => { 387 | if (error) { 388 | resolve([error]); 389 | } else { 390 | resolve([undefined]); 391 | } 392 | }); 393 | }); 394 | } 395 | 396 | /** 397 | * Check a model name is one of the base models 398 | * 399 | * @param model model name 400 | */ 401 | isBaseModel(model: string): boolean { 402 | let found = false; 403 | if (this.speechModels.models.find((element): boolean => { 404 | return element.name === model; 405 | })){ 406 | return found = true; 407 | } 408 | return found; 409 | } 410 | 411 | /** 412 | * Get detailed information of the custom acoustic model 413 | */ 414 | async getAcousticModel(): Promise<[STTError, STTDef.AcousticModel?]> { 415 | return new Promise<[STTError, STTDef.AcousticModel?]>( 416 | (resolve, reject) => { 417 | this.speech.getAcousticModel( 418 | {customization_id: this.acousticModelId }, 419 | (error: STTError, acousticModel: STTDef.AcousticModel) => { 420 | if (error) { 421 | resolve([error]); 422 | } else { 423 | resolve([undefined, acousticModel]); 424 | } 425 | }); 426 | }); 427 | } 428 | 429 | /** 430 | * Kick of the custom acoustic model training process 431 | */ 432 | async trainAcousticModel(): Promise<[STTError]>{ 433 | const trainAcousticModelParams = { 434 | customization_id: this.acousticModelId, 435 | custom_language_model_id: this.langModelId 436 | }; 437 | return new Promise<[STTError]>((resolve, reject) => { 438 | this.speech.trainAcousticModel(trainAcousticModelParams, 439 | (error: STTError) => { 440 | if (error) { 441 | resolve([error]); 442 | } else { 443 | resolve([undefined]); 444 | } 445 | }); 446 | }); 447 | } 448 | 449 | /** 450 | * Add audio to custom acoustic model 451 | * @param params 452 | */ 453 | async addAudio(params: STTDef.AddAudioParams): Promise<[STTError]> { 454 | return new Promise<[STTError]>( (resolve, reject) => { 455 | this.speech.addAudio(params, (error: STTError) => { 456 | if (error) { 457 | resolve([error]); 458 | } else { 459 | resolve([undefined]); 460 | } 461 | }); 462 | }); 463 | } 464 | 465 | /** 466 | * List the audio of the custom acoustic model 467 | */ 468 | async listAudio(): Promise<[STTError, STTDef.AudioResources?]> { 469 | const listAudioParams = { 470 | customization_id: this.acousticModelId, 471 | }; 472 | 473 | return new Promise<[STTError, STTDef.AudioResources?]>( 474 | (resolve, reject) => { 475 | this.speech.listAudio(listAudioParams, 476 | (error: STTError, audioResources: STTDef.AudioResources) => { 477 | if (error) { 478 | resolve([error]); 479 | } else { 480 | resolve([undefined, audioResources]); 481 | } 482 | }); 483 | }); 484 | } 485 | 486 | /** 487 | * Delete a specific audio from the custom acoustic model 488 | * @param audioName name of the audio 489 | */ 490 | async deleteAudio(audioName: string): Promise<[STTError, string?]> { 491 | const deleteAudioParams = { 492 | customization_id: this.acousticModelId, 493 | audio_name: audioName 494 | }; 495 | return new Promise<[STTError, string?]>((resolve, reject) => { 496 | this.speech.deleteAudio(deleteAudioParams, 497 | (error: STTError, audioName: string) => { 498 | if (error) { 499 | resolve([error]); 500 | } else { 501 | resolve([undefined, audioName]); 502 | } 503 | }); 504 | }); 505 | } 506 | } 507 | 508 | export function getSTTV1 (credentials: STTCredential) { 509 | const options: STTDef.Options = Object.create(credentials); 510 | if ((credentials as STTCredentialAPIKey).apikey) { 511 | options.iam_apikey = (credentials as STTCredentialAPIKey).apikey; 512 | } 513 | return new SpeechToTextV1(options); 514 | } 515 | 516 | export type STTCredential = STTCredentialUserPass | STTCredentialAPIKey; 517 | 518 | /** 519 | * If the service is not using IAM yet, the credential would 520 | * be username and password. 521 | */ 522 | export interface STTCredentialUserPass { 523 | username: string; 524 | password: string; 525 | url: string; 526 | } 527 | 528 | /** 529 | * If the service is using IAM, the credential would be 530 | * api key. 531 | */ 532 | export interface STTCredentialAPIKey { 533 | apikey: string; 534 | url: string; 535 | } 536 | 537 | interface CustomModel { 538 | name: string; 539 | id: string; 540 | } 541 | 542 | // custom language model cache 543 | const models: CustomModel[] = []; 544 | // custom acoustic model cache 545 | const acoustics: CustomModel[] = []; 546 | 547 | /** 548 | * Get or create the custom model that belongs to the current 549 | * user and return the model id. 550 | * @param req The Request object of the express middleware 551 | */ 552 | function getCustomLangModelId( 553 | speech: SpeechToTextV1, user: User): Promise<[STTError, string?]> { 554 | const modelName = user.langModel; 555 | 556 | for (let index = 0, len = models.length; index < len; index++) { 557 | if (models[index].name === modelName) { 558 | return Promise.resolve<[STTError, string?]>( 559 | [undefined, models[index].id]); 560 | } 561 | } 562 | 563 | return new Promise<[STTError, string?]>((resolve) => { 564 | speech.listLanguageModels(null, 565 | (error: STTError, languageModels: STTDef.LanguageModels) => { 566 | if (error) { 567 | return resolve([error]); 568 | } else { 569 | const customModels = languageModels.customizations; 570 | if (customModels) { 571 | for (let i = 0, len = customModels.length; i < len; i++) { 572 | if (customModels[i].name === modelName) { 573 | models.push( 574 | { 575 | name: modelName, 576 | id: customModels[i].customization_id 577 | }); 578 | return resolve([undefined, customModels[i].customization_id]); 579 | } 580 | } 581 | } 582 | } 583 | // Need to create custom model here. 584 | // The default base model for which the initial creation of 585 | // custom language model is based on is configurable in model/user.json 586 | speech.createLanguageModel( 587 | { 588 | name: modelName, 589 | base_model_name: user.baseModel, 590 | description: `Custom model for ${user.username}`, 591 | }, 592 | (error: STTError, languageModel: STTDef.LanguageModel) => { 593 | if (error) { 594 | return resolve([error]); 595 | } else { 596 | models.push( 597 | {name: modelName, id: languageModel.customization_id}); 598 | return resolve([undefined, languageModel.customization_id]); 599 | } 600 | }); 601 | }); 602 | }); 603 | } 604 | /** 605 | * Get the list of supported base models 606 | * @param speech SpeechtoTextV1 607 | */ 608 | function getBaseModels(speech: SpeechToTextV1): 609 | Promise<[STTError,STTDef.SpeechModels?]>{ 610 | return new Promise<[STTError, STTDef.SpeechModels?]>( 611 | (resolve, reject) => { 612 | speech.listModels(null, 613 | (error: STTError, results: STTDef.SpeechModels) => { 614 | if (error) { 615 | resolve([error]); 616 | } else { 617 | resolve([undefined, results]); 618 | } 619 | }); 620 | }); 621 | } 622 | 623 | function getCustomAcousticModelId( 624 | speech: SpeechToTextV1, user: User): Promise<[STTError, string?]> { 625 | const modelName = user.acousticModel; 626 | for (let index = 0, len = acoustics.length; index < len; index++) { 627 | if (acoustics[index].name === modelName) { 628 | return Promise.resolve<[STTError, string?]>( 629 | [undefined, acoustics[index].id]); 630 | } 631 | } 632 | 633 | return new Promise<[STTError, string?]>((resolve) => { 634 | speech.listAcousticModels(null, 635 | (error: STTError, acousticModels: STTDef.AcousticModels) => { 636 | if (error) { 637 | return resolve([error]); 638 | } else { 639 | const customModels: STTDef.AcousticModel[] = 640 | acousticModels.customizations; 641 | if (customModels) { 642 | for (let i = 0, len = customModels.length; i < len; i++) { 643 | if (customModels[i].name === modelName) { 644 | acoustics.push( 645 | { 646 | name: modelName, 647 | id: customModels[i].customization_id 648 | }); 649 | return resolve([undefined, customModels[i].customization_id]); 650 | } 651 | } 652 | } 653 | } 654 | // Create custom acoustic model here if it doesn't exist. 655 | // The default base model for which the initial creation of 656 | // custom acoustic model is based on is configurable in model/user.json 657 | speech.createAcousticModel( 658 | { 659 | name: modelName, 660 | base_model_name: user.baseModel, 661 | description: `Custom acoustic model for ${user.username}`, 662 | }, 663 | (error: STTError, acousticModel: STTDef.AcousticModel) => { 664 | if (error) { 665 | return resolve([error]); 666 | } else { 667 | acoustics.push( 668 | { 669 | name: modelName, 670 | id: acousticModel.customization_id 671 | }); 672 | return resolve([undefined, acousticModel.customization_id]); 673 | } 674 | }); 675 | }); 676 | }); 677 | } 678 | 679 | const queue: Queue = {}; 680 | let tid = 0; 681 | interface Queue { 682 | [tid:string]: T; 683 | } 684 | 685 | interface TranscribeFile { 686 | tid: number; 687 | name:string; 688 | languageModel: string; 689 | acousticModel: string; 690 | ws: WebSocket; 691 | } 692 | 693 | const addQueue = (tf: TranscribeFile) => { 694 | queue[tf.tid] = tf; 695 | }; 696 | 697 | const delQueue = (tf: TranscribeFile) => { 698 | delete queue[tf.tid]; 699 | }; 700 | 701 | export let wsHandler = (socket: WebSocket): void => { 702 | socket.on('message', (message) => { 703 | if (typeof(message) === 'string') { 704 | const json: TranscribeFile = JSON.parse(message as string); 705 | const tf = queue[json.tid]; 706 | if (tf) { 707 | tf.ws = socket; 708 | tf.ws.onclose = tf.ws.onerror = () => { 709 | tf.ws = null; 710 | }; 711 | } 712 | } 713 | }); 714 | }; 715 | -------------------------------------------------------------------------------- /services.sample.json: -------------------------------------------------------------------------------- 1 | { 2 | "services": { 3 | "code-pattern-custom-language-model": [ 4 | { 5 | "credentials": { 6 | "apikey": "", 7 | "url": "" 8 | }, 9 | "label": "speech_to_text", 10 | "name": "code-pattern-custom-language-model" 11 | } 12 | ] 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "module": "commonjs", 4 | "moduleResolution": "node", 5 | "noImplicitAny": true, 6 | "sourceMap": true, 7 | "removeComments": true, 8 | "preserveConstEnums": true, 9 | "declaration": true, 10 | "target": "es5", 11 | "lib": ["es2015", "dom"], 12 | "outDir": "./dist", 13 | "noUnusedLocals": true, 14 | "noImplicitReturns": true, 15 | "noImplicitThis": true, 16 | "alwaysStrict": true, 17 | "noUnusedParameters": false, 18 | "pretty": true, 19 | "noFallthroughCasesInSwitch": true, 20 | "allowUnreachableCode": false, 21 | "experimentalDecorators": true 22 | }, 23 | "include": ["server/**/*"] 24 | } 25 | -------------------------------------------------------------------------------- /tslint.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": [], 3 | "rules": { 4 | "array-type": [true, "array-simple"], 5 | "arrow-return-shorthand": true, 6 | "ban": [true, 7 | ["fit"], 8 | ["fdescribe"], 9 | ["xit"], 10 | ["xdescribe"], 11 | ["fitAsync"], 12 | ["xitAsync"], 13 | ["fitFakeAsync"], 14 | ["xitFakeAsync"] 15 | ], 16 | "ban-types": [true, 17 | ["Object", "Use {} instead."], 18 | ["String", "Use 'string' instead."], 19 | ["Number", "Use 'number' instead."], 20 | ["Boolean", "Use 'boolean' instead."] 21 | ], 22 | "class-name": true, 23 | "curly": true, 24 | "interface-name": [true, "never-prefix"], 25 | "jsdoc-format": true, 26 | "forin": false, 27 | "label-position": true, 28 | "max-line-length": { 29 | "options": {"limit": 80, "ignore-pattern": "^import |^export "} 30 | }, 31 | "new-parens": true, 32 | "no-angle-bracket-type-assertion": true, 33 | "no-any": true, 34 | "no-construct": true, 35 | "no-consecutive-blank-lines": true, 36 | "no-debugger": true, 37 | "no-default-export": true, 38 | "no-inferrable-types": true, 39 | "no-namespace": [true, "allow-declarations"], 40 | "no-reference": true, 41 | "no-string-throw": true, 42 | "no-unused-expression": true, 43 | "no-var-keyword": true, 44 | "object-literal-shorthand": true, 45 | "only-arrow-functions": [true, "allow-declarations", "allow-named-functions"], 46 | "prefer-const": true, 47 | "quotemark": [true, "single"], 48 | "radix": true, 49 | "restrict-plus-operands": true, 50 | "semicolon": [true, "always", "ignore-bound-class-methods"], 51 | "switch-default": true, 52 | "triple-equals": [true, "allow-null-check"], 53 | "use-isnan": true, 54 | "use-default-type-parameter": true, 55 | "variable-name": [ 56 | true, 57 | "check-format", 58 | "ban-keywords", 59 | "allow-leading-underscore", 60 | "allow-trailing-underscore" 61 | ] 62 | } 63 | } 64 | --------------------------------------------------------------------------------