├── .devcontainer └── devcontainer.json ├── .eslintrc.jest.yml ├── .eslintrc.production.yml ├── .eslintrc.react.yml ├── .eslintrc.typescript.yml ├── .eslintrc.yml ├── .github ├── ISSUE_TEMPLATE │ ├── bug.yml │ └── config.yml ├── pull_request_template.md └── workflows │ ├── bump-dependencies.yml │ ├── bump-scaffold.yml │ ├── continuous-deployment.yml │ ├── list-outdated-dependencies.yml │ ├── prepare-release.yml │ ├── publish-release.yml │ └── pull-request-validation.yml ├── .gitignore ├── .prettierrc.yml ├── CHANGELOG.md ├── LICENSE ├── README.md ├── package-lock.json ├── package.json └── packages ├── integration-test ├── .eslintrc.custom.yml ├── .eslintrc.yml ├── .gitignore ├── babel.config.json ├── dummy.ts ├── importDefault.test.mjs ├── jest.config.json ├── package.json ├── requireDefault.test.mjs ├── tsconfig.custom.json └── tsconfig.json ├── mocked-speech-recognition ├── .eslintrc.custom.yml ├── .eslintrc.yml ├── .gitignore ├── jest.config.json ├── package.json ├── src │ ├── SpeechGrammarList.ts │ ├── SpeechRecognition.ts │ ├── SpeechRecognitionAlternative.ts │ ├── SpeechRecognitionErrorEvent.ts │ ├── SpeechRecognitionEvent.ts │ ├── SpeechRecognitionResult.ts │ ├── SpeechRecognitionResultList.ts │ ├── index.spec.ts │ ├── index.ts │ ├── tsconfig.custom.json │ ├── tsconfig.json │ ├── tsconfig.precommit.production.json │ ├── tsconfig.precommit.test.json │ └── tsconfig.test.json └── tsup.config.ts ├── pages ├── .eslintrc.custom.yml ├── .eslintrc.yml ├── .gitignore ├── package.json ├── public │ └── index.html └── src │ ├── App.tsx │ ├── DictationTextBox.tsx │ ├── index.css │ ├── index.tsx │ ├── tsconfig.custom.json │ └── tsconfig.json └── react-dictate-button ├── .eslintrc.custom.yml ├── .eslintrc.yml ├── .gitignore ├── __tests__ ├── __setup__ │ └── typingTestTransformer.js ├── abortOnUnmount.spec.tsx ├── continuous.spec.tsx ├── continuousModeNotSupported.spec.tsx ├── errorWithNoSpeech.tsx ├── multipleInterims.spec.tsx ├── simple.checkbox.spec.tsx ├── simple.spec.tsx ├── stopAfterOnDictate.spec.tsx ├── stopBeforeResult.spec.tsx ├── stopWithoutFinalize.spec.tsx └── types │ └── .gitignore ├── jest.config.json ├── package.json ├── src ├── Composer.tsx ├── Context.ts ├── DictateButton.tsx ├── DictateCheckbox.tsx ├── DictateEventHandler.ts ├── EndEventHandler.ts ├── ErrorEventHandler.ts ├── ProgressEventHandler.ts ├── RawEventHandler.ts ├── SpeechGrammarListPolyfill.ts ├── SpeechRecognitionPolyfill.ts ├── StartEventHandler.ts ├── TypedEventHandler.ts ├── env.d.ts ├── hooks │ ├── internal │ │ └── useDictateContext.ts │ ├── useAbortable.ts │ ├── useReadyState.ts │ └── useSupported.ts ├── index.spec.ts ├── index.ts ├── internal.ts ├── private │ └── assert.ts ├── tsconfig.custom.json ├── tsconfig.json ├── tsconfig.precommit.production.json ├── tsconfig.precommit.test.json ├── tsconfig.test.json ├── usePrevious.ts └── vendorPrefix.ts └── tsup.config.ts /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | { 2 | "forwardPorts": [8000], 3 | "image": "mcr.microsoft.com/devcontainers/typescript-node", 4 | "remoteUser": "node", 5 | "updateContentCommand": "npm clean-install && npm run build" 6 | } 7 | -------------------------------------------------------------------------------- /.eslintrc.jest.yml: -------------------------------------------------------------------------------- 1 | env: 2 | commonjs: true 3 | es2021: true 4 | es2022: true 5 | jest: true 6 | rules: 7 | # Disable for convenience 8 | react/display-name: off 9 | # Disable 
for convenience 10 | react/prop-types: off 11 | '@typescript-eslint/no-require-imports': off 12 | -------------------------------------------------------------------------------- /.eslintrc.production.yml: -------------------------------------------------------------------------------- 1 | extends: 2 | - plugin:import/recommended 3 | plugins: 4 | - import 5 | rules: 6 | import/no-deprecated: error 7 | import/no-empty-named-blocks: error 8 | import/no-extraneous-dependencies: error 9 | import/no-mutable-exports: error 10 | import/no-named-as-default: error 11 | import/no-named-as-default-member: error 12 | import/no-unused-modules: error 13 | import/no-amd: error 14 | import/no-commonjs: error 15 | import/no-absolute-path: error 16 | import/no-cycle: error 17 | import/no-dynamic-require: error 18 | import/no-self-import: error 19 | import/no-useless-path-segments: error 20 | import/no-webpack-loader-syntax: error 21 | import/consistent-type-specifier-style: 22 | - error 23 | - prefer-inline 24 | import/exports-last: error 25 | import/extensions: 26 | - error 27 | - ignorePackages # eslint-plugin-import does not understand named import 28 | import/first: error 29 | import/newline-after-import: error 30 | import/no-anonymous-default-export: error 31 | import/no-duplicates: error 32 | import/no-namespace: error 33 | import/no-unassigned-import: 34 | - error 35 | - allow: 36 | - '**/*.css' 37 | - dotenv/config 38 | settings: 39 | import/extensions: 40 | - .cjs 41 | - .mjs 42 | - .js 43 | - .jsx 44 | - .cts 45 | - .mts 46 | - .ts 47 | - .tsx 48 | import/resolver: 49 | node: true 50 | typescript: true 51 | -------------------------------------------------------------------------------- /.eslintrc.react.yml: -------------------------------------------------------------------------------- 1 | extends: 2 | - plugin:react/recommended 3 | plugins: 4 | - react 5 | settings: 6 | react: 7 | version: 18.3.1 8 | -------------------------------------------------------------------------------- /.eslintrc.typescript.yml: -------------------------------------------------------------------------------- 1 | extends: 2 | - plugin:@typescript-eslint/recommended 3 | parser: '@typescript-eslint/parser' 4 | plugins: 5 | - '@typescript-eslint' 6 | rules: 7 | # Shortening if-statement into &&, ||, or ternary operators. 
8 | '@typescript-eslint/no-unused-expressions': off 9 | 10 | '@typescript-eslint/no-unused-vars': 11 | - error 12 | - argsIgnorePattern: ^_ 13 | caughtErrorsIgnorePattern: ^_ 14 | destructuredArrayIgnorePattern: ^_ 15 | varsIgnorePattern: ^_ 16 | -------------------------------------------------------------------------------- /.eslintrc.yml: -------------------------------------------------------------------------------- 1 | extends: 2 | - eslint:recommended 3 | overrides: 4 | - extends: .eslintrc.react.yml 5 | files: 6 | - '**/*.jsx' 7 | - '**/*.tsx' 8 | - extends: .eslintrc.typescript.yml 9 | files: 10 | - '**/*.cts' 11 | - '**/*.mts' 12 | - '**/*.ts' 13 | - '**/*.tsx' 14 | - extends: .eslintrc.jest.yml 15 | files: 16 | - '**/__tests__/**' 17 | - '**/*.spec.cjs' 18 | - '**/*.spec.mjs' 19 | - '**/*.spec.js' 20 | - '**/*.spec.jsx' 21 | - '**/*.spec.cts' 22 | - '**/*.spec.mts' 23 | - '**/*.spec.ts' 24 | - '**/*.spec.tsx' 25 | - '**/*.test.cjs' 26 | - '**/*.test.mjs' 27 | - '**/*.test.js' 28 | - '**/*.test.jsx' 29 | - '**/*.test.cts' 30 | - '**/*.test.mts' 31 | - '**/*.test.ts' 32 | - '**/*.test.tsx' 33 | - '**/test/**' 34 | - extends: .eslintrc.production.yml 35 | excludedFiles: 36 | - '**/__tests__/**' 37 | - '**/*.spec.cjs' 38 | - '**/*.spec.mjs' 39 | - '**/*.spec.js' 40 | - '**/*.spec.jsx' 41 | - '**/*.spec.cts' 42 | - '**/*.spec.mts' 43 | - '**/*.spec.ts' 44 | - '**/*.spec.tsx' 45 | - '**/*.test.cjs' 46 | - '**/*.test.mjs' 47 | - '**/*.test.js' 48 | - '**/*.test.jsx' 49 | - '**/*.test.cts' 50 | - '**/*.test.mts' 51 | - '**/*.test.ts' 52 | - '**/*.test.tsx' 53 | - '**/test/**' 54 | files: 55 | - '**/*.cjs' 56 | - '**/*.mjs' 57 | - '**/*.js' 58 | - '**/*.jsx' 59 | - '**/*.cts' 60 | - '**/*.mts' 61 | - '**/*.ts' 62 | - '**/*.tsx' 63 | parserOptions: 64 | ecmaVersion: latest 65 | sourceType: module 66 | plugins: 67 | - prettier 68 | root: true 69 | rules: 70 | prettier/prettier: error 71 | no-empty: 72 | - error 73 | - allowEmptyCatch: true 74 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug.yml: -------------------------------------------------------------------------------- 1 | name: Bug report 2 | description: Use this template to report a bug. 3 | labels: 4 | - bug 5 | body: 6 | - type: markdown 7 | attributes: 8 | value: | 9 | We run this project out of our spare time and may not monitor this repository every day. Our support capacity is very limited. 10 | 11 | Developers in professional capacity will receive prioritized support. 12 | - type: dropdown 13 | attributes: 14 | label: Version 15 | description: | 16 | Please verify the issue on latest versions. Support on non-latest version is minimal and on a per case basis. 17 | 18 | If you are using latest production, please verify against our latest development version as the issue could have been resolved recently. To install latest development version, run `npm install react-dictate-button@main`. 19 | 20 | multiple: true 21 | options: 22 | - Latest production (@latest) 23 | - Latest development (@main) 24 | - Not latest 25 | validations: 26 | required: true 27 | - type: dropdown 28 | attributes: 29 | label: Module resolution 30 | description: Please describe how you import our package. 
31 |       multiple: true
32 |       options:
33 |         - 'ESM: import { DictateButton } from "react-dictate-button"'
34 |         - 'CommonJS: require("react-dictate-button")'
35 |         - Others or unrelated
36 |     validations:
37 |       required: true
38 |   - type: dropdown
39 |     attributes:
40 |       label: Bundler
41 |       description: For apps, please tell us what bundler is used to create your app bundle.
42 |       multiple: true
43 |       options:
44 |         - 'Webpack: Plain'
45 |         - 'Webpack: Complex'
46 |         - ESBuild
47 |         - tsup
48 |         - 'create-react-app'
49 |         - Not an app
50 |         - Others or unrelated
51 |     validations:
52 |       required: true
53 |   - type: dropdown
54 |     attributes:
55 |       label: Environment
56 |       description: |
57 |         We support [Browserslist "defaults"](https://github.com/browserslist/browserslist#full-list) and [Internet Explorer mode](https://aka.ms/iemode). Support of other environments is minimal.
58 | 
59 |         When using in a sandboxed environment (e.g. SharePoint, Salesforce, etc.), please verify the issue outside of the sandbox. If the issue is related to the sandbox, please file it with your sandbox vendor.
60 |       multiple: true
61 |       options:
62 |         - '> 0.5%, last 2 versions, Firefox ESR, not dead'
63 |         - Internet Explorer mode
64 |         - Server-side rendering
65 |         - Others or unrelated
66 |     validations:
67 |       required: true
68 |   - type: textarea
69 |     attributes:
70 |       label: Test case
71 |       description: |
72 |         Please write a minimal test case that fails the scenario, with comments in [BDD format (given/when/then)](https://www.thoughtworks.com/insights/blog/applying-bdd-acceptance-criteria-user-stories).
73 | 
74 |         To protect against regressions, once the issue is resolved, your test case will be added to [our test suite](../tree/main/packages/integration-test/).
75 | 
76 |         For your convenience, a basic test case is provided below. For advanced scenarios, please look at [our test suite](../tree/main/packages/integration-test/).
77 | 
78 |         *Support will be slow or denied if a test case is not provided.*
79 |       render: js
80 |       value: |
81 |         import { act, fireEvent, render, screen } from '@testing-library/react';
82 |         import React from 'react';
83 |         import { DictateButton } from 'react-dictate-button';
84 | 
85 |         test('simple scenario', async () => {
86 |           const handleDictate = jest.fn();
87 | 
88 |           render(
89 |             <DictateButton
90 |               onDictate={handleDictate}
91 |               speechGrammarList={window.SpeechGrammarList}
92 |               speechRecognition={window.SpeechRecognition}
93 |             >
94 |               Click me
95 |             </DictateButton>
96 |           );
97 | 
98 |           act(() => {
99 |             fireEvent.click(screen.getByText('Click me'));
100 |           });
101 | 
102 |           expect(handleDictate).toHaveBeenCalledTimes(1);
103 |         });
104 |   - type: input
105 |     attributes:
106 |       label: Coding sandbox URL
107 |       description: |
108 |         If you have a minimal repro in a coding sandbox, please provide a URL here. Please prepare it from scratch. We cannot work directly on your app source code.
109 |       placeholder: 'https://'
110 |   - type: textarea
111 |     attributes:
112 |       label: Console errors
113 |       description: Please copy any related errors printed to the console here.
114 |       render: js
115 |   - type: textarea
116 |     attributes:
117 |       label: Screenshots
118 |       description: Please remove or obscure any personally identifiable information from your screenshots or recordings.
119 |   - type: textarea
120 |     attributes:
121 |       label: Additional context
122 |       description: If any of the answers are "others or unrelated", please explain it here.
123 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: false 2 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | ## Changelog 2 | 3 | > Please copy and paste new entries from `CHANGELOG.md` here. 4 | 5 | ## Specific changes 6 | 7 | > Please list each individual specific change in this pull request. 8 | 9 | - -------------------------------------------------------------------------------- /.github/workflows/bump-dependencies.yml: -------------------------------------------------------------------------------- 1 | name: Bump dependencies 2 | 3 | on: 4 | workflow_dispatch: {} 5 | 6 | jobs: 7 | call-workflow: 8 | permissions: 9 | contents: write 10 | id-token: write 11 | secrets: 12 | APP_ID: ${{ secrets.WORKFLOW_BOT_APP_ID }} 13 | PRIVATE_KEY: ${{ secrets.WORKFLOW_BOT_PRIVATE_KEY }} 14 | uses: compulim/workflows/.github/workflows/bump-dependencies.yml@main 15 | -------------------------------------------------------------------------------- /.github/workflows/bump-scaffold.yml: -------------------------------------------------------------------------------- 1 | name: Bump scaffold 2 | 3 | on: 4 | workflow_dispatch: 5 | inputs: 6 | package-name: 7 | default: react-dictate-button 8 | description: Name of the package 9 | required: true 10 | type: string 11 | use-react: 12 | default: true 13 | description: Use React 14 | required: true 15 | type: boolean 16 | skip-integration-test: 17 | default: false 18 | description: Skip integration test 19 | required: true 20 | type: boolean 21 | 22 | jobs: 23 | call-workflow: 24 | permissions: 25 | contents: write 26 | id-token: write 27 | secrets: 28 | APP_ID: ${{ secrets.WORKFLOW_BOT_APP_ID }} 29 | PRIVATE_KEY: ${{ secrets.WORKFLOW_BOT_PRIVATE_KEY }} 30 | uses: compulim/workflows/.github/workflows/bump-scaffold.yml@main 31 | with: 32 | package-name: ${{ inputs.package-name }} 33 | skip-integration-test: ${{ inputs.skip-integration-test }} 34 | use-react: ${{ inputs.use-react }} 35 | -------------------------------------------------------------------------------- /.github/workflows/continuous-deployment.yml: -------------------------------------------------------------------------------- 1 | name: Continuous deployment 2 | 3 | on: 4 | push: 5 | branches: [main] 6 | paths-ignore: 7 | - '.devcontainer/**' 8 | - '.github/**' 9 | workflow_dispatch: {} 10 | 11 | jobs: 12 | call-workflow: 13 | permissions: 14 | attestations: write 15 | contents: write 16 | id-token: write 17 | pages: write 18 | secrets: 19 | APP_ID: ${{ secrets.WORKFLOW_BOT_APP_ID }} 20 | NPM_TOKEN: ${{ secrets.NPM_TOKEN }} 21 | PRIVATE_KEY: ${{ secrets.WORKFLOW_BOT_PRIVATE_KEY }} 22 | uses: compulim/workflows/.github/workflows/continuous-deployment.yml@main 23 | with: 24 | package-name: react-dictate-button 25 | -------------------------------------------------------------------------------- /.github/workflows/list-outdated-dependencies.yml: -------------------------------------------------------------------------------- 1 | name: List outdated dependencies 2 | 3 | on: 4 | schedule: 5 | - cron: '0 4 * * 1' 6 | workflow_dispatch: {} 7 | 8 | jobs: 9 | call-workflow: 10 | permissions: 11 | contents: read 12 | uses: 
compulim/workflows/.github/workflows/list-outdated-dependencies.yml@main 13 | -------------------------------------------------------------------------------- /.github/workflows/prepare-release.yml: -------------------------------------------------------------------------------- 1 | name: Prepare release 2 | 3 | on: 4 | workflow_dispatch: 5 | inputs: 6 | version-to-bump: 7 | default: patch 8 | description: Version to bump 9 | options: 10 | - major 11 | - minor 12 | - patch 13 | required: true 14 | type: choice 15 | 16 | jobs: 17 | call-workflow: 18 | permissions: 19 | contents: write 20 | id-token: write 21 | secrets: 22 | APP_ID: ${{ secrets.WORKFLOW_BOT_APP_ID }} 23 | PRIVATE_KEY: ${{ secrets.WORKFLOW_BOT_PRIVATE_KEY }} 24 | uses: compulim/workflows/.github/workflows/prepare-release.yml@main 25 | with: 26 | version-to-bump: ${{ inputs.version-to-bump }} 27 | -------------------------------------------------------------------------------- /.github/workflows/publish-release.yml: -------------------------------------------------------------------------------- 1 | name: Publish release 2 | 3 | on: 4 | release: 5 | types: [created] 6 | 7 | jobs: 8 | call-workflow: 9 | permissions: 10 | contents: write 11 | pages: write 12 | id-token: write 13 | secrets: 14 | APP_ID: ${{ secrets.WORKFLOW_BOT_APP_ID }} 15 | NPM_TOKEN: ${{ secrets.NPM_TOKEN }} 16 | PRIVATE_KEY: ${{ secrets.WORKFLOW_BOT_PRIVATE_KEY }} 17 | uses: compulim/workflows/.github/workflows/publish-release.yml@main 18 | with: 19 | package-name: react-dictate-button 20 | tag: ${{ github.ref_name }} 21 | -------------------------------------------------------------------------------- /.github/workflows/pull-request-validation.yml: -------------------------------------------------------------------------------- 1 | name: Pull request validation 2 | 3 | on: 4 | pull_request: 5 | branches: [main] 6 | workflow_dispatch: {} 7 | 8 | jobs: 9 | call-workflow: 10 | permissions: 11 | contents: read 12 | strategy: 13 | matrix: 14 | switch: [current, react-16, react-17, react-18] 15 | uses: compulim/workflows/.github/workflows/pull-request-validation.yml@main 16 | with: 17 | package-name: react-dictate-button 18 | skip-integration-test: false 19 | switch: ${{ matrix.switch }} 20 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /node_modules 2 | -------------------------------------------------------------------------------- /.prettierrc.yml: -------------------------------------------------------------------------------- 1 | arrowParens: avoid 2 | bracketSpacing: true 3 | endOfLine: auto 4 | jsxBracketSameLine: false 5 | printWidth: 120 6 | proseWrap: preserve 7 | quoteProps: as-needed 8 | semi: true 9 | singleQuote: true 10 | tabWidth: 2 11 | trailingComma: none 12 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to this project will be documented in this file. 4 | 5 | The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) 6 | and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). 7 | 8 | Breaking changes are indicated by 💥. 
9 | 
10 | ## [4.0.0] - 2025-02-13
11 | 
12 | ### Added
13 | 
14 | - Works with Web Speech API providers without `resultIndex` in `SpeechRecognitionResultEvent`, by [@compulim](https://github.com/compulim), in PR [#86](https://github.com/compulim/react-dictate-button/pull/86)
15 | 
16 | ### Changed
17 | 
18 | - Reduced React version requirement from 16.9.0 to 16.8.6, by [@compulim](https://github.com/compulim), in PR [#83](https://github.com/compulim/react-dictate-button/pull/83)
19 | - 💥 Stopping an unabortable recognition (`SpeechRecognition.abort()` is undefined) will warn instead of throw, by [@compulim](https://github.com/compulim), in PR [#88](https://github.com/compulim/react-dictate-button/pull/88)
20 | 
21 | ### Fixed
22 | 
23 | - Fixed `dictate` event should dispatch before `end` event, by [@compulim](https://github.com/compulim), in PR [#87](https://github.com/compulim/react-dictate-button/pull/87)
24 | - Fixed [#84](https://github.com/compulim/react-dictate-button/issues/84). Logic should rely on the `SpeechRecognition.continuous` property rather than the `continuous` prop, by [@compulim](https://github.com/compulim), in PR [#87](https://github.com/compulim/react-dictate-button/pull/87)
25 | - Fixed `end` event should only be dispatched after `SpeechRecognition.error` event, instead of always being emitted on stop/unmount, by [@compulim](https://github.com/compulim), in PR [#88](https://github.com/compulim/react-dictate-button/pull/88)
26 | 
27 | ## [3.0.0] - 2025-01-31
28 | 
29 | ### Added
30 | 
31 | - Added continuous mode support, by [@RushikeshGavali](https://github.com/RushikeshGavali) and [@compulim](https://github.com/compulim), in PR [#73](https://github.com/compulim/react-dictate-button/pull/73) and PR [#78](https://github.com/compulim/react-dictate-button/pull/78)
32 |   - In continuous mode, `dictate` and `progress` events will only report the latest round of recognition
33 |   - After `end`, the push button and checkbox will not be turned off automatically
34 |   - When stopping the recognition, it will emit an `error` event with error `aborted`; this is the same behavior as in non-continuous mode
35 |   - If the underlying Web Speech API implementation does not support continuous mode, it should work as if in interactive mode
36 | - Added `start` and `end` events, by [@compulim](https://github.com/compulim), in PR [#78](https://github.com/compulim/react-dictate-button/pull/78)
37 | 
38 | ### Changed
39 | 
40 | - Modernized project scaffold, by [@compulim](https://github.com/compulim), in PR [#74](https://github.com/compulim/react-dictate-button/pull/74)
41 |   - Ported all code to TypeScript
42 |   - Monorepo management changed to [npm workspaces](https://docs.npmjs.com/cli/v11/using-npm/workspaces) from [lerna](https://lerna.js.org/)
43 |   - Bundler changed to [esbuild](https://esbuild.github.io/)/[tsup](https://github.com/egoist/tsup) from [Babel](https://babeljs.io/)
44 |   - Test environment changed to [Happy DOM](https://github.com/capricorn86/happy-dom) from [JSDOM](https://github.com/jsdom/jsdom)
45 |   - Added ES Modules in addition to CommonJS
46 |   - Removed [`husky`](https://www.npmjs.com/package/husky) and [`lint-staged`](https://www.npmjs.com/package/lint-staged)
47 | 
48 | ### Removed
49 | 
50 | - 💥 Deprecated `Context`, use the `useAbortable`, `useReadyState`, and `useSupported` hooks instead, by [@compulim](https://github.com/compulim), in PR [#74](https://github.com/compulim/react-dictate-button/pull/74)
51 | - 💥 Deprecated default exports, use `import { DictateButton } from 'react-dictate-button'`
instead, by [@compulim](https://github.com/compulim), in PR [#74](https://github.com/compulim/react-dictate-button/pull/74)
52 | - 💥 Removed `defaultProps` and `propTypes`, by [@compulim](https://github.com/compulim), in PR [#74](https://github.com/compulim/react-dictate-button/pull/74)
53 | 
54 | ## [2.0.1] - 2021-06-01
55 | 
56 | ### Fixed
57 | 
58 | - Fixed [#65](https://github.com/compulim/react-dictate-button/issues/65). Setting `started` to `false` after the `onDictate` callback should succeed even on an unabortable recognition, by [@compulim](https://github.com/compulim), in PR [#66](https://github.com/compulim/react-dictate-button/pull/66)
59 | 
60 | ## [2.0.0] - 2021-05-16
61 | 
62 | ### Added
63 | 
64 | - Added `module` field for exporting in ES Modules, by [@compulim](https://github.com/compulim) in PR [#58](https://github.com/compulim/react-dictate-button/pull/58)
65 | - Added [`eslint`](https://npmjs.com/package/eslint), [`husky`](https://npmjs.com/package/husky), and [`lint-staged`](https://npmjs.com/package/lint-staged) for code hygiene, by [@compulim](https://github.com/compulim) in PR [#58](https://github.com/compulim/react-dictate-button/pull/58)
66 | - Added and exported hooks: `useAbortable`, `useReadyState`, and `useSupported`, by [@compulim](https://github.com/compulim) in PR [#58](https://github.com/compulim/react-dictate-button/pull/58)
67 | 
68 | ### Changed
69 | 
70 | - 💥 Requires [`react@>=16.8.0`](https://npmjs.com/package/react) and [`core-js@3`](https://npmjs.com/package/core-js)
71 | - 💥 Modifying props while recognition has started will no longer abort recognition immediately, props will be updated in the next recognition
72 | - 💥 `SpeechGrammarList` is only constructed when the `grammar` prop is present
73 | - 💥 If the `speechRecognition` prop is not present, capability detection is now done through `window.mediaDevices.getUserMedia`
74 | - Bumped all dependencies and removed unneeded dependencies, by [@compulim](https://github.com/compulim) in PR [#58](https://github.com/compulim/react-dictate-button/pull/58)
75 |   - [`@babel/cli@7.13.16`](https://npmjs.com/package/@babel/cli)
76 |   - [`@babel/core@7.14.2`](https://npmjs.com/package/@babel/core)
77 |   - [`@babel/preset-env@7.14.2`](https://npmjs.com/package/@babel/preset-env)
78 |   - [`@babel/preset-react@7.13.13`](https://npmjs.com/package/@babel/preset-react)
79 |   - [`lerna@4.0.0`](https://npmjs.com/package/lerna)
80 |   - [`react@16.8.0`](https://npmjs.com/package/react)
81 | 
82 | ### Fixed
83 | 
84 | - Fixed [#39](https://github.com/compulim/react-dictate-button/issues/39), added `type="button"` attribute to `<button>`, by [@compulim](https://github.com/compulim) in PR [#58](https://github.com/compulim/react-dictate-button/pull/58)
85 | 
86 | ## [1.2.2] - 2020-02-27
87 | 
88 | ### Fixed
89 | 
90 | - Fixed [#12](https://github.com/compulim/react-dictate-button/issues/12), worked around an [Angular/zone.js bug](https://github.com/angular/angular/issues/31750) by not using Symbol.iterator for iterable objects, by [@compulim](https://github.com/compulim) in PR [#13](https://github.com/compulim/react-dictate-button/pull/13)
91 | 
92 | ## [1.2.1] - 2019-12-04
93 | 
94 | ### Fixed
95 | 
96 | - `Composer.onProgress` should set `abortable` on the first event (based on `SpeechRecognition.audioStart` event), by [@compulim](https://github.com/compulim) in PR [#5](https://github.com/compulim/react-dictate-button/pull/5)
97 | 
98 | ## [1.2.0] - 2019-12-03
99 | 
100 | ### Added
101 | 
102 | - Support unabortable speech recognition, by
[@compulim](https://github.com/compulim) in PR [#4](https://github.com/compulim/react-dictate-button/pull/4). 103 | 104 | ### Changed 105 | 106 | - Bumped to [`event-as-promise@1.0.5`](https://npmjs.com/package/event-as-promise/v/1.0.5) 107 | - Moved `lerna bootstrap` from hoisted to local 108 | 109 | ## [1.1.3] - 2018-07-19 110 | 111 | ### Fixed 112 | 113 | - Moved [`memoize-one`](https://npmjs.com/package/memoize-one) to production dependencies 114 | 115 | ## [1.1.2] - 2018-06-29 116 | 117 | ### Added 118 | 119 | - Added `onClick` prop, can use `preventDefault` to stop speech recognition from starting 120 | 121 | ### Fixed 122 | 123 | - Fixed `Composer.speechRecognition`/`speechGrammarList` should not be required 124 | 125 | ## [1.1.1] - 2018-06-29 126 | 127 | ### Fixed 128 | 129 | - Fixed `extra` prop not passed to `` 130 | 131 | ## [1.1.0] - 2018-06-29 132 | 133 | ### Added 134 | 135 | - Added `extra` prop to copy to `SpeechRecognition` 136 | 137 | ### Changed 138 | 139 | - Bumped to [`memoize-one@4.0.0`](https://npmjs.com/package/memoize-one/v/4.0.0) 140 | 141 | ## [1.0.0] - 2018-06-26 142 | 143 | ### Added 144 | 145 | - Initial release 146 | 147 | [4.0.0]: https://github.com/compulim/react-dictate-button/compare/v3.0.0...v4.0.0 148 | [3.0.0]: https://github.com/compulim/react-dictate-button/compare/v2.0.1...v3.0.0 149 | [2.0.1]: https://github.com/compulim/react-dictate-button/compare/v2.0.0...v2.0.1 150 | [2.0.0]: https://github.com/compulim/react-dictate-button/compare/v1.2.2...v2.0.0 151 | [1.2.2]: https://github.com/compulim/react-dictate-button/compare/v1.2.1...v1.2.2 152 | [1.2.1]: https://github.com/compulim/react-dictate-button/compare/v1.2.0...v1.2.1 153 | [1.2.0]: https://github.com/compulim/react-dictate-button/compare/v1.1.3...v1.2.0 154 | [1.1.3]: https://github.com/compulim/react-dictate-button/compare/v1.1.2...v1.1.3 155 | [1.1.2]: https://github.com/compulim/react-dictate-button/compare/v1.1.1...v1.1.2 156 | [1.1.1]: https://github.com/compulim/react-dictate-button/compare/v1.1.0...v1.1.1 157 | [1.1.0]: https://github.com/compulim/react-dictate-button/compare/v1.0.0...v1.1.0 158 | [1.0.0]: https://github.com/compulim/react-dictate-button/releases/tag/v1.0.0 159 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 William Wong 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
-------------------------------------------------------------------------------- /README.md: --------------------------------------------------------------------------------
1 | # react-dictate-button
2 | 
3 | [![npm version](https://badge.fury.io/js/react-dictate-button.svg)](https://badge.fury.io/js/react-dictate-button) [![Build Status](https://travis-ci.org/compulim/react-dictate-button.svg?branch=master)](https://travis-ci.org/compulim/react-dictate-button)
4 | 
5 | A button to start speech recognition using the [Web Speech API](https://developer.mozilla.org/en-US/docs/Web/API/Web_Speech_API/Using_the_Web_Speech_API), with an easy-to-understand event lifecycle.
6 | 
7 | # Breaking changes
8 | 
9 | ## [2.0.0] - 2021-05-15
10 | 
11 | - Requires [`react@>=16.8.0`](https://npmjs.com/package/react) and [`core-js@3`](https://npmjs.com/package/core-js)
12 | - Modifying props while recognition has started will no longer abort recognition immediately, props will be updated in the next recognition
13 | - `SpeechGrammarList` is only constructed when the `grammar` prop is present
14 | - If the `speechRecognition` prop is not present, capability detection is now done through `window.mediaDevices.getUserMedia`
15 | 
16 | # Demo
17 | 
18 | Try out this component at [compulim.github.io/react-dictate-button](https://compulim.github.io/react-dictate-button/).
19 | 
20 | # Background
21 | 
22 | Reasons why we need to build our own component, instead of using [existing packages](https://www.npmjs.com/search?q=react%20speech) on NPM:
23 | 
24 | - Most browsers require speech recognition (or WebRTC) to be triggered by a user event (button click)
25 | - Bring your own engine for the [Web Speech API](https://developer.mozilla.org/en-US/docs/Web/API/Web_Speech_API/Using_the_Web_Speech_API)
26 |   - Enable speech recognition on unsupported browsers by bridging it with a [cloud-based service](https://npmjs.com/package/web-speech-cognitive-services)
27 | - Support grammar list thru [JSpeech Grammar Format](https://www.w3.org/TR/jsgf/)
28 | - Ability to interrupt recognition
29 | - Ability to [morph into other elements](#customization-thru-morphing)
30 | 
31 | # How to use
32 | 
33 | First, install our production version with `npm install react-dictate-button`, or our development version with `npm install react-dictate-button@master`.
34 | 
35 | ```jsx
36 | import { DictateButton } from 'react-dictate-button';
37 | 
38 | export default () => (
39 |   <DictateButton
40 |     className="my-dictate-button"
41 |     grammar="#JSGF V1.0; grammar districts; public <district> = Tuen Mun | Yuen Long;"
42 |     lang="en-US"
43 |     onDictate={handleDictate}
44 |     onProgress={handleProgress}
45 |   >
46 |     Start/stop
47 |   </DictateButton>
48 | );
49 | ```
50 | 
51 | ## Props
52 | 
53 | | Name | Type | Default | Description |
54 | | ------------------- | ------------------------ | ----------------------------------------------- | ----------- |
55 | | `className` | `string` | `undefined` | Class name to apply to the button |
56 | | `continuous` | `boolean` | `false` | `true` to set Web Speech API to continuous mode, which continues to recognize until stopped, otherwise, `false` |
57 | | `disabled` | `boolean` | `false` | `true` to abort ongoing recognition and disable the button, otherwise, `false` |
58 | | `extra` | `{ [key: string]: any }` | `{}` | Additional properties to set on [`SpeechRecognition`](https://developer.mozilla.org/en-US/docs/Web/API/SpeechRecognition) before `start`, useful when bringing your own [`SpeechRecognition`](https://developer.mozilla.org/en-US/docs/Web/API/SpeechRecognition) |
59 | | `grammar` | `string` | `undefined` | Grammar list in [JSGF format](https://developer.mozilla.org/en-US/docs/Web/API/SpeechGrammarList/addFromString) |
60 | | `lang` | `string` | `undefined` | Language to recognize, for example, `'en-US'` or [`navigator.language`](https://developer.mozilla.org/en-US/docs/Web/API/NavigatorLanguage/language) |
61 | | `speechGrammarList` | `any` | `window.SpeechGrammarList` (or vendor-prefixed) | Bring your own [`SpeechGrammarList`](https://developer.mozilla.org/en-US/docs/Web/API/SpeechGrammarList) |
62 | | `speechRecognition` | `any` | `window.SpeechRecognition` (or vendor-prefixed) | Bring your own [`SpeechRecognition`](https://developer.mozilla.org/en-US/docs/Web/API/SpeechRecognition) |
63 | 
64 | > Note: change of `extra`, `grammar`, `lang`, `speechGrammarList`, and `speechRecognition` will not take effect until the next speech recognition is started.
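The engine is pluggable through the `speechGrammarList` and `speechRecognition` props above, for example, to bridge to a cloud-based service. A minimal sketch of bringing your own implementation (the `./my-speech-polyfill` module and its exports are hypothetical, for illustration only):

```jsx
import { DictateButton } from 'react-dictate-button';

// Hypothetical polyfill: any constructors implementing the Web Speech API shape will work.
import { MySpeechGrammarList, MySpeechRecognition } from './my-speech-polyfill';

export default () => (
  <DictateButton lang="en-US" speechGrammarList={MySpeechGrammarList} speechRecognition={MySpeechRecognition}>
    Start/stop
  </DictateButton>
);
```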
65 | 
66 | ## Events
67 | 
68 | | Name | Signature | Description |
69 | | ------------ | --------- | ----------- |
70 | | `onClick` | `(event: MouseEvent) => void` | Emit when the user clicks on the button; calling `preventDefault` will stop recognition from starting |
71 | | `onDictate` | `({ result: { confidence: number, transcript: string }, type: 'dictate' }) => void` | Emit when recognition is completed |
72 | | `onError` | `(event: SpeechRecognitionErrorEvent) => void` | Emit when an error has occurred or recognition is interrupted, see below |
73 | | `onProgress` | `({ abortable: boolean, results: [{ confidence: number, transcript: string }], type: 'progress' }) => void` | Emit for interim results, the array contains every segment of recognized text |
74 | | `onRawEvent` | `(event: SpeechRecognitionEvent) => void` | Emit for handling raw events from [`SpeechRecognition`](https://developer.mozilla.org/en-US/docs/Web/API/SpeechRecognition) |
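For example, a minimal sketch wiring these events together (the component and handler names here are ours; the `DictateEventHandler`, `ErrorEventHandler`, and `ProgressEventHandler` types are exported from this package, as seen in the test files later in this repository):

```tsx
import React, { useCallback, useState } from 'react';
import {
  DictateButton,
  type DictateEventHandler,
  type ErrorEventHandler,
  type ProgressEventHandler
} from 'react-dictate-button';

const DictationDemo = () => {
  const [text, setText] = useState('');

  // "dictate" fires once per recognized round with the final result; "result" may be absent.
  const handleDictate = useCallback<DictateEventHandler>(({ result }) => setText(result?.transcript ?? ''), []);

  // "progress" fires with interim results while the user is speaking; "results" may be absent.
  const handleProgress = useCallback<ProgressEventHandler>(
    ({ results }) => setText((results || []).map(({ transcript }) => transcript).join(' ')),
    []
  );

  // "error" fires when recognition fails or is interrupted, e.g. with error code "aborted".
  const handleError = useCallback<ErrorEventHandler>(event => console.error(event.error), []);

  return (
    <React.Fragment>
      <DictateButton onDictate={handleDictate} onError={handleError} onProgress={handleProgress}>
        Start/stop
      </DictateButton>
      <p>{text}</p>
    </React.Fragment>
  );
};

export default DictationDemo;
```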
126 | 
127 | ## Hooks
128 | 
129 | > Although previous versions exported a React Context, it is recommended to use the hooks interface instead.
130 | 
131 | | Name | Signature | Description |
132 | | --------------- | ----------- | ----------- |
133 | | `useAbortable` | `[boolean]` | If the ongoing speech recognition has an `abort()` function and can be aborted, `true`, otherwise, `false` |
134 | | `useReadyState` | `[number]` | Returns the current state of recognition, refer to [this section](#function-as-a-child) |
135 | | `useSupported` | `[boolean]` | If speech recognition is supported, `true`, otherwise, `false` |
136 | 
137 | ### Checking if speech recognition is supported
138 | 
139 | To determine whether speech recognition is supported in the browser:
140 | 
141 | - If the `speechRecognition` prop is `undefined`
142 |   - If both [`window.navigator.mediaDevices`](https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices) and [`window.navigator.mediaDevices.getUserMedia`](https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia) are falsy, it is not supported
143 |     - Probably the browser is not on a secure HTTP connection
144 |   - If both `window.SpeechRecognition` and its vendor-prefixed counterparts are falsy, it is not supported
145 | - If recognition failed once with the `not-allowed` error code, it is not supported
146 | - Otherwise, it is supported
147 | 
148 | > Even if the browser is on an insecure HTTP connection, `window.SpeechRecognition` (or vendor-prefixed) will continue to be truthy. Instead, `mediaDevices.getUserMedia` is used for capability detection.
149 | 
150 | ### Event lifecycle
151 | 
152 | One of the design aspects is to make sure events are easy to understand and deterministic. The first rule of thumb is to make sure `onProgress` will lead to either `onDictate` or `onError`. Here are some samples of event firing sequences (tested on Chrome 67):
153 | 
154 | - Happy path: speech is recognized
155 |   1. `onStart`
156 |   1. `onProgress({})` (just started, therefore, no `results`)
157 |   1. `onProgress({ results: [] })`
158 |   1. `onDictate({ result: ... })`
159 |   1. `onEnd`
160 | - Happy path: speech is recognized with continuous mode
161 |   1. `onStart`
162 |   1. `onProgress({})` (just started, therefore, no `results`)
163 |   1. `onProgress({ results: [] })`
164 |   1. `onDictate({ result: ... })`
165 |   1. `onProgress({ results: [] })`
166 |   1. `onDictate({ result: ... })`
167 |   1. `onEnd`
168 | - Heard some sound, but nothing can be recognized
169 |   1. `onStart`
170 |   1. `onProgress({})`
171 |   1. `onDictate({})` (nothing is recognized, therefore, no `result`)
172 |   1. `onEnd`
173 | - Nothing is heard (audio device available but muted)
174 |   1. `onStart`
175 |   1. `onProgress({})`
176 |   1. `onError({ error: 'no-speech' })`
177 |   1. `onEnd`
178 | - Recognition aborted
179 |   1. `onStart`
180 |   1. `onProgress({})`
181 |   1. `onProgress({ results: [] })`
182 |   1. While speech is getting recognized, set `props.disabled` to `true` to abort recognition
183 |   1. `onError({ error: 'aborted' })`
184 |   1. `onEnd`
185 | - Not authorized to use speech or no audio device is available
186 |   1. `onStart`
187 |   1. `onError({ error: 'not-allowed' })`
188 |   1. `onEnd`
189 | 
190 | ## Function as a child
191 | 
192 | Instead of passing child elements, you can pass a function to render different content based on the ready state. This is called [function as a child](https://reactjs.org/docs/render-props.html#using-props-other-than-render).
193 | 194 | | Ready state | Description | 195 | | ----------- | -------------------------------------------------------------------------- | 196 | | `0` | Not started | 197 | | `1` | Starting recognition engine, recognition is not ready until it turn to `2` | 198 | | `2` | Recognizing | 199 | | `3` | Stopping | 200 | 201 | For example, 202 | 203 | ```jsx 204 | 205 | {({ readyState }) => 206 | readyState === 0 ? 'Start' : readyState === 1 ? 'Starting...' : readyState === 2 ? 'Listening...' : 'Stopping...' 207 | } 208 | 209 | ``` 210 | 211 | # Customization thru morphing 212 | 213 | You can build your own component by copying our layout code, without messing around the [logic code behind the scene](packages/component/src/Composer.js). For details, please refer to [`DictateButton.js`](packages/component/src/DictateButton.js), [`DictateCheckbox.js`](packages/component/src/DictateCheckbox.js), and [`DictationTextBox.js`](packages/pages/src/DictationTextBox.js). 214 | 215 | ## Checkbox version 216 | 217 | In addition to ` 57 | 65 | 66 | ); 67 | }; 68 | 69 | type DictationTextBoxProps = { 70 | buttonClassName?: string | undefined; 71 | className?: string | undefined; 72 | disabled?: boolean | undefined; 73 | grammar?: string | undefined; 74 | lang?: string | undefined; 75 | listeningText?: string | undefined; 76 | onChange?: ((event: { value: string | undefined }) => void) | undefined; 77 | onError?: ErrorEventHandler | undefined; 78 | speechGrammarList?: SpeechGrammarListPolyfill | undefined; 79 | speechRecognition?: SpeechRecognitionPolyfill | undefined; 80 | startText?: string | undefined; 81 | stopText?: string | undefined; 82 | textBoxClassName?: string | undefined; 83 | value?: string | undefined; 84 | }; 85 | 86 | const DictationTextBox = ({ 87 | buttonClassName, 88 | className, 89 | disabled, 90 | grammar, 91 | lang, 92 | listeningText = 'Listening…', 93 | onChange, 94 | onError, 95 | speechGrammarList, 96 | speechRecognition, 97 | startText = 'Dictate', 98 | stopText = 'Stop', 99 | textBoxClassName, 100 | value 101 | }: DictationTextBoxProps) => { 102 | const [interim, setInterim] = useState(''); 103 | const [listening, setListening] = useState(false); 104 | const [started, setStarted] = useState(false); 105 | const onChangeRef = useRefFrom(onChange); 106 | const onErrorRef = useRefFrom(onError); 107 | 108 | const handleChange = useCallback>( 109 | ({ currentTarget: { value } }) => onChangeRef.current && onChangeRef.current({ value }), 110 | [onChangeRef] 111 | ); 112 | 113 | const handleClick = useCallback(() => setStarted(started => !started), [setStarted]); 114 | 115 | const handleDictate = useCallback( 116 | ({ result }) => { 117 | const { transcript: value } = result || {}; 118 | 119 | setInterim(undefined); 120 | setListening(false); 121 | setStarted(false); 122 | 123 | onChangeRef.current && onChangeRef.current({ value }); 124 | }, 125 | [onChangeRef, setInterim, setListening, setStarted] 126 | ); 127 | 128 | const handleError = useCallback( 129 | event => { 130 | console.log('error', event); 131 | 132 | setInterim(undefined); 133 | setListening(false); 134 | setStarted(false); 135 | 136 | onErrorRef.current && onErrorRef.current(event); 137 | }, 138 | [onErrorRef, setInterim, setListening, setStarted] 139 | ); 140 | 141 | const handleProgress = useCallback( 142 | ({ results }) => { 143 | setInterim((results || []).map(result => result.transcript.trim()).join(' ')); 144 | setListening(true); 145 | }, 146 | [setInterim, setListening] 147 | ); 148 | 149 | return ( 150 | 160 | 175 | 
176 | ); 177 | }; 178 | 179 | export default DictationTextBox; 180 | -------------------------------------------------------------------------------- /packages/pages/src/index.css: -------------------------------------------------------------------------------- 1 | body { 2 | margin: 0; 3 | font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen', 4 | 'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue', 5 | sans-serif; 6 | -webkit-font-smoothing: antialiased; 7 | -moz-osx-font-smoothing: grayscale; 8 | } 9 | 10 | code { 11 | font-family: source-code-pro, Menlo, Monaco, Consolas, 'Courier New', 12 | monospace; 13 | } 14 | -------------------------------------------------------------------------------- /packages/pages/src/index.tsx: -------------------------------------------------------------------------------- 1 | import './index.css'; 2 | import React from 'react'; 3 | // This is needed for testing React 16 and 17. 4 | // eslint-disable-next-line react/no-deprecated 5 | import { render } from 'react-dom'; 6 | 7 | import App from './App.tsx'; 8 | 9 | const rootElement = document.getElementById('root'); 10 | 11 | // rootElement && createRoot(rootElement).render(); 12 | 13 | render(, rootElement); 14 | -------------------------------------------------------------------------------- /packages/pages/src/tsconfig.custom.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "@tsconfig/strictest/tsconfig.json" 3 | } 4 | -------------------------------------------------------------------------------- /packages/pages/src/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "allowImportingTsExtensions": true, 4 | "esModuleInterop": true, 5 | "jsx": "react", 6 | "lib": [ 7 | "DOM", 8 | "ESNext", 9 | "WebWorker" 10 | ], 11 | "moduleResolution": "Bundler", 12 | "noEmit": true, 13 | "strict": true, 14 | "target": "ESNext", 15 | "types": [] 16 | }, 17 | "extends": "./tsconfig.custom.json" 18 | } 19 | -------------------------------------------------------------------------------- /packages/react-dictate-button/.eslintrc.custom.yml: -------------------------------------------------------------------------------- 1 | env: 2 | browser: true 3 | -------------------------------------------------------------------------------- /packages/react-dictate-button/.eslintrc.yml: -------------------------------------------------------------------------------- 1 | extends: 2 | - ./.eslintrc.custom.yml 3 | -------------------------------------------------------------------------------- /packages/react-dictate-button/.gitignore: -------------------------------------------------------------------------------- 1 | /.env 2 | /*.tgz 3 | /CHANGELOG.md 4 | /coverage/ 5 | /dist/ 6 | /LICENSE 7 | /node_modules/ 8 | /README.md 9 | -------------------------------------------------------------------------------- /packages/react-dictate-button/__tests__/__setup__/typingTestTransformer.js: -------------------------------------------------------------------------------- 1 | // Notes: to test changes in this file, run "jest" with "--no-cache" argument. 
2 | 
3 | const run = ({ filename }) => {
4 |   const escapeStringRegexp = require('escape-string-regexp');
5 |   const fs = require('fs/promises');
6 |   const { extname } = require('path');
7 |   const typeScript = require('typescript');
8 | 
9 |   const TS_EXPECT_ERROR = /(\/\/\s+)(@ts-expect-error)[\s+(.*)]/gu;
10 |   const TSCONFIG = {
11 |     allowImportingTsExtensions: true,
12 |     allowSyntheticDefaultImports: true,
13 |     jsx: typeScript.JsxEmit.React,
14 |     noEmit: true,
15 |     skipLibCheck: true,
16 |     strict: true
17 |   };
18 | 
19 |   // Compiles the typing test file and throws if TypeScript reports any diagnostics.
20 |   async function compile(filename) {
21 |     const program = typeScript.createProgram([filename], TSCONFIG);
22 | 
23 |     const emitResult = program.emit();
24 |     const allDiagnostics = typeScript.getPreEmitDiagnostics(program).concat(emitResult.diagnostics);
25 | 
26 |     allDiagnostics.forEach(({ file, messageText, start }) => {
27 |       if (file && start) {
28 |         const { line, character } = file.getLineAndCharacterOfPosition(start);
29 |         const message = typeScript.flattenDiagnosticMessageText(messageText, '\n');
30 | 
31 |         throw new Error(`Failed to compile ${file.fileName} (${line + 1},${character + 1}): ${message}`);
32 |       } else {
33 |         throw new Error(typeScript.flattenDiagnosticMessageText(messageText, '\n'));
34 |       }
35 |     });
36 |   }
37 | 
38 |   // Strips every @ts-expect-error directive, recompiles, and verifies that each resulting
39 |   // compile error matches the expectation written in the comment carrying the directive.
40 |   async function checkExpectError(filename) {
41 |     const sourceText = await fs.readFile(filename, 'utf-8');
42 |     const sourceTextWithoutExpectError = sourceText.replace(TS_EXPECT_ERROR, '$1');
43 | 
44 |     const extension = extname(filename);
45 |     const tempFilename = filename.substring(0, filename.length - extension.length) + `.tmp${extension}`;
46 | 
47 |     await fs.writeFile(tempFilename, sourceTextWithoutExpectError);
48 | 
49 |     try {
50 |       const program = typeScript.createProgram([tempFilename], TSCONFIG);
51 | 
52 |       const emitResult = program.emit();
53 |       const allDiagnostics = typeScript.getPreEmitDiagnostics(program).concat(emitResult.diagnostics);
54 | 
55 |       allDiagnostics.forEach(({ file, messageText, start }) => {
56 |         if (file && start) {
57 |           const { line } = file.getLineAndCharacterOfPosition(start);
58 |           const message = typeScript.flattenDiagnosticMessageText(messageText, '\n');
59 | 
60 |           // The line above the diagnostic carried the original @ts-expect-error comment.
61 |           const expectedErrorLine = file.getFullText().split('\n')[line - 1];
62 |           const expectedError = expectedErrorLine?.replace(/\s*\/\/\s+/u, '').trim();
63 |           let expectedErrors = [expectedError];
64 | 
65 |           try {
66 |             // The comment may also contain a JSON array of acceptable error messages.
67 |             const parsed = JSON.parse(expectedError);
68 | 
69 |             if (Array.isArray(parsed) && parsed.every(value => typeof value === 'string')) {
70 |               expectedErrors = parsed;
71 |             }
72 |           } catch {}
73 | 
74 |           expect(message).toEqual(expect.stringMatching(new RegExp(expectedErrors.map(escapeStringRegexp).join('|'))));
75 |         } else {
76 |           throw new Error(typeScript.flattenDiagnosticMessageText(messageText, '\n'));
77 |         }
78 |       });
79 |     } finally {
80 |       await fs.unlink(tempFilename);
81 |     }
82 |   }
83 | 
84 |   describe(filename, () => {
85 |     test('should succeed', () => compile(filename));
86 |     test('should have @ts-expect-error describing compile errors correctly', () => checkExpectError(filename));
87 |   });
88 | };
89 | 
90 | module.exports = {
91 |   process(_, filename) {
92 |     return { code: `(${run})(${JSON.stringify({ filename })})` };
93 |   }
94 | };
95 | 
-------------------------------------------------------------------------------- /packages/react-dictate-button/__tests__/abortOnUnmount.spec.tsx: --------------------------------------------------------------------------------
1 | /** @jest-environment @happy-dom/jest-environment */
2 | 
3 | import { act,
fireEvent, render, screen, type RenderResult } from '@testing-library/react';
4 | import React, { Fragment } from 'react';
5 | import {
6 |   DictateButton,
7 |   type DictateEventHandler,
8 |   type EndEventHandler,
9 |   type ProgressEventHandler,
10 |   type StartEventHandler
11 | } from '../src/index';
12 | import { SpeechRecognition } from '../src/internal';
13 | 
14 | describe('abort on unmount scenario', () => {
15 |   let abort: jest.SpyInstance | undefined;
16 |   let constructSpeechRecognition: jest.Mock;
17 |   let onDictate: jest.Mock<ReturnType<DictateEventHandler>, Parameters<DictateEventHandler>, undefined>;
18 |   let onEnd: jest.Mock<ReturnType<EndEventHandler>, Parameters<EndEventHandler>, undefined>;
19 |   let onProgress: jest.Mock<ReturnType<ProgressEventHandler>, Parameters<ProgressEventHandler>, undefined>;
20 |   let onStart: jest.Mock<ReturnType<StartEventHandler>, Parameters<StartEventHandler>, undefined>;
21 |   let renderResult: RenderResult;
22 |   let start: jest.SpyInstance | undefined;
23 | 
24 |   beforeEach(() => {
25 |     constructSpeechRecognition = jest.fn().mockImplementationOnce(() => {
26 |       const speechRecognition = new SpeechRecognition();
27 | 
28 |       abort = jest.spyOn(speechRecognition, 'abort');
29 |       start = jest.spyOn(speechRecognition, 'start');
30 | 
31 |       return speechRecognition;
32 |     });
33 | 
34 |     onDictate = jest.fn();
35 |     onEnd = jest.fn();
36 |     onProgress = jest.fn();
37 |     onStart = jest.fn();
38 | 
39 |     renderResult = render(
40 |       <DictateButton
41 |         onDictate={onDictate}
42 |         onEnd={onEnd}
43 |         onProgress={onProgress}
44 |         onStart={onStart}
45 |         speechRecognition={constructSpeechRecognition}
46 |       >
47 |         Click me
48 |       </DictateButton>
49 |     );
50 | 
51 |     act(() => {
52 |       fireEvent.click(screen.getByText('Click me'));
53 |     });
54 | 
55 |     expect(constructSpeechRecognition).toHaveBeenCalledTimes(1);
56 |     expect(abort).toHaveBeenCalledTimes(0);
57 |     expect(start).toHaveBeenCalledTimes(1);
58 | 
59 |     act(() => {
60 |       const speechRecognition: SpeechRecognition = constructSpeechRecognition.mock.results[0]?.value;
61 | 
62 |       speechRecognition.dispatchEvent(new Event('start', {}));
63 |       speechRecognition.dispatchEvent(new Event('audiostart', {}));
64 |       speechRecognition.dispatchEvent(new Event('soundstart', {}));
65 |       speechRecognition.dispatchEvent(new Event('speechstart', {}));
66 |     });
67 | 
68 |     expect(onProgress).toHaveBeenCalledTimes(1);
69 |   });
70 | 
71 |   describe('when unmounted', () => {
72 |     beforeEach(() => renderResult.rerender(<Fragment />));
73 | 
74 |     test('abort() should have been called', () => expect(abort).toHaveBeenCalledTimes(1));
75 |   });
76 | });
77 | 
-------------------------------------------------------------------------------- /packages/react-dictate-button/__tests__/continuous.spec.tsx: --------------------------------------------------------------------------------
1 | /** @jest-environment @happy-dom/jest-environment */
2 | 
3 | import { act, fireEvent, render, screen, type RenderResult } from '@testing-library/react';
4 | import React from 'react';
5 | import {
6 |   DictateButton,
7 |   type DictateEventHandler,
8 |   type EndEventHandler,
9 |   type ProgressEventHandler,
10 |   type StartEventHandler
11 | } from '../src/index';
12 | import {
13 |   SpeechRecognition,
14 |   SpeechRecognitionAlternative,
15 |   SpeechRecognitionEvent,
16 |   SpeechRecognitionResult,
17 |   SpeechRecognitionResultList
18 | } from '../src/internal';
19 | 
20 | describe('with continuous mode', () => {
21 |   let constructSpeechRecognition: jest.Mock;
22 |   let onDictate: jest.Mock<ReturnType<DictateEventHandler>, Parameters<DictateEventHandler>, undefined>;
23 |   let onEnd: jest.Mock<ReturnType<EndEventHandler>, Parameters<EndEventHandler>, undefined>;
24 |   let onProgress: jest.Mock<ReturnType<ProgressEventHandler>, Parameters<ProgressEventHandler>, undefined>;
25 |   let onStart: jest.Mock<ReturnType<StartEventHandler>, Parameters<StartEventHandler>, undefined>;
26 |   let renderResult: RenderResult;
27 |   let start: jest.SpyInstance | undefined;
28 | 
29 |   test('should fire onDictate and onProgress events accordingly', () => {
30 |     constructSpeechRecognition = jest.fn().mockImplementationOnce(() => {
31 | 
const speechRecognition = new SpeechRecognition(); 32 | 33 | start = jest.spyOn(speechRecognition, 'start'); 34 | 35 | return speechRecognition; 36 | }); 37 | 38 | onDictate = jest.fn(); 39 | onEnd = jest.fn(); 40 | onProgress = jest.fn(); 41 | onStart = jest.fn(); 42 | 43 | renderResult = render( 44 | 53 | Click me 54 | 55 | ); 56 | 57 | act(() => { 58 | fireEvent.click(screen.getByText('Click me')); 59 | }); 60 | 61 | const speechRecognition: SpeechRecognition = constructSpeechRecognition.mock.results[0]?.value; 62 | 63 | expect(onProgress).toHaveBeenCalledTimes(0); 64 | expect(start).toHaveBeenCalledTimes(1); 65 | 66 | act(() => { 67 | speechRecognition.dispatchEvent(new Event('start', {})); 68 | speechRecognition.dispatchEvent(new Event('audiostart', {})); 69 | speechRecognition.dispatchEvent(new Event('soundstart', {})); 70 | speechRecognition.dispatchEvent(new Event('speechstart', {})); 71 | }); 72 | 73 | expect(onEnd).toHaveBeenCalledTimes(0); 74 | expect(onStart).toHaveBeenCalledTimes(1); 75 | 76 | onEnd.mockReset(); 77 | onStart.mockReset(); 78 | 79 | expect(onProgress).toHaveBeenCalledTimes(1); 80 | expect(onProgress.mock.calls[0][0]).toHaveProperty('type', 'progress'); 81 | expect(onProgress.mock.calls[0][0]).not.toHaveProperty('results', expect.anything()); 82 | 83 | // --- 84 | 85 | onDictate.mockReset(); 86 | onProgress.mockReset(); 87 | 88 | act(() => { 89 | speechRecognition.dispatchEvent( 90 | new SpeechRecognitionEvent('result', { 91 | resultIndex: 0, 92 | results: new SpeechRecognitionResultList( 93 | new SpeechRecognitionResult(new SpeechRecognitionAlternative(0.009999999776482582, 'test')) 94 | ) 95 | }) 96 | ); 97 | }); 98 | 99 | expect(onDictate).toHaveBeenCalledTimes(0); 100 | expect(onEnd).toHaveBeenCalledTimes(0); 101 | expect(onProgress).toHaveBeenCalledTimes(1); 102 | expect(onStart).toHaveBeenCalledTimes(0); 103 | expect(onProgress.mock.calls[0][0]).toHaveProperty('type', 'progress'); 104 | expect(onProgress.mock.calls[0][0]).toHaveProperty('results', [ 105 | { confidence: 0.009999999776482582, transcript: 'test' } 106 | ]); 107 | 108 | // --- 109 | 110 | onDictate.mockReset(); 111 | onProgress.mockReset(); 112 | 113 | act(() => { 114 | speechRecognition.dispatchEvent( 115 | new SpeechRecognitionEvent('result', { 116 | resultIndex: 0, 117 | results: new SpeechRecognitionResultList( 118 | new SpeechRecognitionResult(new SpeechRecognitionAlternative(0.009999999776482582, 'testing')) 119 | ) 120 | }) 121 | ); 122 | }); 123 | 124 | expect(onDictate).toHaveBeenCalledTimes(0); 125 | expect(onEnd).toHaveBeenCalledTimes(0); 126 | expect(onProgress).toHaveBeenCalledTimes(1); 127 | expect(onStart).toHaveBeenCalledTimes(0); 128 | expect(onProgress.mock.calls[0][0]).toHaveProperty('type', 'progress'); 129 | expect(onProgress.mock.calls[0][0]).toHaveProperty('results', [ 130 | { confidence: 0.009999999776482582, transcript: 'testing' } 131 | ]); 132 | 133 | // --- 134 | 135 | onDictate.mockReset(); 136 | onProgress.mockReset(); 137 | 138 | act(() => { 139 | speechRecognition.dispatchEvent( 140 | new SpeechRecognitionEvent('result', { 141 | resultIndex: 0, 142 | results: new SpeechRecognitionResultList( 143 | SpeechRecognitionResult.fromFinalized(new SpeechRecognitionAlternative(0.966937243938446, 'testing')) 144 | ) 145 | }) 146 | ); 147 | }); 148 | 149 | expect(onDictate).toHaveBeenCalledTimes(1); 150 | expect(onEnd).toHaveBeenCalledTimes(0); 151 | expect(onProgress).toHaveBeenCalledTimes(0); 152 | expect(onStart).toHaveBeenCalledTimes(0); 153 | 
expect(onDictate.mock.calls[0][0]).toHaveProperty('type', 'dictate'); 154 | expect(onDictate.mock.calls[0][0]).toHaveProperty('result', { 155 | confidence: 0.966937243938446, 156 | transcript: 'testing' 157 | }); 158 | 159 | // --- 160 | 161 | onDictate.mockReset(); 162 | onProgress.mockReset(); 163 | 164 | act(() => { 165 | speechRecognition.dispatchEvent( 166 | new SpeechRecognitionEvent('result', { 167 | resultIndex: 1, 168 | results: new SpeechRecognitionResultList( 169 | SpeechRecognitionResult.fromFinalized(new SpeechRecognitionAlternative(0.966937243938446, 'testing')), 170 | new SpeechRecognitionResult(new SpeechRecognitionAlternative(0.009999999776482582, ' one')) 171 | ) 172 | }) 173 | ); 174 | }); 175 | 176 | expect(onDictate).toHaveBeenCalledTimes(0); 177 | expect(onEnd).toHaveBeenCalledTimes(0); 178 | expect(onProgress).toHaveBeenCalledTimes(1); 179 | expect(onStart).toHaveBeenCalledTimes(0); 180 | expect(onProgress.mock.calls[0][0]).toHaveProperty('type', 'progress'); 181 | expect(onProgress.mock.calls[0][0]).toHaveProperty('results', [ 182 | { confidence: 0.009999999776482582, transcript: ' one' } 183 | ]); 184 | 185 | // --- 186 | 187 | onDictate.mockReset(); 188 | onProgress.mockReset(); 189 | 190 | act(() => { 191 | speechRecognition.dispatchEvent( 192 | new SpeechRecognitionEvent('result', { 193 | resultIndex: 1, 194 | results: new SpeechRecognitionResultList( 195 | SpeechRecognitionResult.fromFinalized(new SpeechRecognitionAlternative(0.966937243938446, 'testing')), 196 | SpeechRecognitionResult.fromFinalized(new SpeechRecognitionAlternative(0.9035850167274475, ' one')) 197 | ) 198 | }) 199 | ); 200 | }); 201 | 202 | expect(onDictate).toHaveBeenCalledTimes(1); 203 | expect(onEnd).toHaveBeenCalledTimes(0); 204 | expect(onProgress).toHaveBeenCalledTimes(0); 205 | expect(onStart).toHaveBeenCalledTimes(0); 206 | expect(onDictate.mock.calls[0][0]).toHaveProperty('type', 'dictate'); 207 | expect(onDictate.mock.calls[0][0]).toHaveProperty('result', { confidence: 0.9035850167274475, transcript: ' one' }); 208 | 209 | // --- 210 | 211 | onDictate.mockReset(); 212 | onProgress.mockReset(); 213 | 214 | act(() => { 215 | speechRecognition.dispatchEvent( 216 | new SpeechRecognitionEvent('result', { 217 | resultIndex: 2, 218 | results: new SpeechRecognitionResultList( 219 | SpeechRecognitionResult.fromFinalized(new SpeechRecognitionAlternative(0.966937243938446, 'testing')), 220 | SpeechRecognitionResult.fromFinalized(new SpeechRecognitionAlternative(0.9035850167274475, ' one')), 221 | new SpeechRecognitionResult(new SpeechRecognitionAlternative(0.009999999776482582, ' two')) 222 | ) 223 | }) 224 | ); 225 | }); 226 | 227 | expect(onDictate).toHaveBeenCalledTimes(0); 228 | expect(onEnd).toHaveBeenCalledTimes(0); 229 | expect(onProgress).toHaveBeenCalledTimes(1); 230 | expect(onStart).toHaveBeenCalledTimes(0); 231 | expect(onProgress.mock.calls[0][0]).toHaveProperty('type', 'progress'); 232 | expect(onProgress.mock.calls[0][0]).toHaveProperty('results', [ 233 | { confidence: 0.009999999776482582, transcript: ' two' } 234 | ]); 235 | 236 | // --- 237 | 238 | onDictate.mockReset(); 239 | onProgress.mockReset(); 240 | 241 | act(() => { 242 | speechRecognition.dispatchEvent( 243 | new SpeechRecognitionEvent('result', { 244 | resultIndex: 2, 245 | results: new SpeechRecognitionResultList( 246 | SpeechRecognitionResult.fromFinalized(new SpeechRecognitionAlternative(0.966937243938446, 'testing')), 247 | SpeechRecognitionResult.fromFinalized(new SpeechRecognitionAlternative(0.9035850167274475, 
' one')), 248 | SpeechRecognitionResult.fromFinalized(new SpeechRecognitionAlternative(0.8551138043403625, ' two')) 249 | ) 250 | }) 251 | ); 252 | }); 253 | 254 | expect(onDictate).toHaveBeenCalledTimes(1); 255 | expect(onEnd).toHaveBeenCalledTimes(0); 256 | expect(onProgress).toHaveBeenCalledTimes(0); 257 | expect(onStart).toHaveBeenCalledTimes(0); 258 | expect(onDictate.mock.calls[0][0]).toHaveProperty('type', 'dictate'); 259 | expect(onDictate.mock.calls[0][0]).toHaveProperty('result', { 260 | confidence: 0.8551138043403625, 261 | transcript: ' two' 262 | }); 263 | 264 | // --- 265 | 266 | onDictate.mockReset(); 267 | onProgress.mockReset(); 268 | 269 | act(() => { 270 | speechRecognition.dispatchEvent( 271 | new SpeechRecognitionEvent('result', { 272 | resultIndex: 3, 273 | results: new SpeechRecognitionResultList( 274 | SpeechRecognitionResult.fromFinalized(new SpeechRecognitionAlternative(0.966937243938446, 'testing')), 275 | SpeechRecognitionResult.fromFinalized(new SpeechRecognitionAlternative(0.9035850167274475, ' one')), 276 | SpeechRecognitionResult.fromFinalized(new SpeechRecognitionAlternative(0.8551138043403625, ' two')), 277 | new SpeechRecognitionResult(new SpeechRecognitionAlternative(0.009999999776482582, ' three')) 278 | ) 279 | }) 280 | ); 281 | }); 282 | 283 | expect(onDictate).toHaveBeenCalledTimes(0); 284 | expect(onEnd).toHaveBeenCalledTimes(0); 285 | expect(onProgress).toHaveBeenCalledTimes(1); 286 | expect(onStart).toHaveBeenCalledTimes(0); 287 | expect(onProgress.mock.calls[0][0]).toHaveProperty('type', 'progress'); 288 | expect(onProgress.mock.calls[0][0]).toHaveProperty('results', [ 289 | { 290 | confidence: 0.009999999776482582, 291 | transcript: ' three' 292 | } 293 | ]); 294 | 295 | // --- 296 | 297 | onDictate.mockReset(); 298 | onProgress.mockReset(); 299 | 300 | act(() => { 301 | speechRecognition.dispatchEvent( 302 | new SpeechRecognitionEvent('result', { 303 | resultIndex: 3, 304 | results: new SpeechRecognitionResultList( 305 | SpeechRecognitionResult.fromFinalized(new SpeechRecognitionAlternative(0.966937243938446, 'testing')), 306 | SpeechRecognitionResult.fromFinalized(new SpeechRecognitionAlternative(0.9035850167274475, ' one')), 307 | SpeechRecognitionResult.fromFinalized(new SpeechRecognitionAlternative(0.8551138043403625, ' two')), 308 | SpeechRecognitionResult.fromFinalized(new SpeechRecognitionAlternative(0.9290534257888794, ' three')) 309 | ) 310 | }) 311 | ); 312 | }); 313 | 314 | expect(onDictate).toHaveBeenCalledTimes(1); 315 | expect(onEnd).toHaveBeenCalledTimes(0); 316 | expect(onProgress).toHaveBeenCalledTimes(0); 317 | expect(onStart).toHaveBeenCalledTimes(0); 318 | expect(onDictate.mock.calls[0][0]).toHaveProperty('type', 'dictate'); 319 | expect(onDictate.mock.calls[0][0]).toHaveProperty('result', { 320 | confidence: 0.9290534257888794, 321 | transcript: ' three' 322 | }); 323 | 324 | // --- 325 | 326 | onDictate.mockReset(); 327 | onProgress.mockReset(); 328 | 329 | act(() => { 330 | speechRecognition.dispatchEvent( 331 | new SpeechRecognitionEvent('result', { 332 | resultIndex: 4, 333 | results: new SpeechRecognitionResultList( 334 | SpeechRecognitionResult.fromFinalized(new SpeechRecognitionAlternative(0.966937243938446, 'testing')), 335 | SpeechRecognitionResult.fromFinalized(new SpeechRecognitionAlternative(0.9035850167274475, ' one')), 336 | SpeechRecognitionResult.fromFinalized(new SpeechRecognitionAlternative(0.8551138043403625, ' two')), 337 | SpeechRecognitionResult.fromFinalized(new 
SpeechRecognitionAlternative(0.9290534257888794, ' three')), 338 | new SpeechRecognitionResult(new SpeechRecognitionAlternative(0.009999999776482582, ' test')) 339 | ) 340 | }) 341 | ); 342 | }); 343 | 344 | expect(onDictate).toHaveBeenCalledTimes(0); 345 | expect(onEnd).toHaveBeenCalledTimes(0); 346 | expect(onProgress).toHaveBeenCalledTimes(1); 347 | expect(onStart).toHaveBeenCalledTimes(0); 348 | expect(onProgress.mock.calls[0][0]).toHaveProperty('type', 'progress'); 349 | expect(onProgress.mock.calls[0][0]).toHaveProperty('results', [ 350 | { 351 | confidence: 0.009999999776482582, 352 | transcript: ' test' 353 | } 354 | ]); 355 | 356 | // --- 357 | 358 | onDictate.mockReset(); 359 | onProgress.mockReset(); 360 | 361 | act(() => { 362 | speechRecognition.dispatchEvent( 363 | new SpeechRecognitionEvent('result', { 364 | resultIndex: 4, 365 | results: new SpeechRecognitionResultList( 366 | SpeechRecognitionResult.fromFinalized(new SpeechRecognitionAlternative(0.966937243938446, 'testing')), 367 | SpeechRecognitionResult.fromFinalized(new SpeechRecognitionAlternative(0.9035850167274475, ' one')), 368 | SpeechRecognitionResult.fromFinalized(new SpeechRecognitionAlternative(0.8551138043403625, ' two')), 369 | SpeechRecognitionResult.fromFinalized(new SpeechRecognitionAlternative(0.9290534257888794, ' three')), 370 | new SpeechRecognitionResult(new SpeechRecognitionAlternative(0.009999999776482582, ' testing')) 371 | ) 372 | }) 373 | ); 374 | }); 375 | 376 | expect(onDictate).toHaveBeenCalledTimes(0); 377 | expect(onEnd).toHaveBeenCalledTimes(0); 378 | expect(onProgress).toHaveBeenCalledTimes(1); 379 | expect(onStart).toHaveBeenCalledTimes(0); 380 | expect(onProgress.mock.calls[0][0]).toHaveProperty('type', 'progress'); 381 | expect(onProgress.mock.calls[0][0]).toHaveProperty('results', [ 382 | { 383 | confidence: 0.009999999776482582, 384 | transcript: ' testing' 385 | } 386 | ]); 387 | 388 | // --- 389 | 390 | onDictate.mockReset(); 391 | onProgress.mockReset(); 392 | 393 | act(() => { 394 | speechRecognition.dispatchEvent( 395 | new SpeechRecognitionEvent('result', { 396 | resultIndex: 4, 397 | results: new SpeechRecognitionResultList( 398 | SpeechRecognitionResult.fromFinalized(new SpeechRecognitionAlternative(0.966937243938446, 'testing')), 399 | SpeechRecognitionResult.fromFinalized(new SpeechRecognitionAlternative(0.9035850167274475, ' one')), 400 | SpeechRecognitionResult.fromFinalized(new SpeechRecognitionAlternative(0.8551138043403625, ' two')), 401 | SpeechRecognitionResult.fromFinalized(new SpeechRecognitionAlternative(0.9290534257888794, ' three')), 402 | SpeechRecognitionResult.fromFinalized(new SpeechRecognitionAlternative(0.9721954464912415, ' testing')) 403 | ) 404 | }) 405 | ); 406 | }); 407 | 408 | expect(onDictate).toHaveBeenCalledTimes(1); 409 | expect(onEnd).toHaveBeenCalledTimes(0); 410 | expect(onProgress).toHaveBeenCalledTimes(0); 411 | expect(onStart).toHaveBeenCalledTimes(0); 412 | expect(onDictate.mock.calls[0][0]).toHaveProperty('type', 'dictate'); 413 | expect(onDictate.mock.calls[0][0]).toHaveProperty('result', { 414 | confidence: 0.9721954464912415, 415 | transcript: ' testing' 416 | }); 417 | 418 | // --- 419 | 420 | onDictate.mockReset(); 421 | onProgress.mockReset(); 422 | 423 | act(() => { 424 | speechRecognition.dispatchEvent(new Event('speechend')); 425 | speechRecognition.dispatchEvent(new Event('soundend')); 426 | speechRecognition.dispatchEvent(new Event('audioend')); 427 | speechRecognition.dispatchEvent(new Event('end')); 428 | }); 429 | 430 | 
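The repeated dispatch/assert cycles above all build the same `result` event shape. A hedged sketch of a test helper that could factor it out (hypothetical, not part of this repo): entries before `finalizedCount` are finalized, the rest stay interim, and `resultIndex` points at the newest entry, matching every dispatch in this spec.

```tsx
import {
  SpeechRecognition,
  SpeechRecognitionAlternative,
  SpeechRecognitionEvent,
  SpeechRecognitionResult,
  SpeechRecognitionResultList
} from 'react-dictate-button/internal';

// Dispatches one "result" event: entries before `finalizedCount` are finalized,
// the rest stay interim.
function dispatchResults(
  recognition: SpeechRecognition,
  finalizedCount: number,
  ...alternatives: SpeechRecognitionAlternative[]
): void {
  recognition.dispatchEvent(
    new SpeechRecognitionEvent('result', {
      resultIndex: alternatives.length - 1,
      results: new SpeechRecognitionResultList(
        ...alternatives.map((alternative, index) =>
          index < finalizedCount
            ? SpeechRecognitionResult.fromFinalized(alternative)
            : new SpeechRecognitionResult(alternative)
        )
      )
    })
  );
}
```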
expect(onDictate).toHaveBeenCalledTimes(0); 431 | expect(onEnd).toHaveBeenCalledTimes(1); 432 | expect(onProgress).toHaveBeenCalledTimes(0); 433 | expect(onStart).toHaveBeenCalledTimes(0); 434 | }); 435 | }); 436 | -------------------------------------------------------------------------------- /packages/react-dictate-button/__tests__/continuousModeNotSupported.spec.tsx: -------------------------------------------------------------------------------- 1 | /** @jest-environment @happy-dom/jest-environment */ 2 | 3 | // In some browsers where continuous mode is not supported or honored, it should work as in interactive mode. 4 | // For example, as soon as "end" event is received, the DictateCheckbox should be unchecked, instead of relying on "dictate" event. 5 | 6 | import { act, fireEvent, render, screen } from '@testing-library/react'; 7 | import React from 'react'; 8 | import { 9 | DictateCheckbox, 10 | type DictateEventHandler, 11 | type EndEventHandler, 12 | type ProgressEventHandler, 13 | type StartEventHandler 14 | } from '../src/index'; 15 | import { 16 | SpeechRecognition, 17 | SpeechRecognitionAlternative, 18 | SpeechRecognitionEvent, 19 | SpeechRecognitionResult, 20 | SpeechRecognitionResultList 21 | } from '../src/internal'; 22 | 23 | describe('not honoring continuous mode', () => { 24 | let constructSpeechRecognition: jest.Mock; 25 | let onDictate: jest.Mock, Parameters, undefined>; 26 | let onEnd: jest.Mock, Parameters, undefined>; 27 | let onProgress: jest.Mock, Parameters, undefined>; 28 | let onStart: jest.Mock, Parameters, undefined>; 29 | let start: jest.SpyInstance | undefined; 30 | 31 | test('should act as interactive mode', () => { 32 | constructSpeechRecognition = jest.fn().mockImplementationOnce(() => { 33 | const speechRecognition = new SpeechRecognition(); 34 | 35 | speechRecognition.continuous = true; 36 | 37 | start = jest.spyOn(speechRecognition, 'start'); 38 | 39 | return speechRecognition; 40 | }); 41 | 42 | onDictate = jest.fn(); 43 | onEnd = jest.fn(); 44 | onProgress = jest.fn(); 45 | onStart = jest.fn(); 46 | 47 | render( 48 | 57 | Click me 58 | 59 | ); 60 | 61 | act(() => { 62 | fireEvent.click(screen.getByText('Click me')); 63 | }); 64 | 65 | const speechRecognition: SpeechRecognition = constructSpeechRecognition.mock.results[0]?.value; 66 | 67 | expect(onProgress).toHaveBeenCalledTimes(0); 68 | expect(start).toHaveBeenCalledTimes(1); 69 | 70 | // Web Speech provider does not honor continuous mode. 
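// Some ponyfills accept the "continuous" flag but ignore it. Flipping it back to
// false on the live instance simulates that; the DictateCheckbox must then
// uncheck itself on "end" alone, without relying on a further "dictate" event.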
71 | speechRecognition.continuous = false; 72 | 73 | act(() => { 74 | speechRecognition.dispatchEvent(new Event('start', {})); 75 | speechRecognition.dispatchEvent(new Event('audiostart', {})); 76 | speechRecognition.dispatchEvent(new Event('soundstart', {})); 77 | speechRecognition.dispatchEvent(new Event('speechstart', {})); 78 | }); 79 | 80 | expect(onDictate).toHaveBeenCalledTimes(0); 81 | expect(onEnd).toHaveBeenCalledTimes(0); 82 | expect(onProgress).toHaveBeenCalledTimes(1); 83 | expect(onStart).toHaveBeenCalledTimes(1); 84 | 85 | onStart.mockReset(); 86 | 87 | // --- 88 | 89 | onDictate.mockReset(); 90 | onProgress.mockReset(); 91 | 92 | act(() => { 93 | speechRecognition.dispatchEvent( 94 | new SpeechRecognitionEvent('result', { 95 | resultIndex: 0, 96 | results: new SpeechRecognitionResultList( 97 | new SpeechRecognitionResult(new SpeechRecognitionAlternative(0.009999999776482582, 'test')) 98 | ) 99 | }) 100 | ); 101 | }); 102 | 103 | expect(onDictate).toHaveBeenCalledTimes(0); 104 | expect(onEnd).toHaveBeenCalledTimes(0); 105 | expect(onProgress).toHaveBeenCalledTimes(1); 106 | expect(onStart).toHaveBeenCalledTimes(0); 107 | expect(onProgress.mock.calls[0][0]).toHaveProperty('type', 'progress'); 108 | expect(onProgress.mock.calls[0][0]).toHaveProperty('results', [ 109 | { confidence: 0.009999999776482582, transcript: 'test' } 110 | ]); 111 | 112 | // --- 113 | 114 | onDictate.mockReset(); 115 | onProgress.mockReset(); 116 | 117 | act(() => { 118 | speechRecognition.dispatchEvent( 119 | new SpeechRecognitionEvent('result', { 120 | resultIndex: 0, 121 | results: new SpeechRecognitionResultList( 122 | new SpeechRecognitionResult(new SpeechRecognitionAlternative(0.009999999776482582, 'testing')) 123 | ) 124 | }) 125 | ); 126 | }); 127 | 128 | expect(onDictate).toHaveBeenCalledTimes(0); 129 | expect(onEnd).toHaveBeenCalledTimes(0); 130 | expect(onProgress).toHaveBeenCalledTimes(1); 131 | expect(onStart).toHaveBeenCalledTimes(0); 132 | expect(onProgress.mock.calls[0][0]).toHaveProperty('type', 'progress'); 133 | expect(onProgress.mock.calls[0][0]).toHaveProperty('results', [ 134 | { confidence: 0.009999999776482582, transcript: 'testing' } 135 | ]); 136 | 137 | // --- 138 | 139 | onDictate.mockReset(); 140 | onProgress.mockReset(); 141 | 142 | act(() => { 143 | speechRecognition.dispatchEvent( 144 | new SpeechRecognitionEvent('result', { 145 | resultIndex: 0, 146 | results: new SpeechRecognitionResultList( 147 | SpeechRecognitionResult.fromFinalized(new SpeechRecognitionAlternative(0.8999999761581421, 'testing')) 148 | ) 149 | }) 150 | ); 151 | }); 152 | 153 | expect(onDictate).toHaveBeenCalledTimes(1); 154 | expect(onEnd).toHaveBeenCalledTimes(0); 155 | expect(onProgress).toHaveBeenCalledTimes(0); 156 | expect(onStart).toHaveBeenCalledTimes(0); 157 | expect(onDictate.mock.calls[0][0]).toHaveProperty('type', 'dictate'); 158 | expect(onDictate.mock.calls[0][0]).toHaveProperty('result', { 159 | confidence: 0.8999999761581421, 160 | transcript: 'testing' 161 | }); 162 | 163 | // --- 164 | 165 | onDictate.mockReset(); 166 | onProgress.mockReset(); 167 | 168 | act(() => { 169 | speechRecognition.dispatchEvent(new Event('speechend')); 170 | speechRecognition.dispatchEvent(new Event('soundend')); 171 | speechRecognition.dispatchEvent(new Event('audioend')); 172 | speechRecognition.dispatchEvent(new Event('end')); 173 | }); 174 | 175 | expect(onDictate).toHaveBeenCalledTimes(0); 176 | expect(onEnd).toHaveBeenCalledTimes(1); // Should end, as if in interactive mode. 
177 | expect(onProgress).toHaveBeenCalledTimes(0); 178 | expect(onStart).toHaveBeenCalledTimes(0); 179 | 180 | // Should be unchecked, as if in interactive mode. 181 | expect(screen.getByText('Click me').querySelector('input')).toHaveProperty('checked', false); 182 | }); 183 | }); 184 | -------------------------------------------------------------------------------- /packages/react-dictate-button/__tests__/errorWithNoSpeech.tsx: -------------------------------------------------------------------------------- 1 | /** @jest-environment @happy-dom/jest-environment */ 2 | 3 | import { act, fireEvent, render, screen } from '@testing-library/react'; 4 | import React from 'react'; 5 | import { 6 | DictateButton, 7 | type DictateEventHandler, 8 | type EndEventHandler, 9 | type ErrorEventHandler, 10 | type ProgressEventHandler, 11 | type StartEventHandler 12 | } from '../src/index'; 13 | import { SpeechRecognition, SpeechRecognitionErrorEvent } from '../src/internal'; 14 | 15 | describe('with error of "no-speech"', () => { 16 | let constructSpeechRecognition: jest.Mock; 17 | let onDictate: jest.Mock, Parameters, undefined>; 18 | let onEnd: jest.Mock, Parameters, undefined>; 19 | let onError: jest.Mock, Parameters, undefined>; 20 | let onProgress: jest.Mock, Parameters, undefined>; 21 | let onStart: jest.Mock, Parameters, undefined>; 22 | let start: jest.SpyInstance | undefined; 23 | 24 | test('should dispatch events accordingly', () => { 25 | constructSpeechRecognition = jest.fn().mockImplementationOnce(() => { 26 | const speechRecognition = new SpeechRecognition(); 27 | 28 | start = jest.spyOn(speechRecognition, 'start'); 29 | 30 | return speechRecognition; 31 | }); 32 | 33 | onDictate = jest.fn(); 34 | onEnd = jest.fn(); 35 | onError = jest.fn(); 36 | onProgress = jest.fn(); 37 | onStart = jest.fn(); 38 | 39 | render( 40 | 50 | Click me 51 | 52 | ); 53 | 54 | act(() => { 55 | fireEvent.click(screen.getByText('Click me')); 56 | }); 57 | 58 | const speechRecognition: SpeechRecognition = constructSpeechRecognition.mock.results[0]?.value; 59 | 60 | expect(onProgress).toHaveBeenCalledTimes(0); 61 | expect(start).toHaveBeenCalledTimes(1); 62 | 63 | act(() => { 64 | speechRecognition.dispatchEvent(new Event('start', {})); 65 | speechRecognition.dispatchEvent(new Event('audiostart', {})); 66 | }); 67 | 68 | expect(onEnd).toHaveBeenCalledTimes(0); 69 | expect(onStart).toHaveBeenCalledTimes(1); 70 | 71 | onEnd.mockReset(); 72 | onStart.mockReset(); 73 | 74 | expect(onProgress).toHaveBeenCalledTimes(1); 75 | expect(onProgress.mock.calls[0][0]).toHaveProperty('type', 'progress'); 76 | expect(onProgress.mock.calls[0][0]).not.toHaveProperty('results', expect.anything()); 77 | 78 | // --- 79 | 80 | act(() => { 81 | speechRecognition.dispatchEvent(new Event('audioend', {})); 82 | speechRecognition.dispatchEvent(new SpeechRecognitionErrorEvent('error', { error: 'no-speech', message: '' })); 83 | speechRecognition.dispatchEvent(new Event('end', {})); 84 | }); 85 | 86 | expect(onEnd).toHaveBeenCalledTimes(1); 87 | expect(onError).toHaveBeenCalledTimes(1); 88 | expect(onError).toHaveBeenLastCalledWith(expect.any(SpeechRecognitionErrorEvent)); 89 | expect(onError).toHaveBeenLastCalledWith( 90 | expect.objectContaining({ 91 | error: 'no-speech', 92 | type: 'error' 93 | }) 94 | ); 95 | }); 96 | }); 97 | -------------------------------------------------------------------------------- /packages/react-dictate-button/__tests__/multipleInterims.spec.tsx: 
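The spec below feeds overlapping non-finalized interims through single `result` events, so a consumer's `onProgress` handler should expect zero, one, or many entries. A minimal hedged sketch; the concatenation is illustrative only:

```tsx
import { type ProgressEventHandler } from 'react-dictate-button';

// "results" is absent for the initial progress event (from "audiostart") and
// may carry several interim entries afterwards.
const handleProgress: ProgressEventHandler = ({ results }) => {
  const interimTranscript = (results ?? []).map(({ transcript }) => transcript).join('');

  console.log(interimTranscript);
};
```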
-------------------------------------------------------------------------------- 1 | /** @jest-environment @happy-dom/jest-environment */ 2 | 3 | import { act, fireEvent, render, screen } from '@testing-library/react'; 4 | import React from 'react'; 5 | import { 6 | DictateButton, 7 | type DictateEventHandler, 8 | type EndEventHandler, 9 | type ProgressEventHandler, 10 | type StartEventHandler 11 | } from '../src/index'; 12 | import { 13 | SpeechRecognition, 14 | SpeechRecognitionAlternative, 15 | SpeechRecognitionEvent, 16 | SpeechRecognitionResult, 17 | SpeechRecognitionResultList 18 | } from '../src/internal'; 19 | 20 | describe('with multiple non-finalized interims', () => { 21 | let constructSpeechRecognition: jest.Mock; 22 | let onDictate: jest.Mock, Parameters, undefined>; 23 | let onEnd: jest.Mock, Parameters, undefined>; 24 | let onProgress: jest.Mock, Parameters, undefined>; 25 | let onStart: jest.Mock, Parameters, undefined>; 26 | let start: jest.SpyInstance | undefined; 27 | 28 | test('should onDictate and onProgress event accordingly', () => { 29 | constructSpeechRecognition = jest.fn().mockImplementationOnce(() => { 30 | const speechRecognition = new SpeechRecognition(); 31 | 32 | start = jest.spyOn(speechRecognition, 'start'); 33 | 34 | return speechRecognition; 35 | }); 36 | 37 | onDictate = jest.fn(); 38 | onEnd = jest.fn(); 39 | onProgress = jest.fn(); 40 | onStart = jest.fn(); 41 | 42 | render( 43 | 52 | Click me 53 | 54 | ); 55 | 56 | act(() => { 57 | fireEvent.click(screen.getByText('Click me')); 58 | }); 59 | 60 | const speechRecognition: SpeechRecognition = constructSpeechRecognition.mock.results[0]?.value; 61 | 62 | expect(onProgress).toHaveBeenCalledTimes(0); 63 | expect(start).toHaveBeenCalledTimes(1); 64 | 65 | act(() => { 66 | speechRecognition.dispatchEvent(new Event('start', {})); 67 | speechRecognition.dispatchEvent(new Event('audiostart', {})); 68 | speechRecognition.dispatchEvent(new Event('soundstart', {})); 69 | speechRecognition.dispatchEvent(new Event('speechstart', {})); 70 | }); 71 | 72 | expect(onEnd).toHaveBeenCalledTimes(0); 73 | expect(onStart).toHaveBeenCalledTimes(1); 74 | 75 | onEnd.mockReset(); 76 | onStart.mockReset(); 77 | 78 | expect(onProgress).toHaveBeenCalledTimes(1); 79 | expect(onProgress.mock.calls[0][0]).toHaveProperty('type', 'progress'); 80 | expect(onProgress.mock.calls[0][0]).not.toHaveProperty('results', expect.anything()); 81 | 82 | // --- 83 | 84 | onDictate.mockReset(); 85 | onProgress.mockReset(); 86 | 87 | act(() => { 88 | speechRecognition.dispatchEvent( 89 | new SpeechRecognitionEvent('result', { 90 | resultIndex: 0, 91 | results: new SpeechRecognitionResultList( 92 | new SpeechRecognitionResult(new SpeechRecognitionAlternative(0.009999999776482582, 'test')) 93 | ) 94 | }) 95 | ); 96 | }); 97 | 98 | expect(onDictate).toHaveBeenCalledTimes(0); 99 | expect(onEnd).toHaveBeenCalledTimes(0); 100 | expect(onProgress).toHaveBeenCalledTimes(1); 101 | expect(onStart).toHaveBeenCalledTimes(0); 102 | expect(onProgress.mock.calls[0][0]).toHaveProperty('type', 'progress'); 103 | expect(onProgress.mock.calls[0][0]).toHaveProperty('results', [ 104 | { confidence: 0.009999999776482582, transcript: 'test' } 105 | ]); 106 | 107 | // --- 108 | 109 | onDictate.mockReset(); 110 | onProgress.mockReset(); 111 | 112 | act(() => { 113 | speechRecognition.dispatchEvent( 114 | new SpeechRecognitionEvent('result', { 115 | resultIndex: 0, 116 | results: new SpeechRecognitionResultList( 117 | new SpeechRecognitionResult(new 
SpeechRecognitionAlternative(0.009999999776482582, 'testing')) 118 | ) 119 | }) 120 | ); 121 | }); 122 | 123 | expect(onDictate).toHaveBeenCalledTimes(0); 124 | expect(onEnd).toHaveBeenCalledTimes(0); 125 | expect(onProgress).toHaveBeenCalledTimes(1); 126 | expect(onStart).toHaveBeenCalledTimes(0); 127 | expect(onProgress.mock.calls[0][0]).toHaveProperty('type', 'progress'); 128 | expect(onProgress.mock.calls[0][0]).toHaveProperty('results', [ 129 | { confidence: 0.009999999776482582, transcript: 'testing' } 130 | ]); 131 | 132 | // --- 133 | 134 | onDictate.mockReset(); 135 | onProgress.mockReset(); 136 | 137 | act(() => { 138 | speechRecognition.dispatchEvent( 139 | new SpeechRecognitionEvent('result', { 140 | resultIndex: 0, 141 | results: new SpeechRecognitionResultList( 142 | new SpeechRecognitionResult(new SpeechRecognitionAlternative(0.8999999761581421, 'testing')) 143 | ) 144 | }) 145 | ); 146 | }); 147 | 148 | expect(onDictate).toHaveBeenCalledTimes(0); 149 | expect(onEnd).toHaveBeenCalledTimes(0); 150 | expect(onProgress).toHaveBeenCalledTimes(1); 151 | expect(onStart).toHaveBeenCalledTimes(0); 152 | expect(onProgress.mock.calls[0][0]).toHaveProperty('type', 'progress'); 153 | expect(onProgress.mock.calls[0][0]).toHaveProperty('results', [ 154 | { confidence: 0.8999999761581421, transcript: 'testing' } 155 | ]); 156 | 157 | // --- 158 | 159 | onDictate.mockReset(); 160 | onProgress.mockReset(); 161 | 162 | act(() => { 163 | speechRecognition.dispatchEvent( 164 | new SpeechRecognitionEvent('result', { 165 | resultIndex: 0, 166 | results: new SpeechRecognitionResultList( 167 | new SpeechRecognitionResult(new SpeechRecognitionAlternative(0.8999999761581421, 'testing')), 168 | new SpeechRecognitionResult(new SpeechRecognitionAlternative(0.009999999776482582, ' one')) 169 | ) 170 | }) 171 | ); 172 | }); 173 | 174 | expect(onDictate).toHaveBeenCalledTimes(0); 175 | expect(onEnd).toHaveBeenCalledTimes(0); 176 | expect(onProgress).toHaveBeenCalledTimes(1); 177 | expect(onStart).toHaveBeenCalledTimes(0); 178 | expect(onProgress.mock.calls[0][0]).toHaveProperty('type', 'progress'); 179 | expect(onProgress.mock.calls[0][0]).toHaveProperty('results', [ 180 | { confidence: 0.8999999761581421, transcript: 'testing' }, 181 | { confidence: 0.009999999776482582, transcript: ' one' } 182 | ]); 183 | 184 | // --- 185 | 186 | onDictate.mockReset(); 187 | onProgress.mockReset(); 188 | 189 | act(() => { 190 | speechRecognition.dispatchEvent( 191 | new SpeechRecognitionEvent('result', { 192 | resultIndex: 0, 193 | results: new SpeechRecognitionResultList( 194 | new SpeechRecognitionResult(new SpeechRecognitionAlternative(0.8999999761581421, 'testing')), 195 | new SpeechRecognitionResult(new SpeechRecognitionAlternative(0.009999999776482582, ' one two')) 196 | ) 197 | }) 198 | ); 199 | }); 200 | 201 | expect(onDictate).toHaveBeenCalledTimes(0); 202 | expect(onEnd).toHaveBeenCalledTimes(0); 203 | expect(onProgress).toHaveBeenCalledTimes(1); 204 | expect(onStart).toHaveBeenCalledTimes(0); 205 | expect(onProgress.mock.calls[0][0]).toHaveProperty('type', 'progress'); 206 | expect(onProgress.mock.calls[0][0]).toHaveProperty('results', [ 207 | { confidence: 0.8999999761581421, transcript: 'testing' }, 208 | { confidence: 0.009999999776482582, transcript: ' one two' } 209 | ]); 210 | 211 | // --- 212 | 213 | onDictate.mockReset(); 214 | onProgress.mockReset(); 215 | 216 | act(() => { 217 | speechRecognition.dispatchEvent( 218 | new SpeechRecognitionEvent('result', { 219 | resultIndex: 0, 220 | results: new 
SpeechRecognitionResultList( 221 | new SpeechRecognitionResult(new SpeechRecognitionAlternative(0.8999999761581421, 'testing')), 222 | new SpeechRecognitionResult(new SpeechRecognitionAlternative(0.009999999776482582, ' 1 2 3')) 223 | ) 224 | }) 225 | ); 226 | }); 227 | 228 | expect(onDictate).toHaveBeenCalledTimes(0); 229 | expect(onEnd).toHaveBeenCalledTimes(0); 230 | expect(onProgress).toHaveBeenCalledTimes(1); 231 | expect(onStart).toHaveBeenCalledTimes(0); 232 | expect(onProgress.mock.calls[0][0]).toHaveProperty('type', 'progress'); 233 | expect(onProgress.mock.calls[0][0]).toHaveProperty('results', [ 234 | { confidence: 0.8999999761581421, transcript: 'testing' }, 235 | { confidence: 0.009999999776482582, transcript: ' 1 2 3' } 236 | ]); 237 | 238 | // --- 239 | 240 | onDictate.mockReset(); 241 | onProgress.mockReset(); 242 | 243 | act(() => { 244 | speechRecognition.dispatchEvent( 245 | new SpeechRecognitionEvent('result', { 246 | resultIndex: 0, 247 | results: new SpeechRecognitionResultList( 248 | new SpeechRecognitionResult(new SpeechRecognitionAlternative(0.8999999761581421, 'testing')), 249 | new SpeechRecognitionResult(new SpeechRecognitionAlternative(0.009999999776482582, ' one two three')) 250 | ) 251 | }) 252 | ); 253 | }); 254 | 255 | expect(onDictate).toHaveBeenCalledTimes(0); 256 | expect(onEnd).toHaveBeenCalledTimes(0); 257 | expect(onProgress).toHaveBeenCalledTimes(1); 258 | expect(onStart).toHaveBeenCalledTimes(0); 259 | expect(onProgress.mock.calls[0][0]).toHaveProperty('type', 'progress'); 260 | expect(onProgress.mock.calls[0][0]).toHaveProperty('results', [ 261 | { confidence: 0.8999999761581421, transcript: 'testing' }, 262 | { confidence: 0.009999999776482582, transcript: ' one two three' } 263 | ]); 264 | 265 | // --- 266 | 267 | onDictate.mockReset(); 268 | onProgress.mockReset(); 269 | 270 | act(() => { 271 | speechRecognition.dispatchEvent( 272 | new SpeechRecognitionEvent('result', { 273 | resultIndex: 0, 274 | results: new SpeechRecognitionResultList( 275 | SpeechRecognitionResult.fromFinalized( 276 | new SpeechRecognitionAlternative(0.5359774827957153, 'testing one two three') 277 | ) 278 | ) 279 | }) 280 | ); 281 | }); 282 | 283 | expect(onDictate).toHaveBeenCalledTimes(1); 284 | expect(onEnd).toHaveBeenCalledTimes(0); 285 | expect(onProgress).toHaveBeenCalledTimes(0); 286 | expect(onStart).toHaveBeenCalledTimes(0); 287 | expect(onDictate.mock.calls[0][0]).toHaveProperty('type', 'dictate'); 288 | expect(onDictate.mock.calls[0][0]).toHaveProperty('result', { 289 | confidence: 0.5359774827957153, 290 | transcript: 'testing one two three' 291 | }); 292 | 293 | // --- 294 | 295 | onDictate.mockReset(); 296 | onProgress.mockReset(); 297 | 298 | act(() => { 299 | speechRecognition.dispatchEvent(new Event('speechend')); 300 | speechRecognition.dispatchEvent(new Event('soundend')); 301 | speechRecognition.dispatchEvent(new Event('audioend')); 302 | speechRecognition.dispatchEvent(new Event('end')); 303 | }); 304 | 305 | expect(onDictate).toHaveBeenCalledTimes(0); 306 | expect(onEnd).toHaveBeenCalledTimes(1); 307 | expect(onProgress).toHaveBeenCalledTimes(0); 308 | expect(onStart).toHaveBeenCalledTimes(0); 309 | }); 310 | }); 311 | -------------------------------------------------------------------------------- /packages/react-dictate-button/__tests__/simple.checkbox.spec.tsx: -------------------------------------------------------------------------------- 1 | /** @jest-environment @happy-dom/jest-environment */ 2 | 3 | import { act, fireEvent, render, screen } 
from '@testing-library/react'; 4 | import React from 'react'; 5 | import { DictateCheckbox, type DictateEventHandler, type EndEventHandler, type StartEventHandler } from '../src/index'; 6 | import { 7 | SpeechRecognition, 8 | SpeechRecognitionAlternative, 9 | SpeechRecognitionEvent, 10 | SpeechRecognitionResult, 11 | SpeechRecognitionResultList 12 | } from '../src/internal'; 13 | 14 | describe('simple scenario for ', () => { 15 | let constructSpeechRecognition: jest.Mock; 16 | let eventNames: string[]; 17 | let onDictate: jest.Mock, Parameters, undefined>; 18 | let onEnd: jest.Mock, Parameters, undefined>; 19 | let onStart: jest.Mock, Parameters, undefined>; 20 | let start: jest.SpyInstance | undefined; 21 | 22 | beforeEach(() => { 23 | constructSpeechRecognition = jest.fn().mockImplementationOnce(() => { 24 | const speechRecognition = new SpeechRecognition(); 25 | 26 | start = jest.spyOn(speechRecognition, 'start'); 27 | 28 | return speechRecognition; 29 | }); 30 | 31 | eventNames = []; 32 | 33 | onDictate = jest.fn, Parameters, undefined>(() => 34 | eventNames.push('dictate') 35 | ); 36 | 37 | onEnd = jest.fn, Parameters, undefined>(() => eventNames.push('end')); 38 | 39 | onStart = jest.fn, Parameters, undefined>(() => 40 | eventNames.push('start') 41 | ); 42 | 43 | render( 44 | 51 | Click me 52 | 53 | ); 54 | }); 55 | 56 | describe('when the dictate checkbox is checked', () => { 57 | beforeEach(() => { 58 | expect(constructSpeechRecognition).toHaveBeenCalledTimes(0); 59 | 60 | act(() => { 61 | fireEvent.click(screen.getByText('Click me')); 62 | }); 63 | }); 64 | 65 | test('should be checked', () => 66 | expect(screen.getByText('Click me').querySelector('input')).toHaveProperty('checked', true)); 67 | 68 | test('SpeechRecognition object should be constructed', () => 69 | expect(constructSpeechRecognition).toHaveBeenCalledTimes(1)); 70 | 71 | test('start() should have been called once', () => expect(start).toHaveBeenCalledTimes(1)); 72 | test('onStart() should not been called', () => expect(onStart).toHaveBeenCalledTimes(0)); 73 | 74 | describe('when start events are dispatched', () => { 75 | let speechRecognition: SpeechRecognition; 76 | 77 | beforeEach(() => { 78 | act(() => { 79 | speechRecognition = constructSpeechRecognition.mock.results[0]?.value; 80 | 81 | speechRecognition.dispatchEvent(new Event('start', {})); 82 | speechRecognition.dispatchEvent(new Event('audiostart', {})); 83 | speechRecognition.dispatchEvent(new Event('soundstart', {})); 84 | speechRecognition.dispatchEvent(new Event('speechstart', {})); 85 | }); 86 | }); 87 | 88 | test('onStart() should be called', () => expect(onStart).toHaveBeenCalledTimes(1)); 89 | test('onEnd() should not be called', () => expect(onEnd).toHaveBeenCalledTimes(0)); 90 | 91 | describe('when result event is dispatched', () => { 92 | beforeEach(() => { 93 | act(() => { 94 | speechRecognition.dispatchEvent( 95 | new SpeechRecognitionEvent('result', { 96 | resultIndex: 0, 97 | results: new SpeechRecognitionResultList( 98 | SpeechRecognitionResult.fromFinalized(new SpeechRecognitionAlternative(0.9, 'Hello, World!')) 99 | ) 100 | }) 101 | ); 102 | }); 103 | }); 104 | 105 | describe('onDictate() should have been called', () => { 106 | test('once', () => expect(onDictate).toHaveBeenCalledTimes(1)); 107 | test('with result', () => 108 | expect(onDictate).toHaveBeenLastCalledWith( 109 | expect.objectContaining({ 110 | result: { confidence: 0.9, transcript: 'Hello, World!' 
}, 111 | type: 'dictate' 112 | }) 113 | )); 114 | }); 115 | 116 | test('onStart() should not been called again', () => expect(onStart).toHaveBeenCalledTimes(1)); 117 | test('onEnd() should not be called', () => expect(onEnd).toHaveBeenCalledTimes(0)); 118 | 119 | describe('when end events are dispatched', () => { 120 | beforeEach(() => { 121 | act(() => { 122 | speechRecognition.dispatchEvent(new Event('speechend', {})); 123 | speechRecognition.dispatchEvent(new Event('soundend', {})); 124 | speechRecognition.dispatchEvent(new Event('audioend', {})); 125 | speechRecognition.dispatchEvent(new Event('end', {})); 126 | }); 127 | }); 128 | 129 | test('onStart() should not be called again', () => expect(onStart).toHaveBeenCalledTimes(1)); 130 | test('onEnd() should be called', () => expect(onEnd).toHaveBeenCalledTimes(1)); 131 | 132 | test('events should appear in right order', () => expect(eventNames).toEqual(['start', 'dictate', 'end'])); 133 | test('should be unchecked', () => 134 | expect(screen.getByText('Click me').querySelector('input')).toHaveProperty('checked', false)); 135 | }); 136 | }); 137 | }); 138 | }); 139 | }); 140 | -------------------------------------------------------------------------------- /packages/react-dictate-button/__tests__/simple.spec.tsx: -------------------------------------------------------------------------------- 1 | /** @jest-environment @happy-dom/jest-environment */ 2 | 3 | import { act, fireEvent, render, screen } from '@testing-library/react'; 4 | import React from 'react'; 5 | import { DictateButton, type DictateEventHandler, type EndEventHandler, type StartEventHandler } from '../src/index'; 6 | import { 7 | SpeechRecognition, 8 | SpeechRecognitionAlternative, 9 | SpeechRecognitionEvent, 10 | SpeechRecognitionResult, 11 | SpeechRecognitionResultList 12 | } from '../src/internal'; 13 | 14 | describe('simple scenario', () => { 15 | let constructSpeechRecognition: jest.Mock; 16 | let eventNames: string[]; 17 | let onDictate: jest.Mock, Parameters, undefined>; 18 | let onEnd: jest.Mock, Parameters, undefined>; 19 | let onStart: jest.Mock, Parameters, undefined>; 20 | let start: jest.SpyInstance | undefined; 21 | 22 | beforeEach(() => { 23 | constructSpeechRecognition = jest.fn().mockImplementationOnce(() => { 24 | const speechRecognition = new SpeechRecognition(); 25 | 26 | start = jest.spyOn(speechRecognition, 'start'); 27 | 28 | return speechRecognition; 29 | }); 30 | 31 | eventNames = []; 32 | 33 | onDictate = jest.fn, Parameters, undefined>(() => 34 | eventNames.push('dictate') 35 | ); 36 | 37 | onEnd = jest.fn, Parameters, undefined>(() => eventNames.push('end')); 38 | 39 | onStart = jest.fn, Parameters, undefined>(() => 40 | eventNames.push('start') 41 | ); 42 | 43 | render( 44 | 51 | Click me 52 | 53 | ); 54 | }); 55 | 56 | describe('when the dictate button is clicked', () => { 57 | beforeEach(() => { 58 | expect(constructSpeechRecognition).toHaveBeenCalledTimes(0); 59 | 60 | act(() => { 61 | fireEvent.click(screen.getByText('Click me')); 62 | }); 63 | }); 64 | 65 | test('SpeechRecognition object should be constructed', () => 66 | expect(constructSpeechRecognition).toHaveBeenCalledTimes(1)); 67 | 68 | test('start() should have been called once', () => expect(start).toHaveBeenCalledTimes(1)); 69 | test('onStart() should not been called', () => expect(onStart).toHaveBeenCalledTimes(0)); 70 | 71 | describe('when start events are dispatched', () => { 72 | let speechRecognition: SpeechRecognition; 73 | 74 | beforeEach(() => { 75 | act(() => { 76 | 
speechRecognition = constructSpeechRecognition.mock.results[0]?.value; 77 | 78 | speechRecognition.dispatchEvent(new Event('start', {})); 79 | speechRecognition.dispatchEvent(new Event('audiostart', {})); 80 | speechRecognition.dispatchEvent(new Event('soundstart', {})); 81 | speechRecognition.dispatchEvent(new Event('speechstart', {})); 82 | }); 83 | }); 84 | 85 | test('onStart() should have been called once', () => expect(onStart).toHaveBeenCalledTimes(1)); 86 | test('onEnd() should not be called', () => expect(onEnd).toHaveBeenCalledTimes(0)); 87 | 88 | describe('when result events are dispatched', () => { 89 | beforeEach(() => { 90 | act(() => { 91 | speechRecognition.dispatchEvent( 92 | new SpeechRecognitionEvent('result', { 93 | resultIndex: 0, 94 | results: new SpeechRecognitionResultList( 95 | SpeechRecognitionResult.fromFinalized(new SpeechRecognitionAlternative(0.9, 'Hello, World!')) 96 | ) 97 | }) 98 | ); 99 | }); 100 | }); 101 | 102 | describe('onDictate() should have been called', () => { 103 | test('once', () => expect(onDictate).toHaveBeenCalledTimes(1)); 104 | test('with result', () => 105 | expect(onDictate).toHaveBeenLastCalledWith( 106 | expect.objectContaining({ 107 | result: { confidence: 0.9, transcript: 'Hello, World!' }, 108 | type: 'dictate' 109 | }) 110 | )); 111 | }); 112 | 113 | test('onStart() should not have been called again', () => expect(onStart).toHaveBeenCalledTimes(1)); 114 | test('onEnd() should not be called', () => expect(onEnd).toHaveBeenCalledTimes(0)); 115 | 116 | describe('when end events are dispatched', () => { 117 | beforeEach(() => { 118 | act(() => { 119 | speechRecognition.dispatchEvent(new Event('speechend', {})); 120 | speechRecognition.dispatchEvent(new Event('soundend', {})); 121 | speechRecognition.dispatchEvent(new Event('audioend', {})); 122 | speechRecognition.dispatchEvent(new Event('end', {})); 123 | }); 124 | }); 125 | 126 | test('onStart() should not be called again', () => expect(onStart).toHaveBeenCalledTimes(1)); 127 | test('onEnd() should have been called once', () => expect(onEnd).toHaveBeenCalledTimes(1)); 128 | 129 | test('events should appear in the right order', () => expect(eventNames).toEqual(['start', 'dictate', 'end'])); 130 | }); 131 | }); 132 | }); 133 | }); 134 | }); 135 | -------------------------------------------------------------------------------- /packages/react-dictate-button/__tests__/stopAfterOnDictate.spec.tsx: -------------------------------------------------------------------------------- 1 | /** @jest-environment @happy-dom/jest-environment */ 2 | 3 | import { act, fireEvent, render, screen, type RenderResult } from '@testing-library/react'; 4 | import React, { Fragment } from 'react'; 5 | import { DictateButton, type DictateEventHandler } from '../src/index'; 6 | import { 7 | SpeechRecognition, 8 | SpeechRecognitionAlternative, 9 | SpeechRecognitionEvent, 10 | SpeechRecognitionResult, 11 | SpeechRecognitionResultList 12 | } from '../src/internal'; 13 | 14 | describe('with SpeechRecognition object without abort(), stopping after onDictate', () => { 15 | let constructSpeechRecognition: jest.Mock; 16 | let onDictate: jest.Mock, Parameters, undefined>; 17 | let renderResult: RenderResult; 18 | let start: jest.SpyInstance | undefined; 19 | 20 | beforeEach(() => { 21 | jest.spyOn(console, 'warn').mockImplementation(() => {}); 22 | 23 | constructSpeechRecognition = jest.fn().mockImplementationOnce(() => { 24 | const speechRecognition = new SpeechRecognition(); 25 | 26 | // @ts-expect-error forcefully remove abort().
27 | speechRecognition.abort = undefined; 28 | start = jest.spyOn(speechRecognition, 'start'); 29 | 30 | return speechRecognition; 31 | }); 32 | 33 | onDictate = jest.fn(); 34 | 35 | renderResult = render( 36 | 41 | Click me 42 | 43 | ); 44 | 45 | act(() => { 46 | fireEvent.click(screen.getByText('Click me')); 47 | }); 48 | 49 | expect(constructSpeechRecognition).toHaveBeenCalledTimes(1); 50 | expect(start).toHaveBeenCalledTimes(1); 51 | }); 52 | 53 | describe('when speech events are dispatched', () => { 54 | beforeEach(() => { 55 | act(() => { 56 | const speechRecognition: SpeechRecognition = constructSpeechRecognition.mock.results[0]?.value; 57 | 58 | speechRecognition.dispatchEvent(new Event('start', {})); 59 | speechRecognition.dispatchEvent(new Event('audiostart', {})); 60 | speechRecognition.dispatchEvent(new Event('soundstart', {})); 61 | speechRecognition.dispatchEvent(new Event('speechstart', {})); 62 | 63 | speechRecognition.dispatchEvent( 64 | new SpeechRecognitionEvent('result', { 65 | resultIndex: 0, 66 | results: new SpeechRecognitionResultList( 67 | SpeechRecognitionResult.fromFinalized(new SpeechRecognitionAlternative(0.9, 'Hello, World!')) 68 | ) 69 | }) 70 | ); 71 | 72 | speechRecognition.dispatchEvent(new Event('speechend', {})); 73 | speechRecognition.dispatchEvent(new Event('soundend', {})); 74 | speechRecognition.dispatchEvent(new Event('audioend', {})); 75 | speechRecognition.dispatchEvent(new Event('end', {})); 76 | }); 77 | 78 | expect(onDictate).toHaveBeenCalledTimes(1); 79 | }); 80 | 81 | describe('unmounting the button after onDictate', () => { 82 | beforeEach(() => { 83 | renderResult.rerender(<Fragment />); 84 | }); 85 | 86 | test('should warn for unabortable recognition', () => { 87 | expect(console.warn).toHaveBeenCalledTimes(1); 88 | expect(console.warn).toHaveBeenLastCalledWith( 89 | 'react-dictate-state: Cannot stop because SpeechRecognition does not have abort() function.' 90 | ); 91 | }); 92 | }); 93 | }); 94 | 95 | // We cannot test "throw during unmount" because: 96 | // 1. After a throw, React seems stuck in an unrecoverable state where it thinks it is still rendering 97 | // 2. @testing-library/react will call unmount() 98 | // 3.
When calling unmount() while React thinks it is still rendering, it will throw another error and this is not catchable in Jest 99 | }); 100 | -------------------------------------------------------------------------------- /packages/react-dictate-button/__tests__/stopBeforeResult.spec.tsx: -------------------------------------------------------------------------------- 1 | /** @jest-environment @happy-dom/jest-environment */ 2 | 3 | import { act, fireEvent, render, screen, type RenderResult } from '@testing-library/react'; 4 | import React from 'react'; 5 | import { 6 | DictateCheckbox, 7 | type DictateEventHandler, 8 | type EndEventHandler, 9 | type ProgressEventHandler, 10 | type StartEventHandler 11 | } from '../src/index'; 12 | import { 13 | SpeechRecognition, 14 | SpeechRecognitionAlternative, 15 | SpeechRecognitionEvent, 16 | SpeechRecognitionResult, 17 | SpeechRecognitionResultList 18 | } from '../src/internal'; 19 | 20 | describe('when stopping recognition', () => { 21 | let constructSpeechRecognition: jest.Mock; 22 | let onDictate: jest.Mock, Parameters, undefined>; 23 | let onEnd: jest.Mock, Parameters, undefined>; 24 | let onProgress: jest.Mock, Parameters, undefined>; 25 | let onStart: jest.Mock, Parameters, undefined>; 26 | let renderResult: RenderResult; 27 | let start: jest.SpyInstance | undefined; 28 | 29 | beforeEach(() => { 30 | constructSpeechRecognition = jest.fn().mockImplementationOnce(() => { 31 | const speechRecognition = new SpeechRecognition(); 32 | 33 | start = jest.spyOn(speechRecognition, 'start'); 34 | 35 | return speechRecognition; 36 | }); 37 | 38 | onDictate = jest.fn(); 39 | onEnd = jest.fn(); 40 | onProgress = jest.fn(); 41 | onStart = jest.fn(); 42 | 43 | renderResult = render( 44 | 52 | Click me 53 | 54 | ); 55 | 56 | act(() => { 57 | fireEvent.click(screen.getByText('Click me')); 58 | }); 59 | 60 | expect(constructSpeechRecognition).toHaveBeenCalledTimes(1); 61 | expect(start).toHaveBeenCalledTimes(1); 62 | }); 63 | 64 | describe('when interim speech events are dispatched', () => { 65 | beforeEach(() => { 66 | act(() => { 67 | const speechRecognition: SpeechRecognition = constructSpeechRecognition.mock.results[0]?.value; 68 | 69 | speechRecognition.dispatchEvent(new Event('start', {})); 70 | speechRecognition.dispatchEvent(new Event('audiostart', {})); 71 | speechRecognition.dispatchEvent(new Event('soundstart', {})); 72 | speechRecognition.dispatchEvent(new Event('speechstart', {})); 73 | 74 | speechRecognition.dispatchEvent( 75 | new SpeechRecognitionEvent('result', { 76 | resultIndex: 0, 77 | results: new SpeechRecognitionResultList( 78 | new SpeechRecognitionResult(new SpeechRecognitionAlternative(0.9, 'Hello, World!')) 79 | ) 80 | }) 81 | ); 82 | }); 83 | }); 84 | 85 | test('should not emit "onEnd"', () => expect(onEnd).toHaveBeenCalledTimes(0)); 86 | test('should emit "onStart"', () => expect(onStart).toHaveBeenCalledTimes(1)); 87 | 88 | test('should emit "onProgress" twice', () => { 89 | expect(onProgress).toHaveBeenCalledTimes(2); 90 | 91 | // From "audiostart" event, no "results". 92 | expect(onProgress).toHaveBeenNthCalledWith(1, { 93 | abortable: true, 94 | type: 'progress' 95 | }); 96 | 97 | // From "result" event with falsy "isFinal", with interim "results". 98 | expect(onProgress).toHaveBeenNthCalledWith(2, { 99 | abortable: true, 100 | results: [{ confidence: 0.9, transcript: 'Hello, World!' }], 101 | type: 'progress' 102 | }); 103 | }); 104 | 105 | // The paired "dictate" event should only be dispatched immediately before "end" event.
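The pairing above means `result` is optional on a dictate event: when recognition stops before anything is finalized, the component emits `{ type: 'dictate' }` with no `result` right before `end`, as asserted below. A hedged consumer-side sketch; `commit` is a hypothetical downstream function:

```tsx
import { type DictateEventHandler } from 'react-dictate-button';

declare function commit(transcript: string): void; // Hypothetical downstream sink.

const handleDictate: DictateEventHandler = ({ result }) => {
  // "result" is undefined when recognition stopped before a finalized result arrived.
  if (result) {
    commit(result.transcript);
  }
};
```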
106 | test('should not emit "onDictate"', () => expect(onDictate).toHaveBeenCalledTimes(0)); 107 | 108 | describe('when checkbox is unchecked', () => { 109 | beforeEach(() => { 110 | act(() => { 111 | fireEvent.click(screen.getByText('Click me')); 112 | }); 113 | }); 114 | 115 | test('should not emit "onDictate"', () => expect(onDictate).toHaveBeenCalledTimes(0)); 116 | test('should not emit "onEnd"', () => expect(onEnd).toHaveBeenCalledTimes(0)); 117 | 118 | describe('when "end" event is dispatched', () => { 119 | beforeEach(() => { 120 | act(() => { 121 | const speechRecognition: SpeechRecognition = constructSpeechRecognition.mock.results[0]?.value; 122 | 123 | speechRecognition.dispatchEvent(new Event('speechend', {})); 124 | speechRecognition.dispatchEvent(new Event('soundend', {})); 125 | speechRecognition.dispatchEvent(new Event('audioend', {})); 126 | speechRecognition.dispatchEvent(new Event('end', {})); 127 | }); 128 | }); 129 | 130 | test('should emit "onEnd"', () => expect(onEnd).toHaveBeenCalledTimes(1)); 131 | 132 | // If "onProgress" is dispatched, a corresponding "onDictate" event must be dispatched in pair. 133 | test('should emit "onDictate" without "result"', () => { 134 | expect(onDictate).toHaveBeenCalledTimes(1); 135 | expect(onDictate).toHaveBeenLastCalledWith({ type: 'dictate' }); 136 | }); 137 | }); 138 | }); 139 | }); 140 | }); 141 | -------------------------------------------------------------------------------- /packages/react-dictate-button/__tests__/stopWithoutFinalize.spec.tsx: -------------------------------------------------------------------------------- 1 | /** @jest-environment @happy-dom/jest-environment */ 2 | 3 | import { act, fireEvent, render, screen, type RenderResult } from '@testing-library/react'; 4 | import React from 'react'; 5 | import { 6 | DictateButton, 7 | type DictateEventHandler, 8 | type EndEventHandler, 9 | type ProgressEventHandler, 10 | type StartEventHandler 11 | } from '../src/index'; 12 | import { 13 | SpeechRecognition, 14 | SpeechRecognitionAlternative, 15 | SpeechRecognitionEvent, 16 | SpeechRecognitionResult, 17 | SpeechRecognitionResultList 18 | } from '../src/internal'; 19 | 20 | describe('end without "result" event with "isFinal" set to true', () => { 21 | let constructSpeechRecognition: jest.Mock; 22 | let onDictate: jest.Mock, Parameters, undefined>; 23 | let onEnd: jest.Mock, Parameters, undefined>; 24 | let onProgress: jest.Mock, Parameters, undefined>; 25 | let onStart: jest.Mock, Parameters, undefined>; 26 | let renderResult: RenderResult; 27 | let start: jest.SpyInstance | undefined; 28 | 29 | beforeEach(() => { 30 | constructSpeechRecognition = jest.fn().mockImplementationOnce(() => { 31 | const speechRecognition = new SpeechRecognition(); 32 | 33 | start = jest.spyOn(speechRecognition, 'start'); 34 | 35 | return speechRecognition; 36 | }); 37 | 38 | onDictate = jest.fn(); 39 | onEnd = jest.fn(); 40 | onProgress = jest.fn(); 41 | onStart = jest.fn(); 42 | 43 | renderResult = render( 44 | 52 | Click me 53 | 54 | ); 55 | 56 | act(() => { 57 | fireEvent.click(screen.getByText('Click me')); 58 | }); 59 | 60 | expect(constructSpeechRecognition).toHaveBeenCalledTimes(1); 61 | expect(start).toHaveBeenCalledTimes(1); 62 | }); 63 | 64 | describe('when speech events are dispatched', () => { 65 | beforeEach(() => { 66 | act(() => { 67 | const speechRecognition: SpeechRecognition = constructSpeechRecognition.mock.results[0]?.value; 68 | 69 | speechRecognition.dispatchEvent(new Event('start', {})); 70 | 
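// Followed by the rest of the canonical Web Speech start sequence:
// "audiostart" → "soundstart" → "speechstart".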
speechRecognition.dispatchEvent(new Event('audiostart', {})); 71 | speechRecognition.dispatchEvent(new Event('soundstart', {})); 72 | speechRecognition.dispatchEvent(new Event('speechstart', {})); 73 | 74 | speechRecognition.dispatchEvent( 75 | new SpeechRecognitionEvent('result', { 76 | resultIndex: 0, 77 | results: new SpeechRecognitionResultList( 78 | new SpeechRecognitionResult(new SpeechRecognitionAlternative(0.9, 'Hello, World!')) 79 | ) 80 | }) 81 | ); 82 | 83 | speechRecognition.dispatchEvent(new Event('speechend', {})); 84 | speechRecognition.dispatchEvent(new Event('soundend', {})); 85 | speechRecognition.dispatchEvent(new Event('audioend', {})); 86 | speechRecognition.dispatchEvent(new Event('end', {})); 87 | }); 88 | }); 89 | 90 | test('should emit "onEnd"', () => expect(onEnd).toHaveBeenCalledTimes(1)); 91 | test('should emit "onStart"', () => expect(onStart).toHaveBeenCalledTimes(1)); 92 | 93 | test('should emit "onProgress" twice', () => { 94 | expect(onProgress).toHaveBeenCalledTimes(2); 95 | 96 | // From "audiostart" event, no "results". 97 | expect(onProgress).toHaveBeenNthCalledWith(1, { 98 | abortable: true, 99 | type: 'progress' 100 | }); 101 | 102 | // From "result" event with falsy "isFinal", no "results". 103 | expect(onProgress).toHaveBeenNthCalledWith(2, { 104 | abortable: true, 105 | results: [{ confidence: 0.9, transcript: 'Hello, World!' }], 106 | type: 'progress' 107 | }); 108 | }); 109 | 110 | // If "onProgress" is dispatched, a corresponding "onDictate" event must be dispatched in pair. 111 | test('should emit "onDictate" without "result"', () => { 112 | expect(onDictate).toHaveBeenCalledTimes(1); 113 | expect(onDictate).toHaveBeenLastCalledWith({ type: 'dictate' }); 114 | }); 115 | }); 116 | }); 117 | -------------------------------------------------------------------------------- /packages/react-dictate-button/__tests__/types/.gitignore: -------------------------------------------------------------------------------- 1 | *.tmp.tsx 2 | -------------------------------------------------------------------------------- /packages/react-dictate-button/jest.config.json: -------------------------------------------------------------------------------- 1 | { 2 | "testPathIgnorePatterns": ["/__setup__/", "/lib/", "/node_modules/", "/__types__/", "\\.pnp\\.[^\\/]+$"], 3 | "transform": { 4 | "/__tests__/types/": ["/__tests__/__setup__/typingTestTransformer.js"], 5 | "\\.[jt]sx?$": [ 6 | "babel-jest", 7 | { 8 | "plugins": [["babel-plugin-transform-define", { "IS_DEVELOPMENT": true }]], 9 | "presets": [ 10 | ["@babel/preset-react", { "runtime": "classic" }], 11 | ["@babel/preset-typescript", { "allowDeclareFields": true }], 12 | [ 13 | "@babel/preset-env", 14 | { 15 | "modules": "commonjs", 16 | "targets": { "node": "20" } 17 | } 18 | ] 19 | ], 20 | "sourceMaps": true 21 | } 22 | ] 23 | }, 24 | "watchPathIgnorePatterns": ["\\.tmp\\."] 25 | } 26 | -------------------------------------------------------------------------------- /packages/react-dictate-button/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "react-dictate-button", 3 | "version": "0.0.0-0", 4 | "description": "A button to start dictation using Web Speech API, with an easy to understand event lifecycle.", 5 | "files": [ 6 | "./dist/" 7 | ], 8 | "exports": { 9 | "./internal": { 10 | "import": { 11 | "types": "./dist/react-dictate-button.internal.d.mts", 12 | "default": "./dist/react-dictate-button.internal.mjs" 13 | }, 14 | "require": { 15 | 
"types": "./dist/react-dictate-button.internal.d.ts", 16 | "default": "./dist/react-dictate-button.internal.js" 17 | } 18 | }, 19 | ".": { 20 | "import": { 21 | "types": "./dist/react-dictate-button.d.mts", 22 | "default": "./dist/react-dictate-button.mjs" 23 | }, 24 | "require": { 25 | "types": "./dist/react-dictate-button.d.ts", 26 | "default": "./dist/react-dictate-button.js" 27 | } 28 | } 29 | }, 30 | "main": "./dist/react-dictate-button.js", 31 | "typings": "./dist/react-dictate-button.d.ts", 32 | "scripts": { 33 | "build": "tsup", 34 | "bump": "npm run bump:prod && npm run bump:dev", 35 | "bump:dev": "PACKAGES_TO_BUMP=$(cat package.json | jq -r '(.pinDependencies // {}) as $P | (.localPeerDependencies // {}) as $L | (.devDependencies // {}) | to_entries | map(select(.key as $K | $L | has($K) | not)) | map(.key + \"@\" + ($P[.key] // [\"latest\"])[0]) | join(\" \")') && [ ! -z \"$PACKAGES_TO_BUMP\" ] && npm install $PACKAGES_TO_BUMP || true", 36 | "bump:prod": "PACKAGES_TO_BUMP=$(cat package.json | jq -r '(.pinDependencies // {}) as $P | (.localPeerDependencies // {}) as $L | (.dependencies // {}) | to_entries | map(select(.key as $K | $L | has($K) | not)) | map(.key + \"@\" + ($P[.key] // [\"latest\"])[0]) | join(\" \")') && [ ! -z \"$PACKAGES_TO_BUMP\" ] && npm install $PACKAGES_TO_BUMP || true", 37 | "precommit": "npm run precommit:eslint && npm run precommit:publint && npm run precommit:typescript:production && npm run precommit:typescript:test", 38 | "precommit:eslint": "ESLINT_USE_FLAT_CONFIG=false eslint ./src/", 39 | "precommit:publint": "publint", 40 | "precommit:typescript:production": "tsc --noEmit --project ./src/tsconfig.precommit.production.json", 41 | "precommit:typescript:test": "tsc --noEmit --project ./src/tsconfig.precommit.test.json", 42 | "prepack": "cp ../../CHANGELOG.md . && cp ../../LICENSE . 
&& cp ../../README.md .", 43 | "start": "npm run build -- --onSuccess=\"touch ../pages/src/index.jsx ../integration-test/jest.config.json\" --watch", 44 | "switch": "cat package.json | jq --arg SWITCH_NAME $SWITCH_NAME -r '(.[\"switch:\" + $SWITCH_NAME] // {}) as $TEMPLATE | .devDependencies += ($TEMPLATE.devDependencies // {}) | .dependencies += ($TEMPLATE.dependencies // {})' | tee ./package.json.tmp && mv ./package.json.tmp ./package.json", 45 | "test": "jest" 46 | }, 47 | "repository": { 48 | "type": "git", 49 | "url": "git+https://github.com/compulim/react-dictate-button.git" 50 | }, 51 | "keywords": [ 52 | "dictate", 53 | "dictation", 54 | "rtc", 55 | "speech recognition", 56 | "speech to text", 57 | "voice recognition", 58 | "voice", 59 | "web rtc", 60 | "web speech", 61 | "webrtc", 62 | "webspeech" 63 | ], 64 | "author": "William Wong (https://github.com/compulim)", 65 | "license": "MIT", 66 | "bugs": { 67 | "url": "https://github.com/compulim/react-dictate-button/issues" 68 | }, 69 | "homepage": "https://github.com/compulim/react-dictate-button#readme", 70 | "switch:react-16": { 71 | "devDependencies": { 72 | "@testing-library/react": "^12", 73 | "@testing-library/react-hooks": "latest", 74 | "@types/react": "^16", 75 | "@types/react-dom": "^16", 76 | "react": "16.8.6", 77 | "react-dom": "16.8.6", 78 | "react-test-renderer": "16.8.6" 79 | } 80 | }, 81 | "switch:react-17": { 82 | "devDependencies": { 83 | "@testing-library/react": "^12", 84 | "@testing-library/react-hooks": "latest", 85 | "@types/react": "^17", 86 | "@types/react-dom": "^17", 87 | "react": "17.0.0", 88 | "react-dom": "17.0.0", 89 | "react-test-renderer": "17.0.0" 90 | } 91 | }, 92 | "switch:react-18": { 93 | "devDependencies": { 94 | "@testing-library/react": "^16", 95 | "@types/react": "^18", 96 | "@types/react-dom": "^18", 97 | "react": "18.0.0", 98 | "react-dom": "18.0.0", 99 | "react-test-renderer": "18.0.0" 100 | } 101 | }, 102 | "pinDependencies": { 103 | "@testing-library/react": "^16", 104 | "@types/react": "^18", 105 | "@types/react-dom": "^18", 106 | "react": "^18", 107 | "react-dom": "^18", 108 | "react-test-renderer": "^18" 109 | }, 110 | "devDependencies": { 111 | "@babel/preset-env": "^7.25.8", 112 | "@babel/preset-react": "^7.25.7", 113 | "@babel/preset-typescript": "^7.25.7", 114 | "@fluentui/react": "^8.121.4", 115 | "@happy-dom/jest-environment": "^16.7.3", 116 | "@testing-library/dom": "^10.4.0", 117 | "@testing-library/react": "^16.0.1", 118 | "@tsconfig/recommended": "^1.0.7", 119 | "@tsconfig/strictest": "^2.0.5", 120 | "@types/dom-speech-recognition": "^0.0.4", 121 | "@types/jest": "^29.5.13", 122 | "@types/lodash": "^4.17.10", 123 | "@types/node": "^22.7.5", 124 | "@types/react": "^18.3.11", 125 | "@types/react-dom": "^18.3.2", 126 | "babel-plugin-transform-define": "^2.1.4", 127 | "core-js": "^3.38.1", 128 | "esbuild": "^0.24.0", 129 | "jest": "^29.7.0", 130 | "prettier": "^3.3.3", 131 | "publint": "^0.2.11", 132 | "react": "^18.3.1", 133 | "react-dictate-button-mocked-speech-recognition": "^0.0.0-0", 134 | "react-dom": "^18.3.1", 135 | "react-test-renderer": "^18.3.1", 136 | "react-wrap-with": "^0.1.0", 137 | "tsup": "^8.3.0", 138 | "type-fest": "^4.32.0", 139 | "typescript": "^5.6.3" 140 | }, 141 | "peerDependencies": { 142 | "react": ">=16.8.6" 143 | }, 144 | "dependencies": { 145 | "@babel/runtime-corejs3": "^7.14.0", 146 | "core-js": "^3.12.1", 147 | "use-ref-from": "^0.1.0" 148 | } 149 | } 150 | -------------------------------------------------------------------------------- 
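The `exports` map above publishes two entry points, each with paired ESM/CJS builds and matching type declarations. A hedged sketch of consuming both; the `/internal` entry is the one the specs above import their mocked Web Speech classes from:

```tsx
// Public entry: components, hooks and event handler types.
import { DictateButton, type DictateEventHandler } from 'react-dictate-button';

// Internal entry: the mocked Web Speech classes used throughout the test suites.
import { SpeechRecognition, SpeechRecognitionEvent } from 'react-dictate-button/internal';
```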
/packages/react-dictate-button/src/Composer.tsx: -------------------------------------------------------------------------------- 1 | /* eslint no-magic-numbers: ["error", { "ignore": [0, 1, 2, 3] }] */ 2 | 3 | import React, { useCallback, useEffect, useMemo, useRef, useState, type ReactNode } from 'react'; 4 | import { useRefFrom } from 'use-ref-from'; 5 | 6 | import Context from './Context.ts'; 7 | import { type DictateEventHandler } from './DictateEventHandler.ts'; 8 | import { type EndEventHandler } from './EndEventHandler.ts'; 9 | import { type ErrorEventHandler } from './ErrorEventHandler.ts'; 10 | import assert from './private/assert.ts'; 11 | import { type ProgressEventHandler } from './ProgressEventHandler.ts'; 12 | import { type RawEventHandler } from './RawEventHandler.ts'; 13 | import { type SpeechGrammarListPolyfill } from './SpeechGrammarListPolyfill.ts'; 14 | import { type SpeechRecognitionPolyfill } from './SpeechRecognitionPolyfill.ts'; 15 | import { type StartEventHandler } from './StartEventHandler.ts'; 16 | import { type TypedEventHandler } from './TypedEventHandler.ts'; 17 | import usePrevious from './usePrevious.ts'; 18 | import vendorPrefix from './vendorPrefix.ts'; 19 | 20 | type ComposerProps = { 21 | children?: 22 | | (( 23 | context: Readonly<{ 24 | abortable: boolean; 25 | readyState: number; 26 | supported: boolean; 27 | }> 28 | ) => ReactNode) 29 | | ReactNode 30 | | undefined; 31 | /** 32 | * Sets whether speech recognition is in continuous mode or interactive mode. 33 | * 34 | * Modifying this value during recognition will have no effect until restarted. 35 | */ 36 | continuous?: boolean | undefined; 37 | extra?: Record | undefined; 38 | grammar?: string | undefined; 39 | lang?: string | undefined; 40 | onDictate?: DictateEventHandler | undefined; 41 | onEnd?: EndEventHandler | undefined; 42 | onError?: ErrorEventHandler | undefined; 43 | onProgress?: ProgressEventHandler | undefined; 44 | onRawEvent?: RawEventHandler | undefined; 45 | onStart?: StartEventHandler | undefined; 46 | speechGrammarList?: SpeechGrammarListPolyfill | undefined; 47 | speechRecognition?: SpeechRecognitionPolyfill | undefined; 48 | started?: boolean | undefined; 49 | }; 50 | 51 | function recognitionAbortable(recognition: unknown): recognition is { 52 | // eslint-disable-next-line @typescript-eslint/no-explicit-any 53 | abort: () => void; 54 | } { 55 | return !!( 56 | recognition && 57 | typeof recognition === 'object' && 58 | 'abort' in recognition && 59 | typeof recognition.abort === 'function' 60 | ); 61 | } 62 | 63 | const Composer = ({ 64 | children, 65 | continuous, 66 | extra, 67 | grammar, 68 | lang, 69 | onDictate, 70 | onEnd, 71 | onError, 72 | onProgress, 73 | onRawEvent, 74 | onStart, 75 | speechGrammarList = navigator.mediaDevices && 76 | // @ts-expect-error navigator.mediaDevices.getUserMedia may not be defined in older browsers. 77 | navigator.mediaDevices.getUserMedia && 78 | vendorPrefix('SpeechGrammarList'), 79 | speechRecognition = navigator.mediaDevices && 80 | // @ts-expect-error navigator.mediaDevices.getUserMedia may not be defined in older browsers. 
81 | navigator.mediaDevices.getUserMedia && 82 | vendorPrefix<SpeechRecognitionPolyfill>('SpeechRecognition'), 83 | started 84 | }: ComposerProps) => { 85 | const [readyState, setReadyState] = useState(0); 86 | const continuousRef = useRefFrom(continuous); 87 | const extraRef = useRefFrom(extra); 88 | const grammarRef = useRefFrom(grammar); 89 | const langRef = useRefFrom(lang); 90 | const notAllowedRef = useRef(false); 91 | const onDictateRef = useRefFrom(onDictate); 92 | const onEndRef = useRefFrom(onEnd); 93 | const onErrorRef = useRefFrom(onError); 94 | const onProgressRef = useRefFrom(onProgress); 95 | const onRawEventRef = useRefFrom(onRawEvent); 96 | const onStartRef = useRefFrom(onStart); 97 | const prevSpeechRecognition = usePrevious(speechRecognition); 98 | const recognitionRef = useRef<SpeechRecognition>(); 99 | const speechGrammarListRef = useRefFrom(speechGrammarList); 100 | const speechRecognitionClassRef = useRefFrom(speechRecognition); 101 | const stateRef = useRef<'idle' | 'started' | 'has progress' | 'has result' | 'error'>('idle'); 102 | const unmountedRef = useRef(false); 103 | 104 | // If "speechRecognition" ponyfill changed, reset the "notAllowed" flag. 105 | if (prevSpeechRecognition !== speechRecognition) { 106 | notAllowedRef.current = false; 107 | } 108 | 109 | const emitDictate = useCallback<DictateEventHandler>( 110 | event => { 111 | if (unmountedRef.current) { 112 | return; 113 | } 114 | 115 | assert(stateRef.current !== 'started'); 116 | 117 | onDictateRef.current?.(event); 118 | stateRef.current = 'has result'; 119 | }, 120 | [onDictateRef, stateRef] 121 | ); 122 | 123 | const emitEnd = useCallback(() => { 124 | if (unmountedRef.current) { 125 | return; 126 | } 127 | 128 | // "dictate" and "progress" work as a pair. If "progress" was emitted without "dictate", we should emit "dictate" before "end". 129 | if (stateRef.current === 'has progress') { 130 | emitDictate({ type: 'dictate' }); 131 | stateRef.current = 'has result'; 132 | } 133 | 134 | // "start" and "end" work as a pair. If "start" was emitted, we should emit "end" event. 135 | assert(stateRef.current === 'started' || stateRef.current === 'has result' || stateRef.current === 'error'); 136 | 137 | onEndRef.current?.(new Event('end') as Event & { type: 'end' }); 138 | 139 | if (stateRef.current !== 'error') { 140 | stateRef.current = 'idle'; 141 | } 142 | }, [onEndRef, stateRef]); 143 | 144 | const emitError = useCallback<ErrorEventHandler>( 145 | event => { 146 | if (unmountedRef.current) { 147 | return; 148 | } 149 | 150 | onErrorRef.current?.(event); 151 | stateRef.current = 'error'; 152 | }, 153 | [onErrorRef, stateRef] 154 | ); 155 | 156 | const emitProgress = useCallback<ProgressEventHandler>( 157 | event => { 158 | if (unmountedRef.current) { 159 | return; 160 | } 161 | 162 | assert( 163 | stateRef.current === 'started' || stateRef.current === 'has progress' || stateRef.current === 'has result' 164 | ); 165 | 166 | // Web Speech API does not emit "result" when nothing is heard, and Chrome does not emit "nomatch" event. 167 | // Because we emitted onProgress, we should emit "dictate" if not error, so they work as a pair. 168 | onProgressRef.current?.(event); 169 | stateRef.current = 'has progress'; 170 | }, 171 | [onProgressRef, stateRef] 172 | ); 173 | 174 | const emitStart = useCallback(() => { 175 | if (unmountedRef.current) { 176 | return; 177 | } 178 | 179 | assert(stateRef.current === 'idle'); 180 | 181 | // "start" and "end" work as a pair. Initially, or if "end" was emitted, we should emit "start" event.
182 | onStartRef.current?.(new Event('start') as Event & { type: 'start' }); 183 | stateRef.current = 'started'; 184 | }, [onStartRef, stateRef]); 185 | 186 | const handleAudioEnd = useCallback<TypedEventHandler<Event>>( 187 | ({ target }) => target === recognitionRef.current && setReadyState(3), 188 | [recognitionRef, setReadyState] 189 | ); 190 | 191 | const handleAudioStart = useCallback<TypedEventHandler<Event>>( 192 | ({ target }) => { 193 | if (target !== recognitionRef.current) { 194 | return; 195 | } 196 | 197 | setReadyState(2); 198 | emitProgress({ abortable: recognitionAbortable(target), type: 'progress' }); 199 | }, 200 | [emitProgress, recognitionRef, setReadyState] 201 | ); 202 | 203 | const handleEnd = useCallback<TypedEventHandler<Event>>( 204 | ({ target }) => { 205 | if (target !== recognitionRef.current) { 206 | return; 207 | } 208 | 209 | emitEnd(); 210 | setReadyState(0); 211 | 212 | recognitionRef.current = undefined; 213 | }, 214 | [emitEnd, recognitionRef, setReadyState] 215 | ); 216 | 217 | const handleError = useCallback<TypedEventHandler<SpeechRecognitionErrorEvent>>( 218 | event => { 219 | if (event.target !== recognitionRef.current) { 220 | return; 221 | } 222 | 223 | // Error out, no need to emit "dictate" 224 | recognitionRef.current = undefined; 225 | 226 | if (event.error === 'not-allowed') { 227 | notAllowedRef.current = true; 228 | } 229 | 230 | setReadyState(0); 231 | 232 | emitError(event); 233 | emitEnd(); 234 | }, 235 | [emitEnd, emitError, notAllowedRef, recognitionRef, setReadyState] 236 | ); 237 | 238 | const handleRawEvent = useCallback<TypedEventHandler<Event>>( 239 | event => { 240 | if (event.target !== recognitionRef.current) { 241 | return; 242 | } 243 | 244 | onRawEventRef.current?.(event); 245 | }, 246 | [onRawEventRef, recognitionRef] 247 | ); 248 | 249 | const handleResult = useCallback<TypedEventHandler<SpeechRecognitionEvent>>( 250 | ({ resultIndex, results: rawResults, target }) => { 251 | if (target !== recognitionRef.current) { 252 | return; 253 | } 254 | 255 | if (rawResults.length) { 256 | // web-speech-cognitive-services does not emit "resultIndex". 257 | 258 | // Destructuring breaks Angular due to a bug in Zone.js. 259 | // eslint-disable-next-line prefer-destructuring 260 | const rawResult = rawResults[resultIndex ?? rawResults.length - 1]; 261 | 262 | if (rawResult?.isFinal) { 263 | const alt = rawResult[0]; 264 | 265 | alt && 266 | emitDictate({ 267 | result: { 268 | confidence: alt.confidence, 269 | transcript: alt.transcript 270 | }, 271 | type: 'dictate' 272 | }); 273 | } else { 274 | emitProgress({ 275 | abortable: recognitionAbortable(target), 276 | results: Object.freeze( 277 | Array.from(rawResults) 278 | .filter(result => !result.isFinal) 279 | .map(alts => { 280 | // Destructuring breaks Angular due to a bug in Zone.js.
281 | // eslint-disable-next-line prefer-destructuring 282 | const firstAlt = alts[0]; 283 | 284 | return { 285 | confidence: firstAlt?.confidence || 0, 286 | transcript: firstAlt?.transcript || '' 287 | }; 288 | }) 289 | ), 290 | type: 'progress' 291 | }); 292 | } 293 | } 294 | }, 295 | [emitDictate, emitProgress, recognitionRef] 296 | ); 297 | 298 | const handleStart = useCallback<TypedEventHandler<Event>>( 299 | ({ target }) => { 300 | if (target === recognitionRef.current) { 301 | emitStart(); 302 | setReadyState(1); 303 | } 304 | }, 305 | [emitStart, recognitionRef, setReadyState] 306 | ); 307 | 308 | useEffect(() => { 309 | if (!started) { 310 | return; 311 | } 312 | 313 | if (!speechRecognitionClassRef.current || notAllowedRef.current) { 314 | throw new Error('Speech recognition is not supported'); 315 | } else if (recognitionRef.current) { 316 | throw new Error('Speech recognition already started, cannot start a new one.'); 317 | } 318 | 319 | const grammars = speechGrammarListRef.current && grammarRef.current && new speechGrammarListRef.current(); 320 | const recognition = (recognitionRef.current = new speechRecognitionClassRef.current()); 321 | 322 | if (grammars) { 323 | grammars.addFromString(grammarRef.current, 1); 324 | 325 | recognition.grammars = grammars; 326 | } 327 | 328 | if (typeof langRef.current !== 'undefined') { 329 | recognition.lang = langRef.current; 330 | } 331 | 332 | recognition.continuous = !!continuousRef.current; 333 | recognition.interimResults = true; 334 | 335 | recognition.addEventListener('audioend', handleAudioEnd); 336 | recognition.addEventListener('audiostart', handleAudioStart); 337 | recognition.addEventListener('end', handleEnd); 338 | recognition.addEventListener('error', handleError); 339 | recognition.addEventListener('result', handleResult); 340 | recognition.addEventListener('start', handleStart); 341 | 342 | recognition.addEventListener('nomatch', handleRawEvent); 343 | recognition.addEventListener('audioend', handleRawEvent); 344 | recognition.addEventListener('audiostart', handleRawEvent); 345 | recognition.addEventListener('end', handleRawEvent); 346 | recognition.addEventListener('error', handleRawEvent); 347 | recognition.addEventListener('result', handleRawEvent); 348 | recognition.addEventListener('soundend', handleRawEvent); 349 | recognition.addEventListener('soundstart', handleRawEvent); 350 | recognition.addEventListener('speechend', handleRawEvent); 351 | recognition.addEventListener('speechstart', handleRawEvent); 352 | recognition.addEventListener('start', handleRawEvent); 353 | 354 | const { current: extra } = extraRef; 355 | 356 | extra && 357 | Object.entries(extra).forEach(([key, value]) => { 358 | if (key !== 'constructor' && key !== 'prototype' && key !== '__proto__') { 359 | // eslint-disable-next-line @typescript-eslint/no-explicit-any 360 | (recognition as any)[key] = value; 361 | } 362 | }); 363 | 364 | recognition.start(); 365 | 366 | return () => { 367 | if (recognitionAbortable(recognition)) { 368 | recognition.abort(); 369 | } else if (!unmountedRef.current) { 370 | console.warn('react-dictate-button: Cannot stop because SpeechRecognition does not have abort() function.'); 371 | } 372 | }; 373 | }, [ 374 | continuousRef, 375 | emitEnd, 376 | extraRef, 377 | grammarRef, 378 | handleAudioEnd, 379 | handleAudioStart, 380 | handleEnd, 381 | handleError, 382 | handleRawEvent, 383 | handleResult, 384 | handleStart, 385 | langRef, 386 | notAllowedRef, 387 | recognitionRef, 388 | speechGrammarListRef, 389 | speechRecognitionClassRef, 390 | 
started, 391 | stateRef 392 | ] 393 | ); 394 | 395 | useEffect( 396 | () => () => { 397 | unmountedRef.current = true; 398 | }, 399 | [] 400 | ); 401 | 402 | const abortable = recognitionAbortable(recognitionRef.current) && readyState === 2; 403 | const supported = !!speechRecognition && !notAllowedRef.current; 404 | 405 | const context = useMemo( 406 | () => 407 | Object.freeze({ 408 | abortable, 409 | readyState, 410 | supported 411 | }), 412 | [abortable, readyState, supported] 413 | ); 414 | 415 | return ( 416 | <Context.Provider value={context}> 417 | <Context.Consumer>{context => (typeof children === 'function' ? children(context) : children)}</Context.Consumer> 418 | </Context.Provider> 419 | ); 420 | }; 421 | 422 | export default Composer; 423 | export { type ComposerProps }; -------------------------------------------------------------------------------- /packages/react-dictate-button/src/Context.ts: -------------------------------------------------------------------------------- 1 | import { createContext } from 'react'; 2 | 3 | type DictateContextType = Readonly<{ 4 | abortable: boolean; 5 | readyState: number; 6 | supported: boolean; 7 | }>; 8 | 9 | const Context = createContext<DictateContextType>( 10 | Object.freeze({ 11 | abortable: false, 12 | readyState: 0, 13 | supported: true 14 | }) 15 | ); 16 | 17 | export default Context; 18 | export { type DictateContextType }; 19 | -------------------------------------------------------------------------------- /packages/react-dictate-button/src/DictateButton.tsx: -------------------------------------------------------------------------------- 1 | /* eslint no-magic-numbers: ["error", { "ignore": [0, 1, 2, 3] }] */ 2 | 3 | import React, { useCallback, useState, type MouseEventHandler, type ReactNode } from 'react'; 4 | import { useRefFrom } from 'use-ref-from'; 5 | 6 | import Composer from './Composer.tsx'; 7 | import { type DictateEventHandler } from './DictateEventHandler.ts'; 8 | import { type EndEventHandler } from './EndEventHandler.ts'; 9 | import { type ErrorEventHandler } from './ErrorEventHandler.ts'; 10 | import useReadyState from './hooks/useReadyState.ts'; 11 | import useSupported from './hooks/useSupported.ts'; 12 | import { type ProgressEventHandler } from './ProgressEventHandler.ts'; 13 | import { type RawEventHandler } from './RawEventHandler.ts'; 14 | import { type SpeechGrammarListPolyfill } from './SpeechGrammarListPolyfill.ts'; 15 | import { type SpeechRecognitionPolyfill } from './SpeechRecognitionPolyfill.ts'; 16 | import { type StartEventHandler } from './StartEventHandler.ts'; 17 | 18 | type DictateButtonCoreProps = Readonly<{ 19 | children?: ((context: Readonly<{ readyState: number | undefined }>) => ReactNode) | ReactNode | undefined; 20 | className?: string | undefined; 21 | disabled?: boolean | undefined; 22 | onClick?: MouseEventHandler<HTMLButtonElement> | undefined; 23 | }>; 24 | 25 | const DictateButtonCore = ({ children, className, disabled, onClick }: DictateButtonCoreProps) => { 26 | const [readyState] = useReadyState(); 27 | const [supported] = useSupported(); 28 | 29 | return ( 30 | <button 31 | className={className} 32 | disabled={disabled || !supported || readyState === 1 || readyState === 3} 33 | onClick={onClick} 34 | type="button" 35 | > 36 | {typeof children === 'function' ? children({ readyState }) : children} 37 | </button> 38 | ); 39 | }; 40 | 41 | type DictateButtonProps = { 42 | children?: ((context: Readonly<{ readyState: number | undefined }>) => ReactNode) | ReactNode | undefined; 43 | className?: string | undefined; 44 | continuous?: boolean | undefined; 45 | disabled?: boolean | undefined; 46 | extra?: Record<string, unknown> | undefined; 47 | grammar?: string | undefined; 48 | lang?: string | undefined; 49 | onClick?: MouseEventHandler<HTMLButtonElement> | undefined; 50 | onDictate?: DictateEventHandler | undefined; 51 | onEnd?: EndEventHandler | undefined; 52 | onError?: ErrorEventHandler | 
undefined; 53 | onProgress?: ProgressEventHandler | undefined; 54 | onRawEvent?: RawEventHandler | undefined; 55 | onStart?: StartEventHandler | undefined; 56 | speechGrammarList?: SpeechGrammarListPolyfill | undefined; 57 | speechRecognition?: SpeechRecognitionPolyfill | undefined; 58 | }; 59 | 60 | const DictateButton = ({ 61 | children, 62 | className, 63 | continuous, 64 | disabled, 65 | extra, 66 | grammar, 67 | lang, 68 | onClick, 69 | onDictate, 70 | onEnd, 71 | onError, 72 | onProgress, 73 | onRawEvent, 74 | onStart, 75 | speechGrammarList, 76 | speechRecognition 77 | }: DictateButtonProps) => { 78 | const [started, setStarted] = useState(false); 79 | const onClickRef = useRefFrom(onClick); 80 | const onEndRef = useRefFrom(onEnd); 81 | const onErrorRef = useRefFrom(onError); 82 | const onStartRef = useRefFrom(onStart); 83 | 84 | const handleClick = useCallback<MouseEventHandler<HTMLButtonElement>>( 85 | event => { 86 | onClickRef.current && onClickRef.current(event); 87 | !event.isDefaultPrevented() && setStarted(started => !started); 88 | }, 89 | [onClickRef, setStarted] 90 | ); 91 | 92 | const handleEnd = useCallback<EndEventHandler>( 93 | event => { 94 | setStarted(false); 95 | onEndRef.current?.(event); 96 | }, 97 | [onEndRef, setStarted] 98 | ); 99 | 100 | const handleError = useCallback<ErrorEventHandler>( 101 | event => { 102 | setStarted(false); 103 | onErrorRef.current?.(event); 104 | }, 105 | [onErrorRef, setStarted] 106 | ); 107 | 108 | const handleStart = useCallback<StartEventHandler>( 109 | event => { 110 | setStarted(true); 111 | onStartRef.current?.(event); 112 | }, 113 | [onStartRef, setStarted] 114 | ); 115 | 116 | return ( 117 | <Composer 118 | continuous={continuous} 119 | extra={extra} 120 | grammar={grammar} 121 | lang={lang} 122 | onDictate={onDictate} 123 | onEnd={handleEnd} 124 | onError={handleError} 125 | onProgress={onProgress} 126 | onRawEvent={onRawEvent} 127 | onStart={handleStart} 128 | speechGrammarList={speechGrammarList} 129 | speechRecognition={speechRecognition} 130 | started={started} 131 | > 132 | <DictateButtonCore className={className} disabled={disabled} onClick={handleClick}> 133 | {children} 134 | </DictateButtonCore> 135 | </Composer> 136 | ); 137 | }; 138 | 139 | export default DictateButton; 140 | export { type DictateButtonProps }; 141 | -------------------------------------------------------------------------------- /packages/react-dictate-button/src/DictateCheckbox.tsx: -------------------------------------------------------------------------------- 1 | /* eslint no-magic-numbers: ["error", { "ignore": [0, 1, 2, 3] }] */ 2 | 3 | import React, { useCallback, useState, type FormEventHandler, type ReactNode } from 'react'; 4 | 5 | import { useRefFrom } from 'use-ref-from'; 6 | import Composer from './Composer.tsx'; 7 | import { type DictateEventHandler } from './DictateEventHandler.ts'; 8 | import { type EndEventHandler } from './EndEventHandler.ts'; 9 | import { type ErrorEventHandler } from './ErrorEventHandler.ts'; 10 | import useReadyState from './hooks/useReadyState.ts'; 11 | import useSupported from './hooks/useSupported.ts'; 12 | import { type ProgressEventHandler } from './ProgressEventHandler.ts'; 13 | import { type RawEventHandler } from './RawEventHandler.ts'; 14 | import { type SpeechGrammarListPolyfill } from './SpeechGrammarListPolyfill.ts'; 15 | import { type SpeechRecognitionPolyfill } from './SpeechRecognitionPolyfill.ts'; 16 | import { type StartEventHandler } from './StartEventHandler.ts'; 17 | 18 | type DictateCheckboxCoreProps = { 19 | children?: ((context: Readonly<{ readyState: number }>) => ReactNode) | ReactNode | undefined; 20 | className?: string | undefined; 21 | disabled?: boolean | undefined; 22 | onChange?: FormEventHandler<HTMLInputElement>; 23 | started?: boolean; 24 | }; 25 | 26 | const DictateCheckboxCore = ({ children, className, disabled, onChange, started }: DictateCheckboxCoreProps) => { 27 | const [readyState] = useReadyState(); 28 | const [supported] = useSupported(); 29 | 30 | return ( 31 | <label className={className}> 32 | <input 33 | checked={started} 34 | disabled={disabled || !supported || readyState === 1 || readyState === 3} 35 | onChange={onChange} 36 | type="checkbox" 37 | /> 38 | {typeof children === 'function' ? children({ readyState }) : children} 39 | </label> 40 | 41 | ); 42 | }; 43 | 44 | type DictateCheckboxProps = { 45 | children?: ((context: Readonly<{ readyState: 
number }>) => ReactNode) | ReactNode | undefined; 46 | className?: string | undefined; 47 | continuous?: boolean | undefined; 48 | disabled?: boolean | undefined; 49 | extra?: Record<string, unknown> | undefined; 50 | grammar?: string | undefined; 51 | lang?: string | undefined; 52 | onDictate?: DictateEventHandler | undefined; 53 | onEnd?: EndEventHandler | undefined; 54 | onError?: ErrorEventHandler | undefined; 55 | onProgress?: ProgressEventHandler | undefined; 56 | onRawEvent?: RawEventHandler | undefined; 57 | onStart?: StartEventHandler | undefined; 58 | speechGrammarList?: SpeechGrammarListPolyfill | undefined; 59 | speechRecognition?: SpeechRecognitionPolyfill | undefined; 60 | }; 61 | 62 | const DictateCheckbox = ({ 63 | children, 64 | className, 65 | continuous, 66 | disabled, 67 | extra, 68 | grammar, 69 | lang, 70 | onDictate, 71 | onEnd, 72 | onError, 73 | onProgress, 74 | onRawEvent, 75 | onStart, 76 | speechGrammarList, 77 | speechRecognition 78 | }: DictateCheckboxProps) => { 79 | const [started, setStarted] = useState(false); 80 | const onEndRef = useRefFrom(onEnd); 81 | const onErrorRef = useRefFrom(onError); 82 | const onStartRef = useRefFrom(onStart); 83 | 84 | const handleChange = useCallback<FormEventHandler<HTMLInputElement>>( 85 | ({ currentTarget: { checked } }) => setStarted(checked), 86 | [setStarted] 87 | ); 88 | 89 | const handleEnd = useCallback<EndEventHandler>( 90 | event => { 91 | setStarted(false); 92 | onEndRef.current?.(event); 93 | }, 94 | [onEndRef, setStarted] 95 | ); 96 | 97 | const handleError = useCallback<ErrorEventHandler>( 98 | event => { 99 | setStarted(false); 100 | onErrorRef.current?.(event); 101 | }, 102 | [onErrorRef, setStarted] 103 | ); 104 | 105 | const handleStart = useCallback<StartEventHandler>( 106 | event => { 107 | setStarted(true); 108 | onStartRef.current?.(event); 109 | }, 110 | [onStartRef, setStarted] 111 | ); 112 | 113 | return ( 114 | <Composer 115 | continuous={continuous} 116 | extra={extra} 117 | grammar={grammar} 118 | lang={lang} 119 | onDictate={onDictate} 120 | onEnd={handleEnd} 121 | onError={handleError} 122 | onProgress={onProgress} 123 | onRawEvent={onRawEvent} 124 | onStart={handleStart} 125 | speechGrammarList={speechGrammarList} 126 | speechRecognition={speechRecognition} 127 | started={started} 128 | > 129 | <DictateCheckboxCore className={className} disabled={disabled} onChange={handleChange} started={started}> 130 | {children} 131 | </DictateCheckboxCore> 132 | </Composer> 133 | ); 134 | }; 135 | 136 | export default DictateCheckbox; 137 | export { type DictateCheckboxProps }; 138 | -------------------------------------------------------------------------------- /packages/react-dictate-button/src/DictateEventHandler.ts: -------------------------------------------------------------------------------- 1 | import { type TypedEventHandler } from './TypedEventHandler.ts'; 2 | 3 | export type DictateEventHandler = TypedEventHandler<{ 4 | result?: { confidence: number; transcript: string } | undefined; 5 | type: 'dictate'; 6 | }>; 7 | -------------------------------------------------------------------------------- /packages/react-dictate-button/src/EndEventHandler.ts: -------------------------------------------------------------------------------- 1 | import { type TypedEventHandler } from './TypedEventHandler.ts'; 2 | 3 | export type EndEventHandler = TypedEventHandler<{ 4 | type: 'end'; 5 | }>; 6 | -------------------------------------------------------------------------------- /packages/react-dictate-button/src/ErrorEventHandler.ts: -------------------------------------------------------------------------------- 1 | import { type TypedEventHandler } from './TypedEventHandler.ts'; 2 | 3 | export type ErrorEventHandler = TypedEventHandler<SpeechRecognitionErrorEvent>; 4 | -------------------------------------------------------------------------------- /packages/react-dictate-button/src/ProgressEventHandler.ts: -------------------------------------------------------------------------------- 1 | import { type TypedEventHandler } from './TypedEventHandler.ts'; 2 | 3 | export type ProgressEventHandler = TypedEventHandler<{ 4 | abortable: boolean; 5 | results?: readonly 
{ confidence: number; transcript: string }[] | undefined; 6 | type: 'progress'; 7 | }>; 8 | -------------------------------------------------------------------------------- /packages/react-dictate-button/src/RawEventHandler.ts: -------------------------------------------------------------------------------- 1 | import { type TypedEventHandler } from './TypedEventHandler.ts'; 2 | 3 | export type RawEventHandler = TypedEventHandler<Event>; 4 | -------------------------------------------------------------------------------- /packages/react-dictate-button/src/SpeechGrammarListPolyfill.ts: -------------------------------------------------------------------------------- 1 | export interface SpeechGrammarListPolyfill { 2 | new (): SpeechGrammarList; 3 | } 4 | -------------------------------------------------------------------------------- /packages/react-dictate-button/src/SpeechRecognitionPolyfill.ts: -------------------------------------------------------------------------------- 1 | export interface SpeechRecognitionPolyfill { 2 | new (): SpeechRecognition; 3 | } 4 | -------------------------------------------------------------------------------- /packages/react-dictate-button/src/StartEventHandler.ts: -------------------------------------------------------------------------------- 1 | import { type TypedEventHandler } from './TypedEventHandler.ts'; 2 | 3 | export type StartEventHandler = TypedEventHandler<{ 4 | type: 'start'; 5 | }>; 6 | -------------------------------------------------------------------------------- /packages/react-dictate-button/src/TypedEventHandler.ts: -------------------------------------------------------------------------------- 1 | export type TypedEventHandler<T> = (event: T) => void; 2 | -------------------------------------------------------------------------------- /packages/react-dictate-button/src/env.d.ts: -------------------------------------------------------------------------------- 1 | /// <reference types="dom-speech-recognition" /> 2 | -------------------------------------------------------------------------------- /packages/react-dictate-button/src/hooks/internal/useDictateContext.ts: -------------------------------------------------------------------------------- 1 | import { useContext } from 'react'; 2 | 3 | import Context, { type DictateContextType } from '../../Context.ts'; 4 | 5 | export default function useDictateContext(): DictateContextType { 6 | return useContext(Context); 7 | } 8 | -------------------------------------------------------------------------------- /packages/react-dictate-button/src/hooks/useAbortable.ts: -------------------------------------------------------------------------------- 1 | import useDictateContext from './internal/useDictateContext.ts'; 2 | 3 | export default function useAbortable(): readonly [boolean] { 4 | const { abortable } = useDictateContext(); 5 | 6 | return Object.freeze([abortable]); 7 | } 8 | -------------------------------------------------------------------------------- /packages/react-dictate-button/src/hooks/useReadyState.ts: -------------------------------------------------------------------------------- 1 | import useDictateContext from './internal/useDictateContext.ts'; 2 | 3 | export default function useReadyState(): readonly [number] { 4 | const { readyState } = useDictateContext(); 5 | 6 | return Object.freeze([readyState]); 7 | } 8 | -------------------------------------------------------------------------------- /packages/react-dictate-button/src/hooks/useSupported.ts: 
-------------------------------------------------------------------------------- 1 | import useDictateContext from './internal/useDictateContext.ts'; 2 | 3 | export default function useSupported(): readonly [boolean] { 4 | const { supported } = useDictateContext(); 5 | 6 | return Object.freeze([supported]); 7 | } 8 | -------------------------------------------------------------------------------- /packages/react-dictate-button/src/index.spec.ts: -------------------------------------------------------------------------------- 1 | test('import should work', () => { 2 | require('./index'); 3 | }); 4 | -------------------------------------------------------------------------------- /packages/react-dictate-button/src/index.ts: -------------------------------------------------------------------------------- 1 | // eslint-disable-next-line import/no-unassigned-import 2 | import './env.d.ts'; 3 | 4 | import Composer, { type ComposerProps } from './Composer.tsx'; 5 | import Context_ from './Context.ts'; 6 | import DictateButton, { type DictateButtonProps } from './DictateButton.tsx'; 7 | import DictateCheckbox, { type DictateCheckboxProps } from './DictateCheckbox.tsx'; 8 | import { type DictateEventHandler } from './DictateEventHandler.ts'; 9 | import { type EndEventHandler } from './EndEventHandler.ts'; 10 | import { type ErrorEventHandler } from './ErrorEventHandler.ts'; 11 | import useAbortable from './hooks/useAbortable.ts'; 12 | import useReadyState from './hooks/useReadyState.ts'; 13 | import useSupported from './hooks/useSupported.ts'; 14 | import { type ProgressEventHandler } from './ProgressEventHandler.ts'; 15 | import { type RawEventHandler } from './RawEventHandler.ts'; 16 | import { type SpeechGrammarListPolyfill } from './SpeechGrammarListPolyfill.ts'; 17 | import { type SpeechRecognitionPolyfill } from './SpeechRecognitionPolyfill.ts'; 18 | import { type StartEventHandler } from './StartEventHandler.ts'; 19 | import { type TypedEventHandler } from './TypedEventHandler.ts'; 20 | 21 | /** @deprecated Use `useAbortable`, `useReadyState`, and `useSupported` hooks instead. */ 22 | const Context = Context_; 23 | 24 | /** @deprecated Use `import { DictateButton } from 'react-dictate-button'` instead. 
*/ 25 | const DictateButton_ = DictateButton; 26 | 27 | export default DictateButton_; 28 | 29 | export { 30 | Composer, 31 | Context, 32 | DictateButton, 33 | DictateCheckbox, 34 | useAbortable, 35 | useReadyState, 36 | useSupported, 37 | type ComposerProps, 38 | type DictateButtonProps, 39 | type DictateCheckboxProps, 40 | type DictateEventHandler, 41 | type EndEventHandler, 42 | type ErrorEventHandler, 43 | type ProgressEventHandler, 44 | type RawEventHandler, 45 | type SpeechGrammarListPolyfill, 46 | type SpeechRecognitionPolyfill, 47 | type StartEventHandler, 48 | type TypedEventHandler 49 | }; 50 | -------------------------------------------------------------------------------- /packages/react-dictate-button/src/internal.ts: -------------------------------------------------------------------------------- 1 | export * from 'react-dictate-button-mocked-speech-recognition'; 2 | -------------------------------------------------------------------------------- /packages/react-dictate-button/src/private/assert.ts: -------------------------------------------------------------------------------- 1 | declare global { 2 | const IS_DEVELOPMENT: boolean; 3 | } 4 | 5 | export default function assert(truthy: boolean) { 6 | if (IS_DEVELOPMENT && !truthy) { 7 | throw new Error('Assertion failed.'); 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /packages/react-dictate-button/src/tsconfig.custom.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "@tsconfig/strictest/tsconfig.json" 3 | } 4 | -------------------------------------------------------------------------------- /packages/react-dictate-button/src/tsconfig.json: -------------------------------------------------------------------------------- 1 | // This configuration file is for VSCode only. 
2 | { 3 | "compilerOptions": { 4 | "allowImportingTsExtensions": true, 5 | "esModuleInterop": true, 6 | "jsx": "react", 7 | "module": "ESNext", 8 | "moduleResolution": "Bundler", 9 | "noEmit": true, 10 | "strict": true, 11 | "target": "ESNext", 12 | "types": [ 13 | "jest", 14 | "node" 15 | ] 16 | }, 17 | "extends": "./tsconfig.custom.json" 18 | } 19 | -------------------------------------------------------------------------------- /packages/react-dictate-button/src/tsconfig.precommit.production.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "allowImportingTsExtensions": true, 4 | "esModuleInterop": true, 5 | "jsx": "react", 6 | "module": "ESNext", 7 | "moduleResolution": "Bundler", 8 | "noEmit": true, 9 | "strict": true, 10 | "target": "ESNext", 11 | "types": [] 12 | }, 13 | "exclude": [ 14 | "**/*.spec.ts", 15 | "**/*.spec.tsx", 16 | "**/*.test.ts", 17 | "**/*.test.tsx", 18 | "__tests__/**/*" 19 | ], 20 | "extends": "./tsconfig.custom.json" 21 | } 22 | -------------------------------------------------------------------------------- /packages/react-dictate-button/src/tsconfig.precommit.test.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "allowImportingTsExtensions": true, 4 | "esModuleInterop": true, 5 | "jsx": "react", 6 | "module": "ESNext", 7 | "moduleResolution": "Bundler", 8 | "noEmit": true, 9 | "strict": true, 10 | "target": "ESNext", 11 | "types": [ 12 | "jest", 13 | "node" 14 | ] 15 | }, 16 | "extends": "@tsconfig/recommended/tsconfig.json", 17 | "include": [ 18 | "**/*.spec.ts", 19 | "**/*.spec.tsx", 20 | "**/*.test.ts", 21 | "**/*.test.tsx", 22 | "__tests__/**/*" 23 | ] 24 | } 25 | -------------------------------------------------------------------------------- /packages/react-dictate-button/src/tsconfig.test.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "noEmit": true, 4 | "skipLibCheck": true // "@fluentui/*" has internal conflicts on their types 5 | }, 6 | "extends": "./tsconfig.json", 7 | "include": ["**/*.spec.*", "**/*.test.*"] 8 | } 9 | -------------------------------------------------------------------------------- /packages/react-dictate-button/src/usePrevious.ts: -------------------------------------------------------------------------------- 1 | import { useEffect, useRef } from 'react'; 2 | 3 | export default function usePrevious<T>(value: T): T | undefined { 4 | const ref = useRef<T>(); 5 | 6 | useEffect(() => { 7 | ref.current = value; 8 | }); 9 | 10 | return ref.current; 11 | } 12 | -------------------------------------------------------------------------------- /packages/react-dictate-button/src/vendorPrefix.ts: -------------------------------------------------------------------------------- 1 | // eslint-disable-next-line @typescript-eslint/no-explicit-any 2 | export default function vendorPrefix<T = any>(name: string): T | undefined { 3 | if (typeof window !== 'undefined') { 4 | // eslint-disable-next-line @typescript-eslint/no-explicit-any 5 | return name in window && typeof (window as Record<string, any>)[name] !== 'undefined' 6 | ? 
// eslint-disable-next-line @typescript-eslint/no-explicit-any 7 | (window as Record<string, any>)[name] 8 | : // eslint-disable-next-line @typescript-eslint/no-explicit-any 9 | (window as Record<string, any>)[`webkit${name}`]; 10 | } 11 | 12 | return; 13 | } 14 | -------------------------------------------------------------------------------- /packages/react-dictate-button/tsup.config.ts: -------------------------------------------------------------------------------- 1 | import { defineConfig } from 'tsup'; 2 | 3 | export default defineConfig([ 4 | { 5 | define: { IS_DEVELOPMENT: 'false' }, 6 | dts: true, 7 | entry: { 8 | 'react-dictate-button': './src/index.ts', 9 | 'react-dictate-button.internal': './src/internal.ts' 10 | }, 11 | format: ['cjs', 'esm'], 12 | sourcemap: true 13 | } 14 | ]); 15 | --------------------------------------------------------------------------------
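For orientation, here is a minimal consumer of the public API exported by `src/index.ts`. This sample app is not part of the package; it only uses documented exports (`DictateButton` and `DictateEventHandler`) and assumes a browser with Web Speech API support:

```tsx
import React, { useCallback, useState } from 'react';
import { DictateButton, type DictateEventHandler } from 'react-dictate-button';

// Hypothetical app component: renders the dictate button and shows the
// final transcript delivered through the "dictate" event.
const App = () => {
  const [transcript, setTranscript] = useState('');

  const handleDictate = useCallback<DictateEventHandler>(
    // event.result is absent when recognition ends without hearing anything.
    event => setTranscript(event.result?.transcript ?? ''),
    []
  );

  return (
    <React.Fragment>
      <DictateButton onDictate={handleDictate}>Start dictation</DictateButton>
      <p>{transcript}</p>
    </React.Fragment>
  );
};

export default App;
```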
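The `useAbortable`, `useReadyState`, and `useSupported` hooks read the context that `Composer` provides, so they must be called from a component rendered under one. A sketch of a custom status readout built that way; the `Status` component is ours, not an export of the package, and the readyState annotations are inferred from Composer's setReadyState calls:

```tsx
import React from 'react';
import { Composer, useAbortable, useReadyState, useSupported } from 'react-dictate-button';

// Illustrative only: each hook returns a frozen one-element tuple.
const Status = () => {
  const [abortable] = useAbortable();
  const [readyState] = useReadyState(); // 0 = idle, 1 = starting, 2 = dictating, 3 = stopping
  const [supported] = useSupported();

  return <pre>{JSON.stringify({ abortable, readyState, supported })}</pre>;
};

const App = () => (
  <Composer>
    <Status />
  </Composer>
);

export default App;
```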
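Finally, the `IS_DEVELOPMENT` global consumed by `src/private/assert.ts` is injected at build time: the tsup config above defines it as the literal `'false'` for published bundles, turning every `assert()` body into dead code that minification can drop. An equivalent stand-alone esbuild invocation, shown only to illustrate the mechanism; the output path is made up:

```ts
import { build } from 'esbuild';

// "define" substitutes the bare IS_DEVELOPMENT identifier with a literal,
// the same way the "define" key works in tsup.config.ts above.
await build({
  bundle: true,
  define: { IS_DEVELOPMENT: 'false' },
  entryPoints: ['./src/index.ts'],
  external: ['react'],
  format: 'esm',
  outfile: './dist/example-react-dictate-button.mjs' // hypothetical output path
});
```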