├── .babelrc
├── .circleci
└── config.yml
├── .eslintrc
├── .gitignore
├── .npmignore
├── .prettierrc
├── README.md
├── examples
└── src
│ ├── images
│ ├── favicon.ico
│ └── github.png
│ ├── index.html
│ ├── index.jsx
│ ├── shared.js
│ ├── useSpeechRecognition.jsx
│ └── useSpeechSynthesis.jsx
├── jest.setup.js
├── package.json
├── src
├── index.js
├── useSpeechRecognition.js
└── useSpeechSynthesis.jsx
├── test
├── mocks
│ ├── MockRecognition.jsx
│ ├── MockSynthesis.js
│ └── MockUtterance.js
├── shared
│ └── speechSynthesisTests.jsx
├── useSpeechRecognition.spec.jsx
└── useSpeechSynthesis.spec.jsx
├── webpack.config.js
└── yarn.lock
/.babelrc:
--------------------------------------------------------------------------------
1 | {
2 | "presets": ["env", "react"]
3 | }
4 |
--------------------------------------------------------------------------------
/.circleci/config.yml:
--------------------------------------------------------------------------------
1 | # Javascript Node CircleCI 2.0 configuration file
2 | #
3 | # Check https://circleci.com/docs/2.0/language-javascript/ for more details
4 | #
5 | version: 2.1
6 | jobs:
7 | build:
8 | docker:
9 | # specify the version you desire here
10 | - image: circleci/node:10.15
11 |
12 | # Specify service dependencies here if necessary
13 | # CircleCI maintains a library of pre-built images
14 | # documented at https://circleci.com/docs/2.0/circleci-images/
15 | # - image: circleci/mongo:3.4.4
16 |
17 | working_directory: ~/repo
18 |
19 | steps:
20 | - checkout
21 |
22 | # Download and cache dependencies
23 | - restore_cache:
24 | keys:
25 | - v1-dependencies-{{ checksum "package.json" }}
26 | # fallback to using the latest cache if no exact match is found
27 | - v1-dependencies-
28 |
29 | - run: yarn install
30 |
31 | - save_cache:
32 | paths:
33 | - node_modules
34 | key: v1-dependencies-{{ checksum "package.json" }}
35 |
36 | # run tests!
37 | - run: yarn test
38 | - run:
39 | name: Generate code coverage
40 | command: './node_modules/.bin/codecov'
41 |
--------------------------------------------------------------------------------
/.eslintrc:
--------------------------------------------------------------------------------
1 | {
2 | "globals": {
3 | "document": true,
4 | "window": true
5 | },
6 | "env": {
7 | "jest/globals": true
8 | },
9 | "extends": ["airbnb", "plugin:prettier/recommended"],
10 | "parser": "babel-eslint",
11 | "parserOptions": {
12 | "ecmaFeatures": {
13 | "experimentalObjectRestSpread": true,
14 | "jsx": true,
15 | "es6": true
16 | },
17 | "sourceType": "module"
18 | },
19 | "plugins": ["babel", "jest", "react", "prettier"],
20 | "rules": {
21 | "prettier/prettier": "error",
22 | "object-curly-newline": "off",
23 | "jsx-a11y/label-has-associated-control": [
24 | "error",
25 | {
26 | "labelComponents": [],
27 | "labelAttributes": [],
28 | "controlComponents": [],
29 | "assert": "either",
30 | "depth": 25
31 | }
32 | ],
33 | "jsx-a11y/label-has-for": [
34 | 2,
35 | {
36 | "required": {
37 | "some": ["nesting", "id"]
38 | }
39 | }
40 | ],
41 | "import/no-extraneous-dependencies": [
42 | "error",
43 | { "devDependencies": ["examples/src/*", "**/*.spec.js", "**/*.spec.jsx"] }
44 | ],
45 | "import/no-unresolved": ["error", { "ignore": ["^react$"] }]
46 | }
47 | }
48 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | dist
2 | coverage
3 | .DS_Store
4 | /node_modules
5 | yarn-error.log
6 |
--------------------------------------------------------------------------------
/.npmignore:
--------------------------------------------------------------------------------
1 | # Logs
2 | *.log
3 |
4 | # Development files
5 | src
6 | examples
7 |
8 | # Test
9 | coverage
10 | test
11 |
12 | # Config
13 | .gitignore
14 | .babelrc
15 | webpack.config.js
16 | .circleci
17 | jest.setup.js
18 | .eslintrc
19 |
--------------------------------------------------------------------------------
/.prettierrc:
--------------------------------------------------------------------------------
1 | {
2 | "useTabs": false,
3 | "singleQuote": true
4 | }
5 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # react-speech-kit 🎤 [](https://circleci.com/gh/MikeyParton/react-speech-kit/tree/master) [](https://codecov.io/gh/MikeyParton/react-speech-kit)
2 |
3 | React hooks for in-browser Speech Recognition and Speech Synthesis.
4 | [Demo here](https://mikeyparton.github.io/react-speech-kit/)
5 |
6 | ## Table of Contents
7 |
8 | - [Install](#install)
9 | - [Examples and Demo](#examples-and-demo)
10 | - [useSpeechSynthesis](#usespeechsynthesis)
11 | - [Usage](#usage)
12 | - [Args](#args)
13 | - [Returns](#returns)
14 | - [useSpeechRecognition](#usespeechrecognition)
15 | - [Usage](#usage-1)
16 | - [Args](#args-1)
17 | - [Returns](#returns-1)
18 |
19 | ## Install
20 |
21 | ```bash
22 | yarn add react-speech-kit
23 | ```
24 |
25 | ## Examples and Demo
26 |
27 | A full example can be found [here](https://mikeyparton.github.io/react-speech-kit/). The source code is in the [examples directory](https://github.com/MikeyParton/react-speech-kit/tree/master/examples/src).
28 |
29 | ## useSpeechSynthesis
30 |
31 | A react hook for the browser's [SpeechSynthesis API](https://developer.mozilla.org/en-US/docs/Web/API/SpeechSynthesis). It exposes the options and controls to the underlying SpeechSynthesis in the browser.
32 |
33 | ### Usage
34 |
35 | ```jsx
36 | import React, { useState } from 'react';
37 | import { useSpeechSynthesis } from 'react-speech-kit';
38 |
39 | function Example() {
40 | const [value, setValue] = useState('');
41 | const { speak } = useSpeechSynthesis();
42 |
43 | return (
44 |
45 |
51 | );
52 | }
53 | ```
54 |
55 | ### Args
56 |
57 | #### onEnd
58 |
59 | `function()` _optional_
60 |
61 | Called when SpeechSynthesis finishes reading the text or is cancelled. It is called with no arguments. Very useful for triggering a state change after something has been read.
62 |
63 | ### Returns
64 |
65 | useSpeechSynthesis returns an object which contains the following:
66 |
67 | #### speak
68 |
69 | `function({ text: string, voice: SpeechSynthesisVoice })`
70 |
71 | Call to make the browser read some text. You can change the voice by passing an available SpeechSynthesisVoice (from the voices array). Note that some browsers require a direct user action to initiate SpeechSynthesis. To make sure it works, it is recommended that you call speak for the first time in a click event handler.
72 |
73 | #### cancel
74 |
75 | `function()`
76 |
77 | Call to make SpeechSynthesis stop reading.
78 |
79 | #### speaking
80 |
81 | `boolean`
82 |
83 | True when SpeechSynthesis is actively speaking.
84 |
85 | #### supported
86 |
87 | `boolean`
88 |
89 | Will be true if the browser supports SpeechSynthesis. Keep this in mind and use this as a guard before rendering any control that allows a user to call speak.
90 |
91 | #### voices
92 |
93 | `[SpeechSynthesisVoice]`
94 |
95 | An array of available voices which can be passed to the speak function. An example SpeechSynthesisVoice voice has the following properties.
96 |
97 | ```
98 | {
99 | default: true
100 | lang: "en-AU"
101 | localService: true
102 | name: "Karen"
103 | voiceURI: "Karen"
104 | }
105 | ```
106 |
107 | In some browsers voices load asynchronously. In these cases, the array will be empty until they are available.
108 |
109 | ## useSpeechRecognition
110 |
111 | A react hook for the browser's [SpeechRecognition API](https://developer.mozilla.org/en-US/docs/Web/API/SpeechRecognition).
112 |
113 | ### Usage
114 |
115 | ```jsx
116 | import React, { useState } from 'react';
117 | import { useSpeechRecognition } from 'react-speech-kit';
118 |
119 | function Example() {
120 | const [value, setValue] = useState('');
121 | const { listen, listening, stop } = useSpeechRecognition({
122 | onResult: (result) => {
123 | setValue(result);
124 | },
125 | });
126 |
127 | return (
128 |
138 | );
139 | }
140 | ```
141 |
142 | ### Args
143 |
144 | #### onEnd
145 |
146 | `function()`
147 |
148 | Called when SpeechRecognition stops listening.
149 |
150 | #### onResult
151 |
152 | `function(string)`
153 |
154 | Called when SpeechRecognition has a result. It is called with a string containing a transcript of the recognized speech.
155 |
156 | ### Returns
157 |
158 | useSpeechRecognition returns an object which contains the following:
159 |
160 | #### listen
161 |
162 | `function({ interimResults: boolean, lang: string })`
163 |
164 | Call to make the browser start listening for input. Every time it processes a result, it will forward a transcript to the provided onResult function. You can modify behavior by passing the following arguments:
165 |
166 | - **lang**
167 | `string`
168 | The language the SpeechRecognition will try to interpret the input in. Use the languageCode from this list of languages that Chrome supports ([here](https://cloud.google.com/speech-to-text/docs/languages)) e.g: "en-AU". If not specified, this defaults to the HTML lang attribute value, or the user agent's language setting if that isn't set either.
169 |
170 | - **interimResults**
171 | `boolean` _(default: true)_
172 | SpeechRecognition can provide realtime results as it's trying to figure out the best match for the input. Set to false if you only want the final result.
173 |
174 | #### stop
175 |
176 | `function()`
177 |
178 | Call to make SpeechRecognition stop listening. This will call the provided onEnd function as well.
179 |
180 | #### listening
181 |
182 | `boolean`
183 |
184 | True when SpeechRecognition is actively listening.
185 |
186 | #### supported
187 |
188 | `boolean`
189 |
190 | Will be true if the browser supports SpeechRecognition. Keep this in mind and use this as a guard before rendering any control that allows a user to call listen.
191 |
--------------------------------------------------------------------------------
/examples/src/images/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MikeyParton/react-speech-kit/d8f42d3968d2034c3a45fd3d7c33c4e83de62774/examples/src/images/favicon.ico
--------------------------------------------------------------------------------
/examples/src/images/github.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MikeyParton/react-speech-kit/d8f42d3968d2034c3a45fd3d7c33c4e83de62774/examples/src/images/github.png
--------------------------------------------------------------------------------
/examples/src/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 | react-speech-kit | Demo
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 | You need to enable JavaScript to run this app.
12 |
13 |
14 |
15 |
16 |
--------------------------------------------------------------------------------
/examples/src/index.jsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { render } from 'react-dom';
3 | import SpeechSynthesisExample from './useSpeechSynthesis';
4 | import SpeechRecognitionExample from './useSpeechRecognition';
5 | import { GlobalStyles, Row, GitLink, Title } from './shared';
6 | import gh from './images/github.png';
7 |
8 | const App = () => (
9 |
10 |
11 |
12 | {'react-speech-kit '}
13 |
14 | 🎤
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 | By MikeyParton
25 |
26 |
27 |
28 | );
29 |
30 | render( , document.getElementById('root'));
31 |
--------------------------------------------------------------------------------
/examples/src/shared.js:
--------------------------------------------------------------------------------
import styled, { createGlobalStyle } from 'styled-components';

// Page-wide styles for the demo: loads the Lato web font and sets the
// light grey page background.
export const GlobalStyles = createGlobalStyle`
  @import url('https://fonts.googleapis.com/css?family=Lato');

  html {
    background-color: #EBECEC;
    font-family: 'Lato', sans-serif;
  }
`;

// Horizontal, centered, wrapping layout row for the example cards.
export const Row = styled.div`
  display: flex;
  justify-content: center;
  flex-wrap: wrap;
`;

// Centered page heading.
export const Title = styled.h1`
  text-align: center;
`;

// Footer-style link row holding the GitHub icon and author credit.
export const GitLink = styled.div`
  align-items: center;
  display: flex;
  justify-content: center;
  margin-top: 20px;

  img {
    height: 32px;
    margin-right: 10px;
    width: 32px;
  }
`;

// Card wrapper for each example; also styles the form controls
// (button, select, textarea, label) rendered inside it.
export const Container = styled.div`
  border-radius: 10px;
  background-color: lightgrey;
  padding: 20px;
  margin: 0 20px 20px 20px;
  max-width: 300px;

  button {
    border-radius: 4px;
    font-size: 16px;
    padding: 8px;
    text-align: center;
    width: 100%;
  }

  h2 {
    margin-top: 0;
  }

  label {
    display: block;
    font-weight: bold;
    margin-bottom: 4px;
  }

  select,
  textarea {
    font-size: 16px;
    margin-bottom: 12px;
    width: 100%;
  }

  textarea {
    border: 1px solid darkgrey;
    border-radius: 10px;
    padding: 8px;
    resize: none;
  }
`;
74 |
--------------------------------------------------------------------------------
/examples/src/useSpeechRecognition.jsx:
--------------------------------------------------------------------------------
1 | import React, { useState } from 'react';
2 | import { useSpeechRecognition } from '../../src';
3 | import { Container } from './shared';
4 |
5 | const languageOptions = [
6 | { label: 'Cambodian', value: 'km-KH' },
7 | { label: 'Deutsch', value: 'de-DE' },
8 | { label: 'English', value: 'en-AU' },
9 | { label: 'Farsi', value: 'fa-IR' },
10 | { label: 'Français', value: 'fr-FR' },
11 | { label: 'Italiano', value: 'it-IT' },
12 | { label: '普通话 (中国大陆) - Mandarin', value: 'zh' },
13 | { label: 'Portuguese', value: 'pt-BR' },
14 | { label: 'Español', value: 'es-MX' },
15 | { label: 'Svenska - Swedish', value: 'sv-SE' },
16 | ];
17 |
18 | const Example = () => {
19 | const [lang, setLang] = useState('en-AU');
20 | const [value, setValue] = useState('');
21 | const [blocked, setBlocked] = useState(false);
22 |
23 | const onEnd = () => {
24 | // You could do something here after listening has finished
25 | };
26 |
27 | const onResult = (result) => {
28 | setValue(result);
29 | };
30 |
31 | const changeLang = (event) => {
32 | setLang(event.target.value);
33 | };
34 |
35 | const onError = (event) => {
36 | if (event.error === 'not-allowed') {
37 | setBlocked(true);
38 | }
39 | };
40 |
41 | const { listen, listening, stop, supported } = useSpeechRecognition({
42 | onResult,
43 | onEnd,
44 | onError,
45 | });
46 |
47 | const toggle = listening
48 | ? stop
49 | : () => {
50 | setBlocked(false);
51 | listen({ lang });
52 | };
53 |
54 | return (
55 |
56 |
103 |
104 | );
105 | };
106 |
107 | export default Example;
108 |
--------------------------------------------------------------------------------
/examples/src/useSpeechSynthesis.jsx:
--------------------------------------------------------------------------------
1 | import React, { useState } from 'react';
2 | import { useSpeechSynthesis } from '../../src';
3 | import { Container } from './shared';
4 |
5 | const Example = () => {
6 | const [text, setText] = useState('I am a robot');
7 | const [pitch, setPitch] = useState(1);
8 | const [rate, setRate] = useState(1);
9 | const [voiceIndex, setVoiceIndex] = useState(null);
10 | const onEnd = () => {
11 | // You could do something here after speaking has finished
12 | };
13 | const { speak, cancel, speaking, supported, voices } = useSpeechSynthesis({
14 | onEnd,
15 | });
16 |
17 | const voice = voices[voiceIndex] || null;
18 |
19 | const styleFlexRow = { display: 'flex', flexDirection: 'row' };
20 | const styleContainerRatePitch = {
21 | display: 'flex',
22 | flexDirection: 'column',
23 | marginBottom: 12,
24 | };
25 |
26 | return (
27 |
28 |
29 | Speech Synthesis
30 | {!supported && (
31 |
32 | Oh no, it looks like your browser doesn't support Speech
33 | Synthesis.
34 |
35 | )}
36 | {supported && (
37 |
38 |
39 | {`Type a message below then click 'Speak'
40 | and SpeechSynthesis will read it out.`}
41 |
42 | Voice
43 | {
48 | setVoiceIndex(event.target.value);
49 | }}
50 | >
51 | Default
52 | {voices.map((option, index) => (
53 |
54 | {`${option.lang} - ${option.name}`}
55 |
56 | ))}
57 |
58 |
59 |
60 |
Rate:
61 |
{rate}
62 |
63 |
{
71 | setRate(event.target.value);
72 | }}
73 | />
74 |
75 |
76 |
77 |
Pitch:
78 |
{pitch}
79 |
80 |
{
88 | setPitch(event.target.value);
89 | }}
90 | />
91 |
92 | Message
93 | {
99 | setText(event.target.value);
100 | }}
101 | />
102 | {speaking ? (
103 |
104 | Stop
105 |
106 | ) : (
107 | speak({ text, voice, rate, pitch })}
110 | >
111 | Speak
112 |
113 | )}
114 |
115 | )}
116 |
117 |
118 | );
119 | };
120 |
121 | export default Example;
122 |
--------------------------------------------------------------------------------
/jest.setup.js:
--------------------------------------------------------------------------------
import { configure } from 'enzyme';
import Adapter from 'enzyme-adapter-react-16';

// Wire enzyme to the React 16 renderer before any test runs
// (loaded via the "setupFiles" entry in package.json's jest config).
configure({ adapter: new Adapter() });
5 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "react-speech-kit",
3 | "version": "3.0.1",
4 | "description": "React hooks for in-browser Speech Recognition and Speech Synthesis.",
5 | "main": "dist/index.js",
6 | "scripts": {
7 | "build": "babel src -d dist --copy-files",
8 | "start": "webpack-dev-server --mode development",
9 | "lint": "./node_modules/.bin/eslint src examples/src --ext .js,.jsx",
10 | "test": "jest",
11 | "build-demo": "webpack --mode production",
12 | "deploy-demo": "gh-pages -d examples/dist",
13 | "publish-demo": "npm run build-demo && npm run deploy-demo"
14 | },
15 | "author": "Michael Parton",
16 | "license": "MIT",
17 | "peerDependencies": {
18 | "prop-types": "^15.7.2",
19 | "react": "^16.8.0"
20 | },
21 | "devDependencies": {
22 | "babel-cli": "^6.26.0",
23 | "babel-core": "^6.26.3",
24 | "babel-eslint": "^10.0.1",
25 | "babel-jest": "^23.0.0",
26 | "babel-loader": "7",
27 | "babel-preset-env": "^1.7.0",
28 | "babel-preset-react": "^6.24.1",
29 | "codecov": "^3.2.0",
30 | "enzyme": "^3.9.0",
31 | "enzyme-adapter-react-16": "^1.11.2",
32 | "eslint": "^5.15.0",
33 | "eslint-config-airbnb": "^17.1.0",
34 | "eslint-config-prettier": "^6.11.0",
35 | "eslint-plugin-babel": "^5.3.0",
36 | "eslint-plugin-import": "^2.16.0",
37 | "eslint-plugin-jest": "^22.3.2",
38 | "eslint-plugin-jsx-a11y": "^6.2.1",
39 | "eslint-plugin-prettier": "^3.1.3",
40 | "eslint-plugin-react": "^7.12.4",
41 | "file-loader": "^3.0.1",
42 | "gh-pages": "^2.0.1",
43 | "html-loader": "^0.5.5",
44 | "html-webpack-plugin": "^3.2.0",
45 | "jest": "^23.0.0",
46 | "prettier": "^2.0.5",
47 | "prop-types": "^15.7.2",
48 | "react": "^16.8.0",
49 | "react-dom": "^16.8.3",
50 | "styled-components": "^4.1.3",
51 | "webpack": "^4.29.6",
52 | "webpack-cli": "^3.2.3",
53 | "webpack-dev-server": "^3.2.1"
54 | },
55 | "dependencies": {},
56 | "jest": {
57 | "coverageDirectory": "./coverage/",
58 | "collectCoverage": true,
59 | "moduleFileExtensions": [
60 | "jsx",
61 | "js"
62 | ],
63 | "setupFiles": [
64 | "/jest.setup.js"
65 | ]
66 | }
67 | }
68 |
--------------------------------------------------------------------------------
/src/index.js:
--------------------------------------------------------------------------------
// Public entry point: re-export the two hooks that make up the library's API.
export { default as useSpeechRecognition } from './useSpeechRecognition';
export { default as useSpeechSynthesis } from './useSpeechSynthesis';
3 |
--------------------------------------------------------------------------------
/src/useSpeechRecognition.js:
--------------------------------------------------------------------------------
1 | import { useRef, useEffect, useState, useCallback } from 'react';
2 |
3 | /**
4 | * Custom hook similar to useCallback, but for callbacks where the dependencies
5 | * change frequently. Ensures that references to state and props inside the
6 | * callback always get the latest values. Used to keep the `listen` and `stop`
7 | * functions in sync with the latest values of the `listening` and `supported`
8 | * state variables. See this issue for an example of why this is needed:
9 | *
10 | * https://github.com/MikeyParton/react-speech-kit/issues/31
11 | *
12 | * Implementation taken from "How to read an often-changing value from
13 | * useCallback?" in the React hooks API reference:
14 | *
15 | * https://reactjs.org/docs/hooks-faq.html#how-to-read-an-often-changing-value-from-usecallback
16 | */
const useEventCallback = (fn, dependencies) => {
  // Holds the freshest callback. The initial value throws so that any
  // invocation during the render phase is surfaced as a bug.
  const latest = useRef(() => {
    throw new Error('Cannot call an event handler while rendering.');
  });

  // After each render where `fn` or a dependency changed, repoint the
  // ref at the newest callback so it closes over current state/props.
  useEffect(() => {
    latest.current = fn;
  }, [fn, ...dependencies]);

  // Referentially stable wrapper that always dispatches to the newest
  // callback stored in the ref.
  return useCallback((args) => {
    const callback = latest.current;
    return callback(args);
  }, [latest]);
};
31 |
/**
 * React hook wrapping the browser SpeechRecognition API
 * (window.SpeechRecognition / window.webkitSpeechRecognition).
 *
 * @param {Object} [props]
 * @param {Function} [props.onEnd] - called once when listening is stopped via `stop`.
 * @param {Function} [props.onResult] - called with the concatenated transcript
 *   string each time the recognizer produces a result.
 * @param {Function} [props.onError] - called with the recognition error event.
 * @returns {{ listen: Function, listening: boolean, stop: Function, supported: boolean }}
 */
const useSpeechRecognition = (props = {}) => {
  const { onEnd = () => {}, onResult = () => {}, onError = () => {} } = props;
  // Single SpeechRecognition instance, created once on mount (see effect below).
  const recognition = useRef(null);
  const [listening, setListening] = useState(false);
  const [supported, setSupported] = useState(false);

  // Join the best (first) alternative of every result into one transcript
  // string and forward it to the caller.
  const processResult = (event) => {
    const transcript = Array.from(event.results)
      .map((result) => result[0])
      .map((result) => result.transcript)
      .join('');

    onResult(transcript);
  };

  const handleError = (event) => {
    if (event.error === 'not-allowed') {
      // Microphone permission denied: remove the auto-restart onend handler
      // so the recognizer doesn't immediately try to start again, and
      // reflect the stopped state.
      recognition.current.onend = () => {};
      setListening(false);
    }
    onError(event);
  };

  // useEventCallback keeps `listen` referentially stable while still reading
  // the latest `listening`/`supported` values (see useEventCallback above).
  const listen = useEventCallback((args = {}) => {
    if (listening || !supported) return;
    const {
      lang = '',
      interimResults = true,
      continuous = false,
      maxAlternatives = 1,
      grammars,
    } = args;
    setListening(true);
    recognition.current.lang = lang;
    recognition.current.interimResults = interimResults;
    recognition.current.onresult = processResult;
    recognition.current.onerror = handleError;
    recognition.current.continuous = continuous;
    recognition.current.maxAlternatives = maxAlternatives;
    if (grammars) {
      recognition.current.grammars = grammars;
    }
    // SpeechRecognition stops automatically after inactivity
    // We want it to keep going until we tell it to stop
    recognition.current.onend = () => recognition.current.start();
    recognition.current.start();
  }, [listening, supported, recognition]);

  const stop = useEventCallback(() => {
    if (!listening || !supported) return;
    // Detach all handlers first so stopping doesn't trigger the
    // auto-restart onend installed by `listen`.
    recognition.current.onresult = () => {};
    recognition.current.onend = () => {};
    recognition.current.onerror = () => {};
    setListening(false);
    recognition.current.stop();
    onEnd();
  }, [listening, supported, recognition, onEnd]);

  // Feature-detect once on mount; aliases the prefixed WebKit constructor
  // onto window.SpeechRecognition before checking for support.
  useEffect(() => {
    if (typeof window === 'undefined') return;
    window.SpeechRecognition =
      window.SpeechRecognition || window.webkitSpeechRecognition;
    if (window.SpeechRecognition) {
      setSupported(true);
      recognition.current = new window.SpeechRecognition();
    }
  }, []);

  return {
    listen,
    listening,
    stop,
    supported,
  };
};

export default useSpeechRecognition;
109 |
--------------------------------------------------------------------------------
/src/useSpeechSynthesis.jsx:
--------------------------------------------------------------------------------
1 | import { useEffect, useState } from 'react';
2 |
/**
 * React hook wrapping the browser SpeechSynthesis API.
 *
 * @param {Object} [props]
 * @param {Function} [props.onEnd] - invoked (with no arguments) when an
 *   utterance finishes or is cancelled.
 * @returns {{ supported: boolean, speak: Function, speaking: boolean,
 *   cancel: Function, voices: Array }}
 */
const useSpeechSynthesis = (props = {}) => {
  const { onEnd = () => {} } = props;
  const [voices, setVoices] = useState([]);
  const [speaking, setSpeaking] = useState(false);
  const [supported, setSupported] = useState(false);

  // Store whichever voice list we end up with in state.
  const processVoices = (voiceOptions) => {
    setVoices(voiceOptions);
  };

  const getVoices = () => {
    // Firefox seems to have voices upfront and never calls the
    // voiceschanged event
    const upfront = window.speechSynthesis.getVoices();
    if (upfront.length > 0) {
      processVoices(upfront);
      return;
    }

    // Other browsers (e.g. Chrome) deliver the voice list asynchronously
    // through the voiceschanged event instead.
    window.speechSynthesis.onvoiceschanged = (event) => {
      processVoices(event.target.getVoices());
    };
  };

  // Shared "utterance finished" handler: clear the flag, then notify the caller.
  const handleEnd = () => {
    setSpeaking(false);
    onEnd();
  };

  // Feature-detect once on mount and kick off voice loading.
  useEffect(() => {
    if (typeof window !== 'undefined' && window.speechSynthesis) {
      setSupported(true);
      getVoices();
    }
  }, []);

  const speak = (args = {}) => {
    if (!supported) return;
    const { voice = null, text = '', rate = 1, pitch = 1, volume = 1 } = args;
    setSpeaking(true);
    // Firefox won't repeat an utterance that has been
    // spoken, so we need to create a new instance each time
    const utterance = new window.SpeechSynthesisUtterance();
    utterance.text = text;
    utterance.voice = voice;
    utterance.onend = handleEnd;
    utterance.rate = rate;
    utterance.pitch = pitch;
    utterance.volume = volume;
    window.speechSynthesis.speak(utterance);
  };

  const cancel = () => {
    if (!supported) return;
    setSpeaking(false);
    window.speechSynthesis.cancel();
  };

  return {
    supported,
    speak,
    speaking,
    cancel,
    voices,
  };
};

export default useSpeechSynthesis;
72 |
--------------------------------------------------------------------------------
/test/mocks/MockRecognition.jsx:
--------------------------------------------------------------------------------
// Stand-in for the browser SpeechRecognition constructor used in tests.
// Instances report the settings they were started with through the static
// `MockRecognition.start` spy, and fake an async result via setTimeout.
class MockRecognition {
  constructor() {
    this.onresult = () => {};
    this.onend = () => {};
    this.onerror = () => {};

    this.start = () => {
      try {
        // Forward the instance's current settings to the static spy so
        // tests can assert they were applied correctly.
        MockRecognition.start({
          lang: this.lang,
          interimResults: this.interimResults
        });

        // Pretend the browser takes half a second to hear something,
        // then deliver the canned result and finish.
        setTimeout(() => {
          this.onresult(MockRecognition.mockResult);
          this.onend();
        }, 500);
      } catch (caughtError) {
        this.onerror(caughtError);
      }
    };

    this.stop = () => {
      MockRecognition.stop();
      this.onend();
    };
  }
}
30 |
// The mocked instance functions are exposed as statics on the class so we
// can spy on them from the tests. mockResult mimics the nested
// SpeechRecognitionResultList shape: results[resultIndex][alternativeIndex].transcript.
MockRecognition.mockResult = { results: [[{ transcript: 'I hear you' }]] };
MockRecognition.start = jest.fn();
MockRecognition.stop = jest.fn();

export default MockRecognition;
38 |
--------------------------------------------------------------------------------
/test/mocks/MockSynthesis.js:
--------------------------------------------------------------------------------
// Minimal stand-in for window.speechSynthesis used by the test suite.
// Spy implementations of speak/cancel and the canned voice list are
// attached as statics below the class.
class MockSynthesis {
  // Mirror of speechSynthesis.getVoices(): return the canned voice list.
  static getVoices() {
    return MockSynthesis.mockVoices;
  }

  // Test-teardown helper: forget the utterance currently being "spoken".
  static reset() {
    MockSynthesis.utterance = undefined;
  }
}
11 |
// jest spies let the tests assert how the hook drives the synthesis API.
MockSynthesis.speak = jest.fn((utterance) => {
  // Save a reference to the utterance, so we can call its
  // onend when it is cancelled
  MockSynthesis.utterance = utterance;
  // Let's pretend it takes 500ms to finish speaking
  setTimeout(() => {
    MockSynthesis.reset();
    utterance.onend();
  }, 500);
});

MockSynthesis.cancel = jest.fn(() => {
  // Cancelling mid-speech fires the utterance's onend, mirroring how the
  // hook expects browsers to behave; a no-op when nothing is speaking.
  if (MockSynthesis.utterance) {
    MockSynthesis.utterance.onend();
    MockSynthesis.reset();
  }
});

// Single canned voice matching the shape of a real SpeechSynthesisVoice.
MockSynthesis.mockVoices = [{
  default: true,
  lang: 'en-AU',
  localService: true,
  name: 'Karen',
  voiceURI: 'Karen'
}];

export default MockSynthesis;
39 |
--------------------------------------------------------------------------------
/test/mocks/MockUtterance.js:
--------------------------------------------------------------------------------
// Bare-bones replacement for window.SpeechSynthesisUtterance. Only the
// `text` property is modelled here; the hook under test assigns everything
// else (voice, rate, pitch, volume, onend) directly onto the instance.
class MockUtterance {
  constructor(initialText = '') {
    this.text = initialText;
  }
}

export default MockUtterance;
8 |
--------------------------------------------------------------------------------
/test/shared/speechSynthesisTests.jsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { mount } from 'enzyme';
3 | import { act } from 'react-dom/test-utils';
4 | import MockSynthesis from '../mocks/MockSynthesis';
5 | import MockUtterance from '../mocks/MockUtterance';
6 |
// Shared jest/enzyme suite for useSpeechSynthesis. Each spec file supplies
// its own Example wrapper, the TestComponent that receives the hook's return
// values as props, and a mockOnEnd spy passed to the hook.
//
// NOTE(review): the `mount( )` calls below appear to have lost their JSX
// argument (presumably `<Example ... />`) during extraction of this copy —
// confirm against the repository before running.
const speechSynthesisTests = ({
  Example,
  TestComponent,
  mockOnEnd
}) => {
  jest.useFakeTimers();

  describe('SpeechSynthesis', () => {
    let wrapper;

    beforeEach(() => {
      // Install the mocks as the browser globals the hook reads, then
      // reset all mock/timer state between tests.
      window.speechSynthesis = MockSynthesis;
      window.SpeechSynthesisUtterance = MockUtterance;
      MockSynthesis.reset();
      jest.clearAllTimers();
      jest.clearAllMocks();
      wrapper = mount( );
    });

    describe('initial props', () => {
      it('passes supported: true ', () => {
        expect(wrapper.find(TestComponent).props().supported).toBe(true);
      });

      it('passes speaking: false', () => {
        expect(wrapper.find(TestComponent).props().speaking).toBe(false);
      });

      // Firefox-style behavior: getVoices() is populated synchronously.
      describe('when window.speechSynthesis.getVoices returns voices immediately', () => {
        it('passes voices', () => {
          expect(wrapper.find(TestComponent).props().voices).toEqual(MockSynthesis.mockVoices);
        });
      });

      // Chrome-style behavior: getVoices() is empty at first and the list
      // arrives later via the voiceschanged event.
      describe('when window.speechSynthesis.getVoices returns voices async', () => {
        beforeEach(() => {
          MockSynthesis.getVoices = () => {
            setTimeout(() => {
              MockSynthesis.getVoices = () => MockSynthesis.mockVoices;
              MockSynthesis.onvoiceschanged({ target: MockSynthesis });
            }, 500);
            return [];
          };

          wrapper = mount( );
        });

        it('passes voices: [] at first', () => {
          expect(wrapper.find(TestComponent).props().voices).toEqual([]);
        });

        it('passes voices when they load', () => {
          act(() => {
            jest.advanceTimersByTime(500);
          });
          wrapper.update();
          expect(wrapper.find(TestComponent).props().voices).toEqual(MockSynthesis.mockVoices);
        });
      });
    });

    describe('when speechSynthesis is unsupported', () => {
      beforeEach(() => {
        window.speechSynthesis = undefined;
        wrapper = mount( );
      });

      it('passes supported: false ', () => {
        expect(wrapper.find(TestComponent).props().supported).toBe(false);
      });
    });

    describe('speak()', () => {
      // `args` is undefined for the default-args cases below and is set by
      // the nested beforeAll for the "passing args" cases.
      let args;
      beforeEach(() => {
        act(() => {
          wrapper.find(TestComponent).props().speak(args);
        });
      });

      it('calls window.speechSynthesis.speak with default args', () => {
        expect(MockSynthesis.speak.mock.calls.length).toBe(1);
        const receivedArgs = MockSynthesis.speak.mock.calls[0][0];
        expect(receivedArgs.text).toEqual('');
        expect(receivedArgs.voice).toEqual(null);
      });

      it('passes speaking: true', () => {
        wrapper.update();
        expect(wrapper.find(TestComponent).props().speaking).toBe(true);
      });

      it('passes speaking: false and calls the provided onEnd prop when finished', () => {
        // MockSynthesis "finishes speaking" after 500ms of fake time.
        act(() => {
          jest.advanceTimersByTime(500);
        });
        wrapper.update();
        expect(mockOnEnd.mock.calls.length).toBe(1);
        expect(wrapper.find(TestComponent).props().speaking).toBe(false);
      });

      describe('passing args', () => {
        beforeAll(() => {
          args = {
            text: 'Hello this is a test',
            voice: 'test voice'
          };
        });

        it('calls window.speechSynthesis.speak with provided args', () => {
          const receivedArgs = MockSynthesis.speak.mock.calls[0][0];
          expect(receivedArgs.text).toEqual('Hello this is a test');
          expect(receivedArgs.voice).toEqual('test voice');
        });
      });
    });

    describe('cancel()', () => {
      describe('while speaking', () => {
        it('calls window.speechSynthesis.cancel and the onEnd prop, then passes speaking: false', () => {
          // Cancel halfway through the 500ms fake utterance.
          act(() => {
            const testComponent = wrapper.find(TestComponent);
            testComponent.props().speak({
              text: 'Hello this is a test',
              voice: 'test voice'
            });
            jest.advanceTimersByTime(250);
            testComponent.props().cancel();
          });

          expect(MockSynthesis.cancel.mock.calls.length).toBe(1);
          expect(mockOnEnd.mock.calls.length).toBe(1);
          expect(wrapper.find(TestComponent).props().speaking).toBe(false);
        });
      });

      describe('while not speaking', () => {
        it('calls window.speechSynthesis.cancel, but does not call the onEnd prop', () => {
          act(() => {
            wrapper.find(TestComponent).props().cancel();
          });
          expect(MockSynthesis.cancel.mock.calls.length).toBe(1);
          expect(mockOnEnd.mock.calls.length).toBe(0);
        });
      });
    });
  });
};

export default speechSynthesisTests;
157 |
--------------------------------------------------------------------------------
/test/useSpeechRecognition.spec.jsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { mount } from 'enzyme';
3 | import { act } from 'react-dom/test-utils';
4 | import MockRecognition from './mocks/MockRecognition';
5 | import useSpeechRecognition from '../src/useSpeechRecognition';
6 |
7 | describe('useSpeechRecognition', () => {
8 | let wrapper;
9 | jest.useFakeTimers();
10 | const mockOnEnd = jest.fn();
11 | const mockOnResult = jest.fn();
12 | const mockOnError = jest.fn();
13 | const TestComponent = () => null;
14 | const Example = () => {
15 | const props = useSpeechRecognition({
16 | onResult: mockOnResult,
17 | onEnd: mockOnEnd,
18 | onError: mockOnError,
19 | });
20 |
21 | return ;
22 | };
23 |
24 | // A test component with `listen` and `stop` in closures to test that they
25 | // reference the latest value of `listening`
26 | const ClosureExample = () => {
27 | const props = useSpeechRecognition({
28 | onResult: mockOnResult,
29 | onEnd: mockOnEnd,
30 | onError: mockOnError,
31 | });
32 |
33 | props.listenHandler = () => {
34 | setTimeout(() => props.listen(), 1000);
35 | };
36 |
37 | props.stopHandler = () => {
38 | setTimeout(() => props.stop(), 1000);
39 | };
40 |
41 | return ;
42 | };
43 |
44 | beforeEach(() => {
45 | jest.clearAllTimers();
46 | jest.clearAllMocks();
47 | window.SpeechRecognition = MockRecognition;
48 | wrapper = mount( );
49 | });
50 |
51 | describe('initial props', () => {
52 | it('passes supported: true ', () => {
53 | expect(wrapper.find(TestComponent).props().supported).toBe(true);
54 | });
55 |
56 | it('passes speaking: false', () => {
57 | expect(wrapper.find(TestComponent).props().listening).toBe(false);
58 | });
59 | });
60 |
61 | describe('when SpeechRecognition is unsupported', () => {
62 | beforeEach(() => {
63 | window.SpeechRecognition = undefined;
64 | wrapper = mount( );
65 | });
66 |
67 | it('passes supported: false ', () => {
68 | expect(wrapper.find(TestComponent).props().supported).toBe(false);
69 | });
70 | });
71 |
72 | describe('listen()', () => {
73 | let args;
74 |
75 | beforeEach(() => {
76 | act(() => {
77 | wrapper.find(TestComponent).props().listen(args);
78 | });
79 | wrapper.update();
80 | });
81 |
82 | it('calls speak on the window.speechSynthesis instance with default args', () => {
83 | expect(MockRecognition.start.mock.calls.length).toBe(1);
84 | const receivedArgs = MockRecognition.start.mock.calls[0][0];
85 | expect(receivedArgs.lang).toEqual('');
86 | expect(receivedArgs.interimResults).toBe(true);
87 | });
88 |
89 | it('passes listening: true', () => {
90 | expect(wrapper.find(TestComponent).props().listening).toBe(true);
91 | });
92 |
93 | it('calls the provided onResult prop with the transcript', () => {
94 | // MockRecognition gives result after 500ms
95 | jest.advanceTimersByTime(500);
96 | expect(mockOnResult.mock.calls.length).toBe(1);
97 | expect(mockOnResult.mock.calls[0][0]).toEqual('I hear you');
98 | });
99 |
100 | it('calls start again on the window.speechSynthesis instance if it tries to stop', () => {
101 | // MockRecognition tries to finish after 500ms
102 | jest.advanceTimersByTime(1000);
103 | expect(MockRecognition.start.mock.calls.length).toBe(3);
104 | });
105 |
106 | describe('passing args', () => {
107 | beforeAll(() => {
108 | args = {
109 | lang: 'en-US',
110 | interimResults: false,
111 | };
112 | });
113 |
114 | it('applies the args to the window.speechSynthesis instance', () => {
115 | const receivedArgs = MockRecognition.start.mock.calls[0][0];
116 | expect(receivedArgs.lang).toEqual('en-US');
117 | expect(receivedArgs.interimResults).toBe(false);
118 | });
119 | });
120 |
121 | describe('when the user blocks permission', () => {
122 | beforeAll(() => {
123 | MockRecognition.start = jest.fn(() => {
124 | throw new Error('not allowed');
125 | });
126 | });
127 |
128 | it('calls the onError function', () => {
129 | expect(mockOnError.mock.calls.length).toBe(1);
130 | });
131 | });
132 |
133 | describe('when already listening', () => {
134 | beforeEach(() => {
135 | wrapper.update();
136 | act(() => {
137 | wrapper.find(TestComponent).props().listen(args);
138 | });
139 | });
140 |
141 | it('does not call start on the window.speechSynthesis instance again', () => {
142 | expect(MockRecognition.start.mock.calls.length).toBe(1);
143 | });
144 | });
145 |
146 | describe('when already listening, in a closure', () => {
147 | it('does not call start on the window.speechSynthesis instance again', () => {
148 | jest.clearAllMocks();
149 | const wrapper = mount( );
150 | act(() => {
151 | wrapper.find(TestComponent).props().listen();
152 | wrapper.find(TestComponent).props().listenHandler();
153 | });
154 |
155 | wrapper.update();
156 | act(() => {
157 | jest.advanceTimersByTime(1000);
158 | });
159 |
160 | wrapper.update();
161 | expect(MockRecognition.start.mock.calls.length).toBe(1);
162 | });
163 | });
164 | });
165 |
166 | describe('stop()', () => {
167 | describe('while listening', () => {
168 | it('calls stop on the window.speechSynthesis instance and the provided onEnd prop, then passes listening: false', () => {
169 | act(() => {
170 | wrapper.find(TestComponent).props().listen();
171 | });
172 |
173 | wrapper.update();
174 |
175 | act(() => {
176 | wrapper.find(TestComponent).props().stop();
177 | });
178 |
179 | wrapper.update();
180 | expect(MockRecognition.stop.mock.calls.length).toBe(1);
181 | expect(mockOnEnd.mock.calls.length).toBe(1);
182 | expect(wrapper.find(TestComponent).props().listening).toBe(false);
183 | });
184 | });
185 |
186 | describe('while not listening', () => {
187 | it('does not call stop on the window.speechSynthesis instance or the provided onEnd prop', () => {
188 | act(() => {
189 | wrapper.find(TestComponent).props().stop();
190 | });
191 |
192 | expect(MockRecognition.stop.mock.calls.length).toBe(0);
193 | expect(mockOnEnd.mock.calls.length).toBe(0);
194 | });
195 | });
196 |
197 | describe('while listening, in a closure', () => {
198 | it('calls stop on the window.speechSynthesis instance and the provided onEnd prop, then passes listening: false', () => {
199 |
200 | const wrapper = mount( );
201 | act(() => {
202 | wrapper.find(TestComponent).props().listen();
203 | wrapper.find(TestComponent).props().stopHandler();
204 | });
205 |
206 | wrapper.update();
207 |
208 | act(() => {
209 | jest.advanceTimersByTime(1000);
210 | });
211 |
212 | wrapper.update();
213 | expect(MockRecognition.stop.mock.calls.length).toBe(1);
214 | expect(mockOnEnd.mock.calls.length).toBe(1);
215 | expect(wrapper.find(TestComponent).props().listening).toBe(false);
216 | });
217 | });
218 | });
219 | });
220 |
--------------------------------------------------------------------------------
/test/useSpeechSynthesis.spec.jsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { mount } from 'enzyme';
3 | import { act } from 'react-dom/test-utils';
4 | import useSpeechSynthesis from '../src/useSpeechSynthesis';
5 | import MockSynthesis from './mocks/MockSynthesis';
6 | import MockUtterance from './mocks/MockUtterance';
7 |
8 | describe('SpeechSynthesis', () => {
9 | let wrapper;
10 | jest.useFakeTimers();
11 | const mockOnEnd = jest.fn();
12 | const TestComponent = () => null;
13 | const Example = () => {
14 | const props = useSpeechSynthesis({ onEnd: mockOnEnd });
15 | return ;
16 | };
17 |
18 | beforeEach(() => {
19 | window.speechSynthesis = MockSynthesis;
20 | window.SpeechSynthesisUtterance = MockUtterance;
21 | MockSynthesis.reset();
22 | jest.clearAllTimers();
23 | jest.clearAllMocks();
24 | wrapper = mount( );
25 | });
26 |
27 | describe('initial props', () => {
28 | it('passes supported: true ', () => {
29 | expect(wrapper.find(TestComponent).props().supported).toBe(true);
30 | });
31 |
32 | it('passes speaking: false', () => {
33 | expect(wrapper.find(TestComponent).props().speaking).toBe(false);
34 | });
35 |
36 | describe('when window.speechSynthesis.getVoices returns voices immediately', () => {
37 | it('passes voices', () => {
38 | expect(wrapper.find(TestComponent).props().voices).toEqual(
39 | MockSynthesis.mockVoices
40 | );
41 | });
42 | });
43 |
44 | describe('when window.speechSynthesis.getVoices returns voices async', () => {
45 | beforeEach(() => {
46 | MockSynthesis.getVoices = () => {
47 | setTimeout(() => {
48 | MockSynthesis.getVoices = () => MockSynthesis.mockVoices;
49 | MockSynthesis.onvoiceschanged({ target: MockSynthesis });
50 | }, 500);
51 | return [];
52 | };
53 |
54 | wrapper = mount( );
55 | });
56 |
57 | it('passes voices: [] at first', () => {
58 | expect(wrapper.find(TestComponent).props().voices).toEqual([]);
59 | });
60 |
61 | it('passes voices when they load', () => {
62 | act(() => {
63 | jest.advanceTimersByTime(500);
64 | });
65 | wrapper.update();
66 | expect(wrapper.find(TestComponent).props().voices).toEqual(
67 | MockSynthesis.mockVoices
68 | );
69 | });
70 | });
71 | });
72 |
73 | describe('when speechSynthesis is unsupported', () => {
74 | beforeEach(() => {
75 | window.speechSynthesis = undefined;
76 | wrapper = mount( );
77 | });
78 |
79 | it('passes supported: false ', () => {
80 | expect(wrapper.find(TestComponent).props().supported).toBe(false);
81 | });
82 | });
83 |
84 | describe('speak()', () => {
85 | let args;
86 | beforeEach(() => {
87 | act(() => {
88 | wrapper.find(TestComponent).props().speak(args);
89 | });
90 | });
91 |
92 | it('calls window.speechSynthesis.speak with default args', () => {
93 | expect(MockSynthesis.speak.mock.calls.length).toBe(1);
94 | const receivedArgs = MockSynthesis.speak.mock.calls[0][0];
95 | expect(receivedArgs.text).toEqual('');
96 | expect(receivedArgs.voice).toEqual(null);
97 | });
98 |
99 | it('passes speaking: true', () => {
100 | wrapper.update();
101 | expect(wrapper.find(TestComponent).props().speaking).toBe(true);
102 | });
103 |
104 | it('passes speaking: false and calls the provided onEnd prop when finished', () => {
105 | act(() => {
106 | jest.advanceTimersByTime(500);
107 | });
108 | wrapper.update();
109 | expect(mockOnEnd.mock.calls.length).toBe(1);
110 | expect(wrapper.find(TestComponent).props().speaking).toBe(false);
111 | });
112 |
113 | describe('passing args', () => {
114 | beforeAll(() => {
115 | args = {
116 | text: 'Hello this is a test',
117 | voice: 'test voice',
118 | };
119 | });
120 |
121 | it('calls window.speechSynthesis.speak with provided args', () => {
122 | const receivedArgs = MockSynthesis.speak.mock.calls[0][0];
123 | expect(receivedArgs.text).toEqual('Hello this is a test');
124 | expect(receivedArgs.voice).toEqual('test voice');
125 | });
126 | });
127 | });
128 |
129 | describe('cancel()', () => {
130 | describe('while speaking', () => {
131 | it('calls window.speechSynthesis.cancel and the onEnd prop, then passes speaking: false', () => {
132 | act(() => {
133 | const testComponent = wrapper.find(TestComponent);
134 | testComponent.props().speak({
135 | text: 'Hello this is a test',
136 | voice: 'test voice',
137 | });
138 | jest.advanceTimersByTime(250);
139 | testComponent.props().cancel();
140 | });
141 |
142 | expect(MockSynthesis.cancel.mock.calls.length).toBe(1);
143 | expect(mockOnEnd.mock.calls.length).toBe(1);
144 | expect(wrapper.find(TestComponent).props().speaking).toBe(false);
145 | });
146 | });
147 |
148 | describe('while not speaking', () => {
149 | it('calls window.speechSynthesis.cancel, but does not call the onEnd prop', () => {
150 | act(() => {
151 | wrapper.find(TestComponent).props().cancel();
152 | });
153 | expect(MockSynthesis.cancel.mock.calls.length).toBe(1);
154 | expect(mockOnEnd.mock.calls.length).toBe(0);
155 | });
156 | });
157 | });
158 | });
159 |
--------------------------------------------------------------------------------
/webpack.config.js:
--------------------------------------------------------------------------------
1 | const path = require('path');
2 | const HtmlWebPackPlugin = require('html-webpack-plugin');
3 |
4 | module.exports = {
5 | entry: path.join(__dirname, 'examples/src/index.jsx'),
6 | output: {
7 | path: path.join(__dirname, 'examples/dist'),
8 | filename: 'bundle.js'
9 | },
10 | module: {
11 | rules: [
12 | {
13 | test: /\.(js|jsx)$/,
14 | exclude: /node_modules/,
15 | resolve: { extensions: ['.js', '.jsx'] },
16 | use: {
17 | loader: 'babel-loader'
18 | }
19 | },
20 | {
21 | test: /\.html$/,
22 | use: [
23 | {
24 | loader: 'html-loader',
25 | options: { minimize: true }
26 | }
27 | ]
28 | },
29 | {
30 | test: /\.(png|svg|jpg|gif)$/,
31 | use: [
32 | 'file-loader'
33 | ]
34 | }
35 | ]
36 | },
37 | plugins: [
38 | new HtmlWebPackPlugin({
39 | template: './examples/src/index.html',
40 | filename: './index.html',
41 | favicon: 'examples/src/images/favicon.ico'
42 | })
43 | ],
44 | devServer: {
45 | port: 3007
46 | }
47 | };
48 |
--------------------------------------------------------------------------------