├── etc
└── thumbnail.png
├── src
├── favicon.png
├── images
│ ├── aws_logo.png
│ ├── dot-texture.png
│ └── dot-banner-right.jpg
├── index.js
├── utilities
│ ├── navbar.js
│ ├── footer.js
│ └── audio.js
├── App.js
├── App.css
├── App.test.js
├── logo.svg
├── serviceWorker.js
├── components
│ ├── Rekognition.js
│ ├── Main.js
│ ├── Transcribe.js
│ ├── Comprehend.js
│ ├── Translate.js
│ └── Polly.js
└── index.css
├── public
├── favicon.ico
├── manifest.json
└── index.html
├── amplify
├── backend
│ ├── hosting
│ │ └── S3AndCloudFront
│ │ │ ├── parameters.json
│ │ │ └── template.json
│ └── backend-config.json
└── .config
│ └── project-config.json
├── .gitignore
├── LICENSE
├── package.json
└── README.md
/etc/thumbnail.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nmwalsh/aws-ai-services-site/HEAD/etc/thumbnail.png
--------------------------------------------------------------------------------
/src/favicon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nmwalsh/aws-ai-services-site/HEAD/src/favicon.png
--------------------------------------------------------------------------------
/public/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nmwalsh/aws-ai-services-site/HEAD/public/favicon.ico
--------------------------------------------------------------------------------
/src/images/aws_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nmwalsh/aws-ai-services-site/HEAD/src/images/aws_logo.png
--------------------------------------------------------------------------------
/src/images/dot-texture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nmwalsh/aws-ai-services-site/HEAD/src/images/dot-texture.png
--------------------------------------------------------------------------------
/src/images/dot-banner-right.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nmwalsh/aws-ai-services-site/HEAD/src/images/dot-banner-right.jpg
--------------------------------------------------------------------------------
/amplify/backend/hosting/S3AndCloudFront/parameters.json:
--------------------------------------------------------------------------------
1 | {
2 | "bucketName": "aws-ai-services-site-20190630035901-hostingbucket"
3 | }
--------------------------------------------------------------------------------
/amplify/backend/backend-config.json:
--------------------------------------------------------------------------------
1 | {
2 | "hosting": {
3 | "S3AndCloudFront": {
4 | "service": "S3AndCloudFront",
5 | "providerPlugin": "awscloudformation"
6 | }
7 | }
8 | }
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 |
2 |
3 | #amplify
4 | amplify/\#current-cloud-backend
5 | amplify/.config/local-*
6 | amplify/backend/amplify-meta.json
7 | amplify/backend/awscloudformation
8 | amplify/team-provider-info.json
9 | build/
10 | dist/
11 | node_modules/
12 | aws-exports.js
13 | awsconfiguration.json
14 |
--------------------------------------------------------------------------------
/public/manifest.json:
--------------------------------------------------------------------------------
1 | {
2 | "short_name": "AWS AI Services",
3 | "name": "AWS AI Services Demos",
4 | "icons": [
5 | {
6 | "src": "favicon.ico",
7 | "sizes": "64x64 32x32 24x24 16x16",
8 | "type": "image/x-icon"
9 | }
10 | ],
11 | "start_url": ".",
12 | "display": "standalone",
13 | "theme_color": "#000000",
14 | "background_color": "#ffffff"
15 | }
16 |
--------------------------------------------------------------------------------
/amplify/.config/project-config.json:
--------------------------------------------------------------------------------
1 | {
2 | "projectName": "aws-ai-services-site",
3 | "version": "2.0",
4 | "frontend": "javascript",
5 | "javascript": {
6 | "framework": "react",
7 | "config": {
8 | "SourceDir": "src",
9 | "DistributionDir": "build",
10 | "BuildCommand": "npm run-script build",
11 | "StartCommand": "npm run-script start"
12 | }
13 | },
14 | "providers": [
15 | "awscloudformation"
16 | ]
17 | }
--------------------------------------------------------------------------------
/src/index.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import ReactDOM from 'react-dom';
3 | import App from './App';
4 | import * as serviceWorker from './serviceWorker';
5 | import 'bootstrap/dist/css/bootstrap.css';
6 | import './index.css';
7 | import { BrowserRouter } from 'react-router-dom';
8 |
9 | ReactDOM.render( , document.getElementById('root'));
10 |
11 | // If you want your app to work offline and load faster, you can change
12 | // unregister() to register() below. Note this comes with some pitfalls.
13 | // Learn more about service workers: http://bit.ly/CRA-PWA
14 | serviceWorker.unregister();
15 |
--------------------------------------------------------------------------------
/src/utilities/navbar.js:
--------------------------------------------------------------------------------
1 | import React, { Component } from 'react'
2 | import { Menu } from 'semantic-ui-react'
3 |
4 | export default class NavBarHeader extends Component {
5 | render() {
6 | return (
7 |
19 |
20 | )
21 | }
22 | }
--------------------------------------------------------------------------------
/src/utilities/footer.js:
--------------------------------------------------------------------------------
1 | import React, { Component } from 'react'
2 |
3 | export default class Footer extends Component {
4 | render() {
5 | return (
6 |
14 | )
15 | }
16 | }
--------------------------------------------------------------------------------
/src/App.js:
--------------------------------------------------------------------------------
1 | import React, { Component } from 'react';
2 | import './App.css';
3 | import { Switch, Route } from 'react-router-dom'
4 | import Polly from './components/Polly';
5 | import Transcribe from './components/Transcribe';
6 | import Main from './components/Main';
7 | import Comprehend from './components/Comprehend';
8 | import Rekognition from './components/Rekognition';
9 | import Translate from './components/Translate';
10 | //import NavBar from './utilities/navbar';
11 | //import Textract from './components/Textract';
12 |
13 | class App extends Component {
14 |
15 | render() {
16 | return (
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 | )
25 | }
26 | }
27 |
28 | export default App;
29 |
--------------------------------------------------------------------------------
/src/App.css:
--------------------------------------------------------------------------------
1 | .App {
2 | /*text-align: center;*/
3 | }
4 |
5 | .App-logo {
6 | animation: App-logo-spin infinite 20s linear;
7 | height: 40vmin;
8 | pointer-events: none;
9 | }
10 |
11 | .App-header {
12 | background-color: #282c34;
13 | min-height: 100vh;
14 | display: flex;
15 | flex-direction: column;
16 | align-items: center;
17 | justify-content: center;
18 | font-size: calc(10px + 2vmin);
19 | color: white;
20 | }
21 |
22 | .App-link {
23 | color: #61dafb;
24 | }
25 |
26 | @keyframes App-logo-spin {
27 | from {
28 | transform: rotate(0deg);
29 | }
30 | to {
31 | transform: rotate(360deg);
32 | }
33 | }
34 |
35 |
36 | .step{
37 | padding: 40px;
38 | }
39 |
40 | .stepTitle {
41 | padding-bottom: 20px;
42 | }
43 |
44 | .stepInstructions {
45 | padding-top: 20px;
46 | }
47 |
48 | .defunct-banner {
49 | background-color: #FF7F50; /* Coral red */
50 | color: white;
51 | text-align: center;
52 | padding: 15px;
53 | font-weight: bold;
54 | width: 100%;
55 | margin: 0;
56 | font-size: 16px;
57 | }
--------------------------------------------------------------------------------
/src/App.test.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import ReactDOM from 'react-dom';
3 | import App from './App';
4 | import Main from './components/Main';
5 | import { BrowserRouter as Router } from 'react-router-dom';
6 |
7 | it('renders without crashing', () => {
8 | const div = document.createElement('div');
9 | ReactDOM.render( , div);
10 | ReactDOM.unmountComponentAtNode(div);
11 | });
12 |
13 | it('renders the defunct banner on Main component', () => {
14 | const div = document.createElement('div');
15 | ReactDOM.render(
16 |
17 |
18 | ,
19 | div
20 | );
21 |
22 | // Check if the banner exists
23 | const bannerElement = div.querySelector('.defunct-banner');
24 | expect(bannerElement).not.toBeNull();
25 |
26 | // Check if the banner has the correct text
27 | expect(bannerElement.textContent).toBe(
28 | "This demo site is no longer operational. To try out the latest capabilities, please visit the respective service demo within the AWS Console."
29 | );
30 |
31 | ReactDOM.unmountComponentAtNode(div);
32 | });
33 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining a copy
4 | of this software and associated documentation files (the "Software"), to deal
5 | in the Software without restriction, including without limitation the rights
6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 | copies of the Software, and to permit persons to whom the Software is
8 | furnished to do so, subject to the following conditions:
9 |
10 | The above copyright notice and this permission notice shall be included in all
11 | copies or substantial portions of the Software.
12 |
13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19 | SOFTWARE.
20 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "ml-javascript",
3 | "version": "0.1.0",
4 | "private": true,
5 | "dependencies": {
6 | "aws-sdk": "^2.868.0",
7 | "bootstrap": "^4.3.1",
8 | "data-uri-to-buffer": "^2.0.0",
9 | "jquery": "^3.5.0",
10 | "popper": "^1.0.1",
11 | "react": "^16.8.2",
12 | "react-audio-player": "^0.11.0",
13 | "react-audio-recorder": "^2.2.0",
14 | "react-bootstrap": "^1.0.0-beta.14",
15 | "react-dom": "^16.8.2",
16 | "react-github-btn": "^1.1.1",
17 | "react-mic": "^12.4.1",
18 | "react-router-dom": "^4.3.1",
19 | "react-scripts": "2.1.5",
20 | "react-simple-card": "^2.0.2",
21 | "react-webcam": "^1.1.1",
22 | "recorder-js": "^1.0.7",
23 | "semantic-ui-react": "^0.85.0",
24 | "three": "^0.137.0"
25 | },
26 | "scripts": {
27 | "start": "react-scripts start",
28 | "build": "react-scripts build",
29 | "test": "react-scripts test",
30 | "eject": "react-scripts eject"
31 | },
32 | "eslintConfig": {
33 | "extends": "react-app"
34 | },
35 | "browserslist": [
36 | ">0.2%",
37 | "not dead",
38 | "not ie <= 11",
39 | "not op_mini all"
40 | ]
41 | }
42 |
--------------------------------------------------------------------------------
/public/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
10 |
11 |
15 |
16 |
25 | AWS AI Service Demos
26 |
27 |
28 | You need to enable JavaScript to run this app.
29 |
30 |
40 |
41 |
42 |
--------------------------------------------------------------------------------
/src/logo.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # AWS AI Service Demos
2 |
3 | Try out various AI services from AWS, no code or account required.
4 |
5 |
6 |
7 | Demo site hosted at [https://ai-service-demos.go-aws.com](https://ai-service-demos.go-aws.com)
8 |
9 | ### Included examples:
10 |
11 | All components available in `src/components/`
12 |
13 | - [Amazon Transcribe](https://ai-service-demos.go-aws.com/transcribe)
14 | - `Transcribe.js`: Batch async transcription job for custom audio
15 | - [Amazon Polly](https://ai-service-demos.go-aws.com/polly)
16 | - `Polly.js`: Text to speech with standard or neural voice engine across all available languages
17 | - [Amazon Comprehend](https://ai-service-demos.go-aws.com/comprehend)
18 | - `Comprehend.js`: Sentiment, Entity, Key Phrase, and Syntax Token detection
19 | - [Amazon Rekognition](https://ai-service-demos.go-aws.com/rekognition)
20 | - `Rekognition.js`: Object detection
21 | - [Amazon Translate](https://ai-service-demos.go-aws.com/translate)
22 | - `Translate.js`: Text to text translate
23 |
24 | ### Installing
25 |
26 | To run/test locally:
27 |
28 | `npm install`
29 |
30 | `npm start`
31 |
32 | `http://localhost:3000`
33 |
34 | ## AWS AI Service Free Tiers
35 |
36 | The services covered in this demo all have very generous free tiers. At a glance:
37 |
38 | | Service | Description | Quantity |
39 | |--------------------|--------------------------------|--------------------------------|
40 | | Amazon Translate | Text-Text Translation | 2 million characters/month |
41 | | Amazon Polly | Text to Speech | 5 million characters/month |
42 | | Amazon Comprehend | Natural Language Understanding | 5 million characters/API/month |
43 | | Amazon Rekognition | Computer Vision | 5k images/month |
44 | | Amazon Transcribe | Audio to Text Transcription | 60 minutes/month |
45 |
46 | For the most up-to-date info on free tier status, check out [the live pricing page here](https://aws.amazon.com/free/).
47 |
48 | ## Built With
49 |
50 | * [AWS AI Services](https://aws.amazon.com/machine-learning/ai-services/) - Fully managed AI services, on a pay-per-use model.
51 | * [AWS Amplify](https://aws.amazon.com/amplify/) - Development toolchain for building and deploying webapps
52 |
53 | ## Contributing
54 |
55 | Have functionality you'd like to see, or a new AI service you want a demo for? Ping me on Twitter ([@TheNickWalsh](https://twitter.com/thenickwalsh)) or open an issue here.
56 |
57 | ## License
58 |
59 | This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details
60 |
61 | ## Acknowledgments
62 |
63 | * Thanks to [Nicki Stone](https://twitter.com/kneekey23) for the late night javascript help
64 |
--------------------------------------------------------------------------------
/amplify/backend/hosting/S3AndCloudFront/template.json:
--------------------------------------------------------------------------------
1 | {
2 | "AWSTemplateFormatVersion": "2010-09-09",
3 | "Description": "Hosting resource stack creation using Amplify CLI",
4 | "Parameters": {
5 | "env": {
6 | "Type": "String"
7 | },
8 | "bucketName": {
9 | "Type": "String"
10 | }
11 | },
12 | "Conditions": {
13 | "ShouldNotCreateEnvResources": {
14 | "Fn::Equals": [
15 | {
16 | "Ref": "env"
17 | },
18 | "NONE"
19 | ]
20 | }
21 | },
22 | "Resources": {
23 | "S3Bucket": {
24 | "Type": "AWS::S3::Bucket",
25 | "DeletionPolicy": "Retain",
26 | "Properties": {
27 | "BucketName": {
28 | "Fn::If": [
29 | "ShouldNotCreateEnvResources",
30 | {
31 | "Ref": "bucketName"
32 | },
33 | {
34 | "Fn::Join": [
35 | "",
36 | [
37 | {
38 | "Ref": "bucketName"
39 | },
40 | "-",
41 | {
42 | "Ref": "env"
43 | }
44 | ]
45 | ]
46 | }
47 | ]
48 | },
49 | "AccessControl": "Private",
50 | "WebsiteConfiguration": {
51 | "IndexDocument": "index.html",
52 | "ErrorDocument": "index.html"
53 | },
54 | "CorsConfiguration": {
55 | "CorsRules": [
56 | {
57 | "AllowedHeaders": [
58 | "Authorization",
59 | "Content-Length"
60 | ],
61 | "AllowedMethods": [
62 | "GET"
63 | ],
64 | "AllowedOrigins": [
65 | "*"
66 | ],
67 | "MaxAge": 3000
68 | }
69 | ]
70 | }
71 | }
72 | }
73 | },
74 | "Outputs": {
75 | "Region": {
76 | "Value": {
77 | "Ref": "AWS::Region"
78 | }
79 | },
80 | "HostingBucketName": {
81 | "Description": "Hosting bucket name",
82 | "Value": {
83 | "Ref": "S3Bucket"
84 | }
85 | },
86 | "WebsiteURL": {
87 | "Value": {
88 | "Fn::GetAtt": [
89 | "S3Bucket",
90 | "WebsiteURL"
91 | ]
92 | },
93 | "Description": "URL for website hosted on S3"
94 | },
95 | "S3BucketSecureURL": {
96 | "Value": {
97 | "Fn::Join": [
98 | "",
99 | [
100 | "https://",
101 | {
102 | "Fn::GetAtt": [
103 | "S3Bucket",
104 | "DomainName"
105 | ]
106 | }
107 | ]
108 | ]
109 | },
110 | "Description": "Name of S3 bucket to hold website content"
111 | }
112 | }
113 | }
--------------------------------------------------------------------------------
/src/utilities/audio.js:
--------------------------------------------------------------------------------
/**
 * Requests microphone access from the browser.
 *
 * Returns a Promise that resolves with an audio-only MediaStream.
 * Polyfills navigator.mediaDevices.getUserMedia on older browsers by
 * wrapping the legacy callback-based APIs in a Promise, per:
 * https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia#Using_the_new_API_in_older_browsers
 *
 * @returns {Promise<MediaStream>} rejects with an Error when no
 *   getUserMedia implementation exists in this browser.
 */
function getAudioStream() {
  // Older browsers might not implement mediaDevices at all, so we set an empty object first
  if (navigator.mediaDevices === undefined) {
    navigator.mediaDevices = {};
  }

  // Some browsers partially implement mediaDevices. We can't just assign an object
  // with getUserMedia as it would overwrite existing properties.
  // Here, we will just add the getUserMedia property if it's missing.
  if (navigator.mediaDevices.getUserMedia === undefined) {
    navigator.mediaDevices.getUserMedia = function(constraints) {
      // First get ahold of the legacy getUserMedia, if present.
      // Includes the unprefixed navigator.getUserMedia and the IE/Edge
      // ms prefix, which the previous lookup missed.
      var getUserMedia =
        navigator.getUserMedia ||
        navigator.webkitGetUserMedia ||
        navigator.mozGetUserMedia ||
        navigator.msGetUserMedia;

      // Some browsers just don't implement it - return a rejected promise with an error
      // to keep a consistent interface
      if (!getUserMedia) {
        return Promise.reject(
          new Error('getUserMedia is not implemented in this browser')
        );
      }

      // Otherwise, wrap the call to the old navigator.getUserMedia with a Promise
      return new Promise(function(resolve, reject) {
        getUserMedia.call(navigator, constraints, resolve, reject);
      });
    };
  }

  const params = { audio: true, video: false };

  return navigator.mediaDevices.getUserMedia(params);
}
40 |
/**
 * Snippets taken from:
 * https://aws.amazon.com/blogs/machine-learning/capturing-voice-input-in-a-browser/
 */

// Sample rate (Hz) at which the browser recording is captured.
const recordSampleRate = 44100;

/**
 * Downsamples an audio buffer from recordSampleRate (44100 Hz) to
 * exportSampleRate by averaging groups of adjacent source samples.
 * Returns the input buffer unchanged when the rates already match.
 *
 * @param {Float32Array} buffer - source samples at recordSampleRate.
 * @param {number} exportSampleRate - target sample rate in Hz.
 * @returns {Float32Array} resampled audio (or the original buffer).
 */
function downsampleBuffer(buffer, exportSampleRate) {
  if (exportSampleRate === recordSampleRate) {
    return buffer;
  }

  const sampleRateRatio = recordSampleRate / exportSampleRate;
  const newLength = Math.round(buffer.length / sampleRateRatio);
  const result = new Float32Array(newLength);

  let readIndex = 0;
  for (let writeIndex = 0; writeIndex < result.length; writeIndex++) {
    const readEnd = Math.round((writeIndex + 1) * sampleRateRatio);

    // Average every source sample in [readIndex, readEnd) into one output sample.
    let sum = 0;
    let sampleCount = 0;
    for (let i = readIndex; i < readEnd && i < buffer.length; i++) {
      sum += buffer[i];
      sampleCount++;
    }

    result[writeIndex] = sum / sampleCount;
    readIndex = readEnd;
  }

  return result;
}
81 |
/**
 * Writes float samples into `output` as little-endian signed 16-bit PCM,
 * starting at byte `offset`. Samples are clamped to [-1, 1] before scaling.
 */
function floatTo16BitPCM(output, offset, input) {
  let pos = offset;
  for (const sample of input) {
    // Clamp to the valid range, then scale to the signed 16-bit range.
    const clamped = Math.max(-1, Math.min(1, sample));
    output.setInt16(pos, clamped < 0 ? clamped * 0x8000 : clamped * 0x7fff, true);
    pos += 2;
  }
}
88 |
/**
 * Writes the characters of `string` into `view` one byte per UTF-16
 * code unit, starting at byte `offset` (used for ASCII chunk tags).
 */
function writeString(view, offset, string) {
  let i = 0;
  while (i < string.length) {
    view.setUint8(offset + i, string.charCodeAt(i));
    i += 1;
  }
}
94 |
/**
 * Encodes mono 16-bit PCM samples as a WAV (RIFF) file: a 44-byte
 * header followed by the samples as little-endian signed 16-bit PCM
 * at recordSampleRate.
 *
 * @param {Float32Array} samples - audio samples in [-1, 1].
 * @returns {DataView} view over the complete WAV file bytes.
 */
function encodeWAV(samples) {
  const buffer = new ArrayBuffer(44 + samples.length * 2);
  const view = new DataView(buffer);

  writeString(view, 0, 'RIFF');
  // RIFF chunk size = total file size minus the 8-byte RIFF header,
  // i.e. 36 remaining header bytes + the PCM payload. (Previously
  // written as 32 + payload, under-reporting the size by 4 bytes and
  // producing a malformed header.)
  view.setUint32(4, 36 + samples.length * 2, true);
  writeString(view, 8, 'WAVE');
  writeString(view, 12, 'fmt ');
  view.setUint32(16, 16, true); // fmt sub-chunk size (16 for PCM)
  view.setUint16(20, 1, true); // audio format: 1 = uncompressed PCM
  view.setUint16(22, 1, true); // channel count: mono
  view.setUint32(24, recordSampleRate, true); // sample rate
  view.setUint32(28, recordSampleRate * 2, true); // byte rate = rate * 2 bytes/sample
  view.setUint16(32, 2, true); // block align: 1 channel * 2 bytes
  view.setUint16(34, 16, true); // bits per sample
  writeString(view, 36, 'data');
  view.setUint32(40, samples.length * 2, true); // data sub-chunk size
  floatTo16BitPCM(view, 44, samples);

  return view;
}
119 |
/**
 * Converts a raw recording buffer into a WAV Blob.
 *
 * NOTE(review): downsampleBuffer is invoked with recordSampleRate as the
 * target rate, so the audio is passed through unchanged at 44100 Hz rather
 * than actually being resampled (the old "16 kHz" comment was inaccurate).
 *
 * @param {Float32Array} recBuffer - recorded samples at recordSampleRate.
 * @returns {Blob} the encoded WAV file as an octet-stream Blob.
 */
function exportBuffer(recBuffer) {
  const processed = downsampleBuffer(recBuffer, recordSampleRate);
  const wavView = encodeWAV(processed);
  return new Blob([wavView], {
    type: 'application/octet-stream'
  });
}
135 |
136 | export { getAudioStream, exportBuffer };
--------------------------------------------------------------------------------
/src/serviceWorker.js:
--------------------------------------------------------------------------------
// This optional code is used to register a service worker.
// register() is not called by default.

// This lets the app load faster on subsequent visits in production, and gives
// it offline capabilities. However, it also means that developers (and users)
// will only see deployed updates on subsequent visits to a page, after all the
// existing tabs open on the page have been closed, since previously cached
// resources are updated in the background.

// To learn more about the benefits of this model and instructions on how to
// opt-in, read http://bit.ly/CRA-PWA

// True when the page is served from a local development host; used below to
// add extra service-worker validity checks and logging during development.
const isLocalhost = Boolean(
  window.location.hostname === 'localhost' ||
    // [::1] is the IPv6 localhost address.
    window.location.hostname === '[::1]' ||
    // 127.0.0.1/8 is considered localhost for IPv4.
    window.location.hostname.match(
      /^127(?:\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$/
    )
);
22 |
/**
 * Registers the app's service worker (production builds only).
 *
 * On localhost the worker is first validated via checkValidServiceWorker;
 * elsewhere it is registered directly. Does nothing when not in production,
 * when the browser lacks service worker support, or when PUBLIC_URL points
 * at a different origin.
 *
 * @param {Object} [config] - optional { onSuccess, onUpdate } callbacks,
 *   forwarded to registerValidSW (see that function for their semantics).
 */
export function register(config) {
  if (process.env.NODE_ENV === 'production' && 'serviceWorker' in navigator) {
    // The URL constructor is available in all browsers that support SW.
    const publicUrl = new URL(process.env.PUBLIC_URL, window.location.href);
    if (publicUrl.origin !== window.location.origin) {
      // Our service worker won't work if PUBLIC_URL is on a different origin
      // from what our page is served on. This might happen if a CDN is used to
      // serve assets; see https://github.com/facebook/create-react-app/issues/2374
      return;
    }

    window.addEventListener('load', () => {
      const swUrl = `${process.env.PUBLIC_URL}/service-worker.js`;

      if (isLocalhost) {
        // This is running on localhost. Let's check if a service worker still exists or not.
        checkValidServiceWorker(swUrl, config);

        // Add some additional logging to localhost, pointing developers to the
        // service worker/PWA documentation.
        navigator.serviceWorker.ready.then(() => {
          console.log(
            'This web app is being served cache-first by a service ' +
              'worker. To learn more, visit http://bit.ly/CRA-PWA'
          );
        });
      } else {
        // Is not localhost. Just register service worker
        registerValidSW(swUrl, config);
      }
    });
  }
}
56 |
/**
 * Registers the service worker at swUrl and wires up install/update logging.
 *
 * @param {string} swUrl - URL of the service worker script.
 * @param {Object} [config] - optional callbacks:
 *   onUpdate(registration) - fired when new content has been fetched but
 *     older tabs still hold the previous version;
 *   onSuccess(registration) - fired when content is precached for the
 *     first time (offline-ready).
 */
function registerValidSW(swUrl, config) {
  navigator.serviceWorker
    .register(swUrl)
    .then(registration => {
      registration.onupdatefound = () => {
        const installingWorker = registration.installing;
        if (installingWorker == null) {
          return;
        }
        installingWorker.onstatechange = () => {
          if (installingWorker.state === 'installed') {
            if (navigator.serviceWorker.controller) {
              // At this point, the updated precached content has been fetched,
              // but the previous service worker will still serve the older
              // content until all client tabs are closed.
              console.log(
                'New content is available and will be used when all ' +
                  'tabs for this page are closed. See http://bit.ly/CRA-PWA.'
              );

              // Execute callback
              if (config && config.onUpdate) {
                config.onUpdate(registration);
              }
            } else {
              // At this point, everything has been precached.
              // It's the perfect time to display a
              // "Content is cached for offline use." message.
              console.log('Content is cached for offline use.');

              // Execute callback
              if (config && config.onSuccess) {
                config.onSuccess(registration);
              }
            }
          }
        };
      };
    })
    .catch(error => {
      console.error('Error during service worker registration:', error);
    });
}
100 |
/**
 * Verifies that a real service worker script exists at swUrl before
 * registering it. If the server returns a 404 or a non-JavaScript
 * response (e.g. a different app is now served at this origin), the
 * stale worker is unregistered and the page reloaded; otherwise the
 * worker is registered normally. Network failures are treated as
 * offline mode and only logged.
 *
 * @param {string} swUrl - URL of the service worker script.
 * @param {Object} [config] - callbacks forwarded to registerValidSW.
 */
function checkValidServiceWorker(swUrl, config) {
  // Check if the service worker can be found. If it can't, reload the page.
  fetch(swUrl)
    .then(response => {
      // Ensure service worker exists, and that we really are getting a JS file.
      const contentType = response.headers.get('content-type');
      if (
        response.status === 404 ||
        (contentType != null && contentType.indexOf('javascript') === -1)
      ) {
        // No service worker found. Probably a different app. Reload the page.
        navigator.serviceWorker.ready.then(registration => {
          registration.unregister().then(() => {
            window.location.reload();
          });
        });
      } else {
        // Service worker found. Proceed as normal.
        registerValidSW(swUrl, config);
      }
    })
    .catch(() => {
      console.log(
        'No internet connection found. App is running in offline mode.'
      );
    });
}
128 |
/**
 * Removes any service worker previously registered for this page.
 * No-op in browsers without service worker support.
 */
export function unregister() {
  if (!('serviceWorker' in navigator)) {
    return;
  }
  navigator.serviceWorker.ready.then(registration => registration.unregister());
}
136 |
--------------------------------------------------------------------------------
/src/components/Rekognition.js:
--------------------------------------------------------------------------------
1 | import React, {Component} from 'react';
2 | import NavBar from '../utilities/navbar';
3 | import Footer from '../utilities/footer';
4 | import { Form } from 'semantic-ui-react';
5 | import Webcam from 'react-webcam';
6 | var dataUriToBuffer = require('data-uri-to-buffer');
7 | var AWS = require('aws-sdk');
8 | AWS.config.region = 'us-east-1';
9 | AWS.config.credentials = new AWS.CognitoIdentityCredentials({IdentityPoolId: 'us-east-1:1956382a-b3f6-472c-9a8d-3a246853c917'});
10 |
11 | class Rekognition extends Component {
12 | constructor(props){
13 | super(props);
14 |
15 | this.state = {
16 | image: '',
17 | resultMessage: '',
18 | resultLabels: [],
19 | imageSrc: '',
20 |
21 | }
22 | this.handleCapture = this.handleCapture.bind(this);
23 | this.sendImageToRekognition = this.sendImageToRekognition.bind(this);
24 | }
25 | setRef = webcam => {
26 | this.webcam = webcam;
27 | };
28 |
29 | handleCapture=() => {
30 |
31 | const imageSrc = this.webcam.getScreenshot()
32 | this.sendImageToRekognition(imageSrc)
33 | }
34 |
35 | sendImageToRekognition = (imageSrc) => {
36 |
37 | // convert image to buffer from base64
38 | let buffer = dataUriToBuffer(imageSrc)
39 |
40 | // API call params
41 | var RekognitionParams = {
42 | Image: {
43 | Bytes: buffer
44 | /* Alternatively, you can provide an S3 object
45 | S3Object: {
46 | Bucket: 'STRING_VALUE',
47 | Name: 'STRING_VALUE',
48 | Version: 'STRING_VALUE'
49 | }*/
50 | },
51 | };
52 |
53 | // instantiate Rekognition client
54 | var rekognition = new AWS.Rekognition({apiVersion: '2017-07-01'});
55 | let currentComponent = this;
56 |
57 | // call Rekognition's detectLabels method
58 | rekognition.detectLabels(RekognitionParams, function (err, data){
59 | if (err) {
60 | currentComponent.setState({resultMessage: err.message});
61 | }
62 | else {
63 | console.log(data);
64 | currentComponent.setState({resultLabels: data.Labels});
65 | currentComponent.setState({resultMessage: "Classification successful!"})
66 | }
67 | });
68 |
69 | }
70 |
71 |
72 | render(){
73 | let result, labels;
74 | if(this.state.resultMessage !== ''){
75 | result = {this.state.resultMessage}
76 | labels = this.state.resultLabels.map((label, i) => {
77 | return (
78 |
79 | {label.Name}
80 |
81 |
82 | {label.Confidence}
83 |
84 |
85 | )
86 |
87 | })
88 |
89 | }
90 | const videoConstraints = {
91 | facingMode: "user"
92 | };
93 | return (
94 |
95 |
96 |
97 | This demo site is no longer operational. To try out the latest capabilities, please visit the respective service demo within the AWS Console.
98 |
99 |
100 |
101 |
102 |
Amazon Rekognition
103 |
104 |
105 |
106 |
Amazon Rekognition makes it easy to add image and video analysis to your applications. You just provide an image or video to the Amazon Rekognition API, and the service can identify objects, people, text, scenes, and activities. It can detect any inappropriate content as well. Amazon Rekognition also provides highly accurate facial analysis and facial recognition. You can detect, analyze, and compare faces for a wide variety of use cases, including user verification, cataloging, people counting, and public safety.
107 |
108 |
In this example, we're going to show how easy it is to send an image to Amazon Rekognition to perform object identification.
109 |
110 | Methods:
111 |
112 |
113 |
114 |
115 |
116 |
Step 1: Take picture
117 |
128 | Detect Labels
129 |
130 |
131 |
Results: {result}
132 |
133 |
134 |
135 |
136 | Label
137 |
138 |
139 | Confidence
140 |
141 |
142 |
143 |
144 | {labels}
145 |
146 |
147 |
148 |
149 |
150 |
151 |
152 |
153 | )
154 | }
155 | }
156 |
157 | export default Rekognition
--------------------------------------------------------------------------------
/src/index.css:
--------------------------------------------------------------------------------
/*
  BRAND COLORS

  white:      #FFFFFF
  orange:     #FF9900
  light navy: #232F3E
  dark navy:  #161E2D
*/

.container {
  padding-top: 15px;
  position: relative;
  min-height: 94vh;
}

/* NOTE(review): 'header wrap' selects a <wrap> element inside .header, which
   never matches anything. Probably meant '.header-wrap' — confirm against the
   markup before renaming. */
.header wrap {
  margin: 0 auto;
  width: 100%;
}

.content-wrap {
  padding-bottom: 100px;
}

.hero {
  margin-top: 25px;
  margin-bottom: 50px;
}

.bg-pattern {
  background-image: url("images/dot-texture.png");
  background-repeat: no-repeat;
  position: absolute;
  /* background-size: 100% 900px; */
  height: 100%;
  width: 50%;
  top: 0px;
  left: 80%;
  opacity: 0.5;
  z-index: -1;
  /* was 'margin: 50 auto 0 auto' — unitless non-zero lengths are invalid and
     the whole declaration was being dropped; 50px restores the intended gap */
  margin: 50px auto 0 auto;
  overflow: hidden;
}

.hero img {
  padding-top: 15px;
}

body {
  background-color: #161E2D;
  margin: 0;
  padding: 0;
  font-family: "Amazon Ember", -apple-system, BlinkMacSystemFont, "Segoe UI", "Roboto", "Oxygen",
    "Ubuntu", "Cantarell", "Fira Sans", "Droid Sans", "Helvetica Neue",
    sans-serif;
  -webkit-font-smoothing: antialiased;
  -moz-osx-font-smoothing: grayscale;
}

h1, h2, h3, h4, h5 {
  color: white;
}

h5 {
  font-weight: 700;
  font-size: 1.5rem;
}

p {
  color: white;
}

canvas {
  position: absolute;
  top: 0;
  left: 0;
  z-index: -1;
}

.card {
  /* height: 310px; */
  width: 18rem;
  border: 2px solid #FF9900;
  background-color: #232F3E;
  margin: 15px;
  box-shadow: 0rem 0 1rem rgb(7, 5, 6);
  transition: 0.4s ease-out;
  box-sizing: border-box;
  display: inline-block;
  float: none;
  text-align: left;
}

.card + .card {
  margin-left: 2%;
}

.flex-container {
  /* was 'webkit-justify-content' — missing the leading dash, so it was
     ignored as an unknown property */
  -webkit-justify-content: center;
  text-align: center;
  padding: 0;
  margin: 0;
  display: -webkit-box;
  display: -moz-box;
  display: -ms-flexbox;
  display: -webkit-flex;
  display: flex;
  align-items: center;
  justify-content: center;
}

.bar {
  width: 100%;
  height: 3px;
  margin-bottom: 5px;
}

.emptybar {
  background-color: #2e3033;
  width: 100%;
  height: 3px;
  margin-bottom: 4px;
}

.filledbar {
  position: relative;
  margin-top: -8px;
  z-index: 3;
  width: 0px;
  height: 3px;
  background: linear-gradient(90deg, rgb(255, 110, 127) 0%, rgba(217,147,0,1) 65%, rgba(255,186,0,1) 100%);
  transition: 0.5s ease-out;
}

.titlebar {
  display: block;
  position: relative;
  background: linear-gradient(90deg, rgb(255, 110, 127) 0%, rgba(217,147,0,1) 65%, rgba(255,186,0,1) 100%);
  width: 45%;
  height: 4px;
  left: -15px;
  margin-bottom: 10px;
}

/* Animate the card's accent bar to full width on hover */
.card:hover .filledbar {
  width: 100%;
  transition: 0.2s ease-out;
}

.btn-info {
  color: white;
  background-color: transparent;
  border-color: #FF9900;
  margin-right: 15px;
}

.btn-info:hover {
  background-color: #FF9900;
  color: white;
  border-color: #FF9900;
  /* NOTE(review): removed invalid 'box-shadow: #FF9900' (box-shadow requires
     offsets, e.g. 'box-shadow: 0 0 0.5rem #FF9900'); it was being ignored by
     browsers anyway — reinstate a valid glow deliberately if wanted. */
}

.btn-info:active,
.btn-info:active:focus,
.btn-info:visited {
  background-color: #FF9900;
}

pre code {
  display: block; /* duplicate 'display: block' declaration removed */
  background-color: #eee;
  border: 1px solid #999;
  padding: 20px;
}

code {
  font-family: source-code-pro, Menlo, Monaco, Consolas, "Courier New",
    monospace;
}

/* NOTE(review): was '.td, tr' — '.td' is a class selector that matches
   nothing; the 'td' element is almost certainly intended so table cells
   render white like their rows. */
td, tr {
  color: white;
}

.footer {
  left: 0px;
  bottom: 0px;
  margin-top: 70px;
  width: 100%;
  background: #161E2D;
  /* was 'position: inline-block' (invalid value — fell back to static);
     relative with zero offsets renders identically and is valid CSS */
  position: relative;
}

.footer p {
  color: gray;
}

.footer-demo {
  left: 0px;
  bottom: 0px;
  margin-top: 75px;
  width: 100%;
  background: #161E2D;
  position: absolute;
}

.footer-demo p {
  color: gray;
}

/* accordion styles on Comprehend page */
body {
  padding: 0 1em 1em;
}

/* Hideable radio/checkbox inputs that drive the accordion tabs.
   NOTE(review): was 'input hidden' — a descendant selector that matches
   nothing, leaving the accordion inputs visible. 'input.hidden' assumes the
   markup puts class="hidden" on those inputs; confirm against the components
   ('input[hidden]' would be redundant with the UA's display:none). */
input.hidden {
  position: absolute;
  opacity: 0;
  z-index: -1;
}

.row {
  display: flex;
}
.row .col {
  flex: 1;
}
.row .col:last-child {
  margin-left: 1em;
}

/* Accordion styles */
.tabs {
  border-radius: 8px;
  overflow: hidden;
  box-shadow: 0 4px 4px -2px rgba(0, 0, 0, 0.5);
}

.tab {
  width: 100%;
  color: white;
  overflow: hidden;
}
.tab-label {
  display: flex;
  justify-content: space-between;
  padding: 1em;
  background: #2c3e50;
  font-weight: bold;
  cursor: pointer;
  margin-bottom: 0px;
}
.tab-label:hover {
  background: #1a252f;
}
/* Chevron icon; rotated 90deg while the tab is open */
.tab-label::after {
  content: "\276F";
  width: 1em;
  height: 1em;
  text-align: center;
  transition: all .35s;
}
.tab-content {
  max-height: 0;
  padding: 0 1em;
  color: #4f84b9;
  background: white;
  transition: all .35s;
}
.tab-close {
  display: flex;
  justify-content: flex-end;
  padding: 1em;
  font-size: 0.75em;
  background: #4f84b9;
  cursor: pointer;
}
.tab-close:hover {
  background: #1a252f;
}

/* Checked hidden input opens its sibling tab content */
input:checked + .tab-label {
  background: #1a252f;
}
input:checked + .tab-label::after {
  -webkit-transform: rotate(90deg);
  transform: rotate(90deg);
}
input:checked ~ .tab-content {
  max-height: 100vh;
  padding: 1em;
}

/* component navbar */

menu {
  margin-bottom: 50px;
}
ul {
  list-style-type: none;
  margin: 0;
  padding: 0;
}

span li {
  display: inline;
  margin-right: 1.5rem;
}

.item {
  color: lightgray;
}

.item a {
  text-decoration: none;
}
.item:hover {
  color: #F8991d;
  text-decoration: none;
}

.ui.pointing.secondary.menu {
  background: rgba(0, 0, 0, 0); /* was 'rgb()' with four comma arguments */
  position: relative;
  margin-top: 1em;
  margin-bottom: 1em;
  height: 1.5em;
  margin-right: auto;
  margin-left: auto;
}
--------------------------------------------------------------------------------
/src/components/Main.js:
--------------------------------------------------------------------------------
1 | import React, {Component} from 'react'
2 | import GitHubButton from 'react-github-btn'
3 | import Footer from '../utilities/footer';
4 | import '../App.css';
5 | //import * as THREE from "three";
6 |
7 | class Main extends Component {
8 |
9 | render() {
10 | return(
11 |
12 | This demo site is no longer operational. To try out the latest capabilities, please visit the respective service demo within the AWS Console.
13 |
14 |
15 |
16 |
17 |
22 |
23 |
AWS AI Service Demos
24 |
25 | AWS pre-trained AI Services provide ready-made intelligence for your applications and workflows.
26 |
27 | Get started powering your applications in minutes: no machine learning knowledge required .
28 |
29 |
30 | See them in action in the examples below, and check out the code samples on GitHub.
31 |
32 |
Star
33 | ' '
34 |
Fork
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
Amazon Transcribe
44 |
48 |
Amazon Transcribe is an automatic speech recognition (ASR) service that makes it easy for developers to add speech-to-text capability to their applications.
49 |
50 |
Try Transcribe
51 |
Docs
52 |
53 |
54 |
55 |
56 |
Amazon Polly
57 |
61 |
Amazon Polly is a service that turns text into lifelike speech, allowing you to create applications that talk, and build entirely new categories of speech-enabled products.
62 |
63 |
Try Polly
64 |
Docs
65 |
66 |
67 |
68 |
69 |
Amazon Comprehend
70 |
74 |
Amazon Comprehend is a natural language processing (NLP) service that uses machine learning to find insights and relationships in text. No machine learning experience required.
75 |
76 |
Try Comprehend
77 |
Docs
78 |
79 |
80 |
81 |
82 |
Amazon Rekognition
83 |
87 |
Amazon Rekognition allows you to automatically identify objects, people, text, scenes, and activities, in images and videos. Available for batch and streaming.
88 |
89 |
Try Rekognition
90 |
Docs
91 |
92 |
93 |
94 |
95 |
Amazon Translate
96 |
100 |
Amazon Translate is a neural machine translation service that delivers fast, high-quality, and affordable language to language translation, with the ability to autodetect source language.
101 |
102 |
Try Translate
103 |
Docs
104 |
105 |
106 |
107 | {/*
108 |
109 |
110 |
111 |
Amazon Rekognition
112 |
116 |
Amazon Rekognition allows you to automatically identify objects, people, text, scenes, and activities, in images and videos. Available for batch and streaming.
117 |
Try Rekognition
118 |
Docs
119 |
120 |
121 |
122 |
123 |
Amazon Translate
124 |
128 |
Amazon Translate is a neural machine translation service that delivers fast, high-quality, and affordable language to language translation.
129 |
Try Translate
130 |
Docs
131 |
132 |
133 |
134 | */}
135 |
136 |
137 |
138 |
139 |
140 |
)
141 | }
142 | }
143 | export default Main
--------------------------------------------------------------------------------
/src/components/Transcribe.js:
--------------------------------------------------------------------------------
1 | import React, {Component} from 'react';
2 | import NavBar from '../utilities/navbar';
3 | import Footer from '../utilities/footer';
4 | import RecorderJS from 'recorder-js';
5 | import ReactAudioPlayer from 'react-audio-player';
6 | import { getAudioStream, exportBuffer } from '../utilities/audio';
7 | import TranscribeService from "aws-sdk/clients/transcribeservice";
8 | import S3Service from "aws-sdk/clients/s3";
9 | import '../App.css';
10 |
// Module-level AWS service clients shared by all Transcribe.js methods.
// NOTE(review): credentials/region for the Transcribe client are expected to
// come from the global AWS config initialised elsewhere (Comprehend.js /
// Translate.js set AWS.config at import time) — confirm load order.
var transcribeservice = new TranscribeService({ apiVersion: '2017-10-26' });
// Pass the region as a constructor option instead of mutating the client's
// config after creation.
var s3 = new S3Service({ region: 'us-east-1' });
14 |
15 | class Transcribe extends Component {
16 | constructor(props){
17 | super(props);
18 | this.state = {
19 | stream: null,
20 | recording: false,
21 | recorder: null,
22 | transcriptionJobName: '',
23 | transcription:'',
24 | transcriptionJobComplete: false,
25 | transcriptionInProgress: false,
26 | s3URL:'',
27 | outputURL:''
28 | }
29 | this.startRecord = this.startRecord.bind(this);
30 | this.stopRecord = this.stopRecord.bind(this);
31 | this.transcribeAudio = this.transcribeAudio.bind(this);
32 | this.getTranscription = this.getTranscription.bind(this);
33 |
34 | }
35 |
36 | async componentDidMount() {
37 | let stream;
38 |
39 | try {
40 | stream = await getAudioStream();
41 | } catch (error) {
42 | // Users browser doesn't support audio.
43 | // Add your handler here.
44 | console.log(error);
45 | }
46 |
47 | this.setState({ stream });
48 | }
49 |
50 | startRecord() {
51 | const { stream } = this.state;
52 |
53 | const audioContext = new (window.AudioContext || window.webkitAudioContext)();
54 | const recorder = new RecorderJS(audioContext);
55 | recorder.init(stream);
56 |
57 | this.setState(
58 | {
59 | recorder,
60 | recording: true
61 | },
62 | () => {
63 | recorder.start();
64 | }
65 | );
66 | }
67 |
68 | async stopRecord() {
69 | const { recorder } = this.state;
70 |
71 | const { buffer } = await recorder.stop()
72 | const audio = exportBuffer(buffer[0]);
73 |
74 | // Process the audio here.
75 | console.log(audio);
76 |
77 | this.setState({recording: false});
78 | //send audio file to s3 bucket to prepare for transcription
79 |
80 | let currentComponent = this;
81 | var params = {
82 | ACL: "public-read",
83 | Body: audio,
84 | Bucket: "transcribe-output-js",
85 | Key: "test.wav"
86 | };
87 |
88 | s3.putObject(params, function(err, data) {
89 | if (err) console.log(err, err.stack); // an error occurred
90 | else{
91 | currentComponent.setState({s3URL: "https://s3.amazonaws.com/transcribe-output-js/" + params.Key})
92 | console.log(data); // successful response
93 | currentComponent.transcribeAudio();
94 | }
95 |
96 | });
97 | }
98 |
99 | transcribeAudio() {
100 |
101 | let job = Math.random();
102 | this.setState({transcriptionJobName: 'TRANSCRIBE_DEMO_JOB_' + job});
103 | var params = {
104 | LanguageCode: "en-US", /* required */
105 | Media: { /* required */
106 | MediaFileUri: this.state.s3URL
107 | },
108 | MediaFormat: "wav", /* required */
109 | TranscriptionJobName: this.state.transcriptionJobName, /* required*/
110 | //OutputBucketName: "transcribe-output-js"
111 | };
112 | transcribeservice.startTranscriptionJob(params, function(err, data) {
113 | if (err) console.log(err, err.stack); // an error occurred
114 | else{
115 | console.log(data); // successful response
116 | }
117 | });
118 | }
119 |
120 | givePublicAccessToTranscriptObject(key) {
121 |
122 | return new Promise((resolve, reject) => {
123 | var params = {
124 | ACL: 'public-read',
125 | Bucket: "aws-transcribe-us-east-1-prod",
126 | Key: key
127 | };
128 | s3.putObjectAcl(params, function(err, data) {
129 | if (err){
130 | console.log(err, err.stack);
131 | reject(err);
132 | }// an error occurred
133 | else{ // successful response
134 | console.log(data);
135 | console.log("public access updated");
136 | resolve(data);
137 |
138 | }
139 |
140 | });
141 | })
142 |
143 | }
144 |
145 | /*
146 | reqListener () {
147 | console.log(this.responseText);
148 | }*/
149 |
150 | getS3Object(bucket, key){
151 | var params = {
152 | Bucket: bucket,
153 | Key: key
154 | };
155 | s3.getObject(params, function(err, data) {
156 | if (err) console.log(err, err.stack); // an error occurred
157 | else console.log(data); // successful response*/
158 | });
159 |
160 | }
161 |
162 | getTranscription() {
163 | //this.setState({transcriptionJobComplete: true});
164 | var currentComponent = this;
165 | var params = {
166 | TranscriptionJobName: this.state.transcriptionJobName /* required */
167 | };
168 | transcribeservice.getTranscriptionJob(params, function(err, data) {
169 | if (err) console.log(err, err.stack); // an error occurred
170 | else{ // successful response
171 | console.log(data);
172 | if(data.TranscriptionJob.TranscriptionJobStatus === 'IN_PROGRESS'){
173 | currentComponent.setState({transcriptionInProgress: true});
174 | currentComponent.setState({transcriptionJobComplete: false});
175 | setTimeout(() => {
176 | currentComponent.getTranscription();
177 | }, 5000);
178 | }
179 | else if(data.TranscriptionJob.TranscriptionJobStatus === 'COMPLETED'){
180 | currentComponent.setState({transcriptionJobComplete: true});
181 | currentComponent.setState({transcriptionInProgress: false});
182 | let url = data.TranscriptionJob.Transcript.TranscriptFileUri
183 | let signedKey = url.split('https://s3.amazonaws.com/aws-transcribe-us-east-1-prod/')
184 | let bucket = "aws-transcribe-us-east-1-prod"
185 | let key = signedKey[1].split('?')[0]
186 | currentComponent.setState({outputURL: url});
187 |
188 | // currentComponent.givePublicAccessToTranscriptObject(key).then(data => {
189 | // currentComponent.getS3Object(bucket, key)
190 | // })
191 |
192 | /*
193 | let options = {
194 | mode: 'no-cors',
195 | method: 'GET'
196 | }
197 |
198 | var request = new XMLHttpRequest();
199 | //oReq.addEventListener("load", reqListener);
200 | request.open("GET", url);
201 | //request.setRequestHeader('X-PINGOTHER', 'pingpong');
202 | request.setRequestHeader('Access-Control-Allow-Origin', '*');
203 | request.setRequestHeader('Content-Type', 'application/xml');
204 | console.log(url)
205 | request.send();
206 | console.log(request);
207 |
208 | fetch(url, options)
209 | .then(response => console.log(response))
210 | .then(data => {
211 | // Work with JSON data here
212 | console.log(data)
213 | currentComponent.setState({transcriptionJobComplete: false});
214 | console.log(data.results.transcripts[0].transcript);
215 | currentComponent.setState({transcription: data.results.transcripts[0].transcript})
216 | })
217 | .catch(err => {
218 | // Do something for an error here
219 | })
220 | */
221 |
222 | }
223 | }
224 | });
225 |
226 | }
227 |
228 | render() {
229 | const { recording, stream } = this.state;
230 | let transcribeBtn;
231 |
232 | if(this.state.transcriptionInProgress){
233 | transcribeBtn =
234 |
235 | Transcribing...
236 |
237 | }
238 | else if(this.state.transcriptionJobComplete){
239 | transcribeBtn = Transcription Ready! Click to Download
240 | }
241 | else{
242 | transcribeBtn = Get Transcription
243 | }
244 | //let outputURL = Transcription link
245 |
246 | // Don't show record button if their browser doesn't support it.
247 | if (!stream) {
248 | return null;
249 | }
250 | return (
251 |
252 |
253 |
254 | This demo site is no longer operational. To try out the latest capabilities, please visit the respective service demo within the AWS Console.
255 |
256 |
257 |
258 |
Amazon Transcribe
259 |
260 |
261 |
262 |
Amazon Transcribe uses advanced machine learning technologies to recognize speech in audio files and transcribe them into text. You can use Amazon Transcribe to convert audio to text and to create applications that incorporate the content of audio files. For example, you can transcribe the audio track from a video recording to create closed captioning for the video.
263 |
264 |
In this example, we're going to show how easy it is to record audio, upload it to Amazon S3, and use Amazon Transcribe to perform a batch transcription job.
265 |
This demo doesn't include the realtime transcription functionality of Amazon Transcribe, but you can find a demo that does here.
266 |
267 | Methods:
268 |
272 |
273 |
274 |
275 |
276 |
277 |
Step 1: Record Audio
278 | {
281 | recording ? this.stopRecord() : this.startRecord();
282 | }}
283 | >
284 | {recording ? 'Stop Recording' : 'Start Recording'}
285 |
286 |
287 |
288 |
Step 2: Upload to S3
289 |
294 |
295 |
296 |
Step 3: Get transcription
297 | {transcribeBtn}
298 |
299 |
300 |
301 |
302 |
303 |
304 | )
305 | }
306 | }
307 |
308 | export default Transcribe;
--------------------------------------------------------------------------------
/src/components/Comprehend.js:
--------------------------------------------------------------------------------
1 | import React, {Component} from 'react'
2 | import NavBar from '../utilities/navbar';
3 | import Footer from '../utilities/footer';
4 | var AWS = require('aws-sdk');
5 | AWS.config.region = 'us-east-1';
6 | AWS.config.credentials = new AWS.CognitoIdentityCredentials({IdentityPoolId: 'us-east-1:1956382a-b3f6-472c-9a8d-3a246853c917'});
7 |
8 | class Comprehend extends Component {
9 | constructor(props){
10 | super(props);
11 | //resultEntities,resultEntitiesMessage, resultSyntax, resultSyntaxMessage, resultKeyPhrases, resultKeyPhrasesMessage
12 | this.state = {
13 | text: '',
14 | resultSentimentMessage: '',
15 | resultSentiment: '',
16 | resultSentimentScore: '',
17 | resultEntities: '',
18 | resultEntitiesMessage: '',
19 | resultSyntax: [],
20 | resultSyntaxMessage: '',
21 | resultKeyPhrases: [],
22 | resultKeyPhrasesMessage: [],
23 | resultContainsPiiEntitiesMessage: '',
24 | resultContainsPiiEntities: [],
25 | resultDetectPiiEntitiesMessage: '',
26 | resultDetectPiiEntities: [],
27 | }
28 | this.onChangeText = this.onChangeText.bind(this);
29 | this.sendTextToComprehend = this.sendTextToComprehend.bind(this);
30 | }
31 |
32 | onChangeText(e){
33 | this.setState({text: e.target.value});
34 | }
35 |
36 | sendTextToComprehend = () => {
37 | // API call params
38 | var comprehendParams = {
39 | LanguageCode: "en",
40 | Text: ""
41 | };
42 | comprehendParams.Text = this.state.text;
43 |
44 | // instantiate comprehend client
45 | var comprehend = new AWS.Comprehend({apiVersion: '2017-11-27'});
46 | let currentComponent = this;
47 |
48 | // Detect Sentiment
49 | if (!!comprehendParams.Text){
50 | comprehend.detectSentiment(comprehendParams, function (err, data){
51 | if (err) {
52 | currentComponent.setState({resultSentimentMessage: err.message});
53 | currentComponent.setState({resultSentiment: ""});
54 | currentComponent.setState({resultSentimentScore: ""});
55 | }
56 | else {
57 | currentComponent.setState({resultSentimentMessage: ">>> Sentiment analyzed!"});
58 | currentComponent.setState({resultSentiment: data.Sentiment});
59 | currentComponent.setState({resultSentimentScore: JSON.stringify(data.SentimentScore)});
60 | }
61 | document.getElementById("chck1").checked = true;
62 | });
63 |
64 | // Detect Entities -- Entities[i] .text, .type, .score
65 | comprehend.detectEntities(comprehendParams, function (err, data){
66 | if (err) {
67 | currentComponent.setState({resultEntitiesMessage: err.message});
68 | currentComponent.setState({resultEntities: ""})
69 | }
70 | else {
71 | currentComponent.setState({resultEntitiesMessage: ">>> Entities analyzed!"})
72 | currentComponent.setState({resultEntities: JSON.stringify(data.Entities)});
73 | //currentComponent.setState({resultEntitiesScores: JSON.stringify(data.SentimentScore)});
74 | }
75 | document.getElementById("chck2").checked = true;
76 | });
77 |
78 | // Detect Syntax -- Entities[i] .text, .type, .score
79 | comprehend.detectSyntax(comprehendParams, function (err, data){
80 | if (err) {
81 | currentComponent.setState({resultSyntaxMessage: err.message});
82 | currentComponent.setState({resultSyntax: ""})
83 | }
84 | else {
85 | currentComponent.setState({resultSyntaxMessage: ">>> Syntax analyzed!"})
86 | currentComponent.setState({resultSyntax: JSON.stringify(data.SyntaxTokens)});
87 | }
88 | document.getElementById("chck3").checked = true;
89 | });
90 |
91 | //Detect Key Phrases -- KeyPhrases[n] .Text, .Score
92 | comprehend.detectKeyPhrases(comprehendParams, function (err, data){
93 | if (err) {
94 | currentComponent.setState({resultKeyPhrasesMessage: err.message});
95 | currentComponent.setState({resultKeyPhrases: ""})
96 | }
97 | else {
98 | currentComponent.setState({resultKeyPhrasesMessage: ">>> KeyPhrases analyzed!"})
99 | currentComponent.setState({resultKeyPhrases: JSON.stringify(data.KeyPhrases)});
100 | }
101 | document.getElementById("chck4").checked = true;
102 | });
103 |
104 | //Check if text contains PII entities, return types
105 | comprehend.containsPiiEntities(comprehendParams, function (err, data){
106 | if (err) {
107 | currentComponent.setState({resultContainsPiiEntitiesMessage: err.message});
108 | currentComponent.setState({resultContainsPiiEntities: ""});
109 | currentComponent.setState({resultContainsPiiEntitiesScore: ""});
110 | }
111 | else {
112 | currentComponent.setState({resultContainsPiiEntitiesMessage: ">>> Contains PII operation complete!"});
113 | currentComponent.setState({resultContainsPiiEntities: JSON.stringify(data.Labels)});
114 | }
115 | document.getElementById("chck5").checked = true;
116 | });
117 |
118 | //Detect particular instances of PII entities, return locations, types, score
119 | comprehend.detectPiiEntities(comprehendParams, function (err, data){
120 | if (err) {
121 | currentComponent.setState({resultDetectPiiEntitiesMessage: err.message});
122 | currentComponent.setState({resultDetectPiiEntities: ""});
123 | //currentComponent.setState({resultDetectPiiEntitiesScore: ""});
124 | }
125 | else {
126 | currentComponent.setState({resultDetectPiiEntitiesMessage: ">>> Detect PII operation complete!"});
127 | currentComponent.setState({resultDetectPiiEntities: JSON.stringify(data.Entities)});
128 | }
129 | document.getElementById("chck6").checked = true;
130 | });
131 | }
132 | }
133 |
134 |
135 | render() {
136 | let sentimentStatus, sentiment, sentimentScore, entities, entitiesStatus, syntax, syntaxStatus, keyPhrases, keyPhrasesStatus, containsPiiStatus, piiEntityLabels, detectPiiStatus, piiEntities;
137 | if(this.state.resultMessage !== ''){
138 | sentimentStatus = {this.state.resultSentimentMessage}
139 | sentiment = {this.state.resultSentiment}
140 | sentimentScore = {this.state.resultSentimentScore}
141 |
142 | entitiesStatus = {this.state.resultEntitiesMessage}
143 | entities = {this.state.resultEntities}
144 |
145 | syntaxStatus = {this.state.resultSyntaxMessage}
146 | syntax = {this.state.resultSyntax}
147 |
148 | keyPhrasesStatus = {this.state.resultKeyPhrasesMessage}
149 | keyPhrases = {this.state.resultKeyPhrases}
150 |
151 | containsPiiStatus = {this.state.resultContainsPiiEntitiesMessage}
152 | piiEntityLabels = {this.state.resultContainsPiiEntities}
153 |
154 | detectPiiStatus = {this.state.resultDetectPiiEntitiesMessage}
155 | piiEntities = {this.state.resultDetectPiiEntities}
156 | }
157 | return (
158 |
159 |
160 |
161 | This demo site is no longer operational. To try out the latest capabilities, please visit the respective service demo within the AWS Console.
162 |
163 |
164 |
165 |
166 |
Amazon Comprehend
167 |
168 |
169 |
170 |
Amazon Comprehend uses natural language processing (NLP) to extract insights about the content of documents. Amazon Comprehend processes any text file in UTF-8 format. It develops insights by recognizing the entities, key phrases, language, sentiments, PII (personally identifiable information), and other common elements in a document. Use Amazon Comprehend to create new products based on understanding the structure of documents. For example, using Amazon Comprehend you can search social networking feeds for mentions of products or scan an entire document repository for key phrases.
171 |
In this example, we're going to show how easy it is to send text to Amazon Comprehend to understand text sentiment, identify entities and key phrases, and assess syntax tokens.
172 |
173 | Methods:
174 |
175 | sendTextToComprehend(): Send text to Comprehend, returning all relevant results in the response body.
176 |
detectSentiment()
177 |
detectEntities()
178 |
detectKeyPhrases()
179 |
detectSyntax()
180 |
containsPiiEntities(), detectPiiEntities()
181 |
182 |
183 |
184 |
185 |
Step 1: Insert Text
186 |
192 |
193 |
194 |
Results:
195 | {/* test start */}
196 |
197 |
198 |
199 |
200 |
201 |
Sentiment
202 |
203 | {sentiment}
204 | {sentimentScore}
205 |
206 |
207 |
208 |
209 |
Entities
210 |
211 | {entities}
212 |
213 |
214 |
215 |
216 |
Key Phrases
217 |
218 | {keyPhrases}
219 |
220 |
221 |
222 |
223 |
Syntax Tokens
224 |
225 | {syntax}
226 |
227 |
228 |
229 |
230 |
PII Types in Passage
231 |
232 | {piiEntityLabels}
233 |
234 |
235 |
236 |
237 |
Detect PII Entities
238 |
239 | {piiEntities}
240 |
241 |
242 |
243 |
244 |
245 |
246 |
247 |
248 |
249 |
250 |
// app end
251 | );
252 | }
253 | }
254 | export default Comprehend;
--------------------------------------------------------------------------------
/src/components/Translate.js:
--------------------------------------------------------------------------------
1 | import React, {Component} from 'react'
2 | import NavBar from '../utilities/navbar';
3 | import Footer from '../utilities/footer';
4 | var AWS = require('aws-sdk');
5 | AWS.config.region = 'us-east-1';
6 | AWS.config.credentials = new AWS.CognitoIdentityCredentials({IdentityPoolId: 'us-east-1:1956382a-b3f6-472c-9a8d-3a246853c917'});
7 |
8 |
9 | class Translate extends Component {
10 | constructor(props){
11 | super(props);
12 |
13 | this.state = {
14 | text: '',
15 | resultMessage: '',
16 | sourceLang: 'auto',
17 | targetLang: 'es',
18 | resultTranslation: ''
19 | }
20 | this.onChangeText = this.onChangeText.bind(this);
21 | this.onChangeSourceLanguage = this.onChangeSourceLanguage.bind(this);
22 | this.onChangeTargetLanguage = this.onChangeTargetLanguage.bind(this);
23 | this.sendTextToTranslate = this.sendTextToTranslate.bind(this);
24 |
25 | }
26 |
27 | onChangeText(e){
28 | this.setState({text: e.target.value});
29 | }
30 |
31 | onChangeSourceLanguage(e){
32 | this.setState({sourceLang: e.target.value});
33 | }
34 |
35 | onChangeTargetLanguage(e){
36 | this.setState({targetLang: e.target.value});
37 | }
38 |
39 | sendTextToTranslate = () => {
40 | // API call params
41 | // full list of language codes available here: https://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/Translate.html#translateText-property
42 | var TranslateParams = {
43 | SourceLanguageCode: "",
44 | TargetLanguageCode: "",
45 | Text: ""
46 | };
47 | TranslateParams.Text = this.state.text;
48 | TranslateParams.SourceLanguageCode = this.state.sourceLang;
49 | TranslateParams.TargetLanguageCode = this.state.targetLang;
50 |
51 | // instantiate Translate client
52 | var Translate = new AWS.Translate({apiVersion: '2017-07-01'});
53 | let currentComponent = this;
54 |
55 | // call translateText method
56 | if (!!TranslateParams.Text) {
57 | Translate.translateText(TranslateParams, function (err, data){
58 | if (err) {
59 | currentComponent.setState({resultMessage: err.message});
60 | currentComponent.setState({resultTranslation: 'No translation occurred - check the error!'})
61 | }
62 | else {
63 | currentComponent.setState({resultTranslation: data.TranslatedText});
64 | currentComponent.setState({resultMessage: "Text translation successful!"})
65 | }
66 | document.getElementById("chck1").checked = true;
67 | });
68 | };
69 |
70 | }
71 |
72 |
73 | render() {
74 | let result, translation;
75 | if(this.state.resultMessage !== ''){
76 | result = {this.state.resultMessage}
77 | translation = {this.state.resultTranslation}
78 | }
79 | return (
80 |
81 |
82 |
83 | This demo site is no longer operational. To try out the latest capabilities, please visit the respective service demo within the AWS Console.
84 |
85 |
86 |
87 |
88 |
Amazon Translate
89 |
90 |
91 |
92 |
Amazon Translate is a neural machine translation service that delivers fast, high-quality, and affordable language translation. Neural machine translation is a form of language translation automation that uses deep learning models to deliver more accurate and more natural sounding translation than traditional statistical and rule-based translation algorithms. Amazon Translate allows you to localize content - such as websites and applications - for international users, and to easily translate large volumes of text efficiently.
93 |
94 |
In this example, we're going to show how easy it is to translate text from one language to another using Amazon Translate.
95 |
96 | Methods:
97 |
translateText() : Initialize a translation from sample text for a given target language
98 |
99 |
100 |
101 |
102 |
Step 1: Enter Text
103 |
104 |
105 |
106 |
107 |
108 |
109 |
110 |
Step 2: Choose Languages
111 |
112 |
113 |
114 |
115 | Autodetect (Powered by Amazon Comprehend)
116 | Afrikaans
117 | Albanian
118 | Amharic
119 | Arabic
120 | Azerbaijani
121 | Bengali
122 | Bosnian
123 | Bulgarian
124 | Chinese (Simplified)
125 | Chinese (Traditional)
126 | Croatian
127 | Czech
128 | Danish
129 | Dari
130 | Dutch
131 | English
132 | Estonian
133 | Finnish
134 | French
135 | French (Canadian)
136 | Georgian
137 | German
138 | Greek
139 | Hausa
140 | Hebrew
141 | Hindi
142 | Hungarian
143 | Indonesian
144 | Italian
145 | Japanese
146 | Korean
147 | Latvian
148 | Malay
149 | Norwegian
150 | Persian
151 | Pashto
152 | Polish
153 | Portugese
154 | Romanian
155 | Russian
156 | Serbian
157 | Slovak
158 | Slovenian
159 | Somali
160 | Spanish
161 | Swahili
162 | Swedish
163 | Tagalog
164 | Tamil
165 | Thai
166 | Turkish
167 | Ukranian
168 | Urdu
169 | Vietnamese
170 |
171 |
172 |
173 |
174 |
175 |
176 |
177 | Afrikaans
178 | Albanian
179 | Amharic
180 | Arabic
181 | Azerbaijani
182 | Bengali
183 | Bosnian
184 | Bulgarian
185 | Chinese (Simplified)
186 | Chinese (Traditional)
187 | Croatian
188 | Czech
189 | Danish
190 | Dari
191 | Dutch
192 | English
193 | Estonian
194 | Finnish
195 | French
196 | French (Canadian)
197 | Georgian
198 | German
199 | Greek
200 | Hausa
201 | Hebrew
202 | Hindi
203 | Hungarian
204 | Indonesian
205 | Italian
206 | Japanese
207 | Korean
208 | Latvian
209 | Malay
210 | Norwegian
211 | Persian
212 | Pashto
213 | Polish
214 | Portugese
215 | Romanian
216 | Russian
217 | Serbian
218 | Slovak
219 | Slovenian
220 | Somali
221 | Spanish
222 | Swahili
223 | Swedish
224 | Tagalog
225 | Tamil
226 | Thai
227 | Turkish
228 | Ukranian
229 | Urdu
230 | Vietnamese
231 |
232 |
233 |
234 |
Translate text with Translate!
235 |
236 |
237 |
Result:
238 |
239 |
240 |
241 |
Translation
242 |
243 | {result}
244 | {translation}
245 |
246 |
247 |
248 |
249 |
250 |
251 |
252 |
253 |
254 | );
255 | }
256 | }
257 | export default Translate;
--------------------------------------------------------------------------------
/src/components/Polly.js:
--------------------------------------------------------------------------------
1 | import React, {Component} from 'react';
2 | import NavBar from '../utilities/navbar';
3 | import Footer from '../utilities/footer';
4 | import ReactAudioPlayer from 'react-audio-player';
5 | import $ from 'jquery';
6 | var AWS = require('aws-sdk');
7 | AWS.config.region = 'us-east-1';
8 | AWS.config.credentials = new AWS.CognitoIdentityCredentials({IdentityPoolId: 'us-east-1:1956382a-b3f6-472c-9a8d-3a246853c917'});
9 |
10 | class Polly extends Component {
11 | constructor(props){
12 | super(props);
13 |
14 | this.state = {
15 | text: '',
16 | voiceId: '',
17 | engine: '',
18 | resultMessage: '',
19 | pollyUrl: ''
20 | }
21 | this.onChangeText = this.onChangeText.bind(this);
22 | this.onChangeVoiceId = this.onChangeVoiceId.bind(this);
23 | this.onChangeEngine = this.onChangeEngine.bind(this);
24 | this.sendTextToPolly = this.sendTextToPolly.bind(this);
25 | }
26 |
27 | onChangeVoiceId(e){
28 | this.setState({voiceId: e.target.value});
29 | }
30 |
31 | onChangeEngine(e){
32 | this.setState({engine: e.target.value});
33 | }
34 |
35 | onChangeText(e){
36 | this.setState({text: e.target.value});
37 | }
38 |
39 |
40 | sendTextToPolly = () => {
41 | // Create synthesizeSpeech params JSON
42 | var speechParams = {
43 | OutputFormat: "mp3",
44 | //SampleRate: "16000",
45 | Text: "",
46 | TextType: "text",
47 | Engine: "standard", //standard, neural
48 | VoiceId: "Justin"
49 | };
50 |
51 | speechParams.Text = this.state.text;
52 | speechParams.VoiceId = this.state.voiceId;
53 | speechParams.Engine = this.state.engine;
54 |
55 | //your polly call goes here, this is extra credit!
56 | // Create the Polly service object and presigner object
57 | var polly = new AWS.Polly({apiVersion: '2016-06-10'});
58 | var signer = new AWS.Polly.Presigner(speechParams, polly)
59 | let currentComponent = this;
60 | // Create presigned URL of synthesized speech file
61 | signer.getSynthesizeSpeechUrl(speechParams, (error, url) => {
62 | if (error) {
63 | currentComponent.setState({resultMessage: error.message});
64 |
65 | } else {
66 | //audioSource.src = url;
67 | currentComponent.setState({pollyUrl: url});
68 | currentComponent.setState({resultMessage: "Speech ready to play"});
69 |
70 | }
71 | });
72 |
73 | }
74 |
75 | render() {
76 | let result;
77 | if(this.state.resultMessage !== ''){
78 | result = {this.state.resultMessage}
79 | }
80 |
81 | var $topSelect = $('select[name="voice-choose"]');
82 | var $nestedSelects = $('select[name!="voice-choose"]');
83 | $nestedSelects.hide();
84 | showApplicableSelect();
85 | $topSelect.change(showApplicableSelect);
86 | function showApplicableSelect() {
87 | $nestedSelects.hide();
88 | $('select[name="' + $topSelect.val() + '"]').show();
89 | }
90 |
91 | return (
92 |
93 |
94 |
95 | This demo site is no longer operational. To try out the latest capabilities, please visit the respective service demo within the AWS Console.
96 |
97 |
98 |
99 |
100 |
Amazon Polly
101 |
102 |
103 |
104 |
Amazon Polly is a cloud service that converts text into lifelike speech. You can use Amazon Polly to develop applications that increase engagement and accessibility. Amazon Polly supports multiple languages and includes a variety of lifelike voices, so you can build speech-enabled applications that work in multiple locations and use the ideal voice for your customers. With Amazon Polly, you only pay for the text you synthesize. You can also cache and replay Amazon Polly’s generated speech at no additional cost.
105 |
106 |
For a full breakdown of the available voices and their respective locales, view the docs here .
107 |
108 |
In this example, we're going to show how easy it is to send text to Amazon Polly to synthesize audio.
109 |
110 | Methods:
111 |
112 |
113 |
114 |
115 |
116 |
Step 1: Choose a voice
117 |
118 |
119 |
120 | Choose an engine:
121 | Neural
122 | Standard
123 |
124 |
125 | Choose a voice:
126 |
127 | Ivy
128 | Joanna
129 | Kendra
130 | Kimberly
131 | Salli
132 | Joey
133 | Justin
134 | Matthew
135 |
136 | Zeina
137 |
138 | Zhiyu
139 |
140 | Naja
141 | Mads
142 |
143 | Lotte
144 | Ruben
145 |
146 | Russell
147 | Nicole
148 |
149 | Amy
150 | Emma
151 | Brian
152 |
153 | Aditi
154 | Raveena
155 |
156 | Geraint
157 |
158 | Celine
159 | Léa
160 | Mathieu
161 |
162 | Chantal
163 |
164 | Marlene
165 | Vicki
166 | Hans
167 |
168 | Aditi
169 |
170 | Dora
171 | Karl
172 |
173 | Giorgio
174 | Bianca
175 |
176 | Mizuki
177 | Takumi
178 |
179 | Seoyeon
180 |
181 | Liv
182 |
183 | Ewa
184 | Maja
185 | Jacek
186 | Jan
187 |
188 | Vitoria
189 | Ricardo
190 |
191 | Ines
192 | Cristiano
193 |
194 | Carmen
195 |
196 | Tatyana
197 | Maxim
198 |
199 | Conchita
200 | Lucia
201 | Enrique
202 |
203 | Mia
204 |
205 | Lupe
206 | Penelope
207 | Miguel
208 |
209 | Astrid
210 |
211 | Filiz
212 |
213 | Gwyneth
214 |
215 |
216 | Choose a voice:
217 |
218 | Ivy
219 | Joanna
220 | Kendra
221 | Kimberly
222 | Salli
223 | Joey
224 | Justin
225 | Matthew
226 |
227 | Amy
228 | Emma
229 | Brian
230 |
231 | Lupe
232 |
233 | Camila
234 |
235 |
236 |
237 |
238 |
239 |
Step 2: Write text
240 |
241 |
242 |
243 |
244 | Voice My Message Using Polly
245 |
246 |
247 |
248 |
Step 3: Get Result
249 |
254 | {result}
255 |
256 |
257 | {/*}
258 | Placeholder for live json pane of glass feature
259 |
260 |
261 |
API Call:
262 |
263 |
264 |
265 | print
266 | some example
267 | json for the API request
268 |
269 |
*/}
270 |
271 |
272 |
273 |
274 | );
275 | };
276 |
277 | }
278 |
279 | export default Polly;
--------------------------------------------------------------------------------