├── .nojekyll
├── docs
│ ├── .nojekyll
│ ├── styleguide
│ │ ├── contributor-notes.md
│ │ ├── maintenance-notes.md
│ │ ├── design-guidelines.md
│ │ ├── development-guidelines.md
│ │ └── reference-guidelines.md
│ ├── _media
│ │ ├── GitHub-Mark-120px-plus.png
│ │ ├── reference__header-dcgan.png
│ │ ├── reference__header-kmeans.png
│ │ ├── reference__header-unet.png
│ │ ├── reference__header-yolo.png
│ │ ├── reference__header-bodypix.png
│ │ ├── reference__header-charrnn.png
│ │ ├── reference__header-faceapi.png
│ │ ├── reference__header-pix2pix.png
│ │ ├── reference__header-posenet.jpg
│ │ ├── reference__header-word2vec.png
│ │ ├── reference__header-sentiment.png
│ │ ├── reference__header-sketchrnn.png
│ │ ├── reference__header-styletransfer.png
│ │ ├── reference__header-imageClassifier.png
│ │ ├── reference__header-knn-classifier.png
│ │ ├── reference__header-neural-network.png
│ │ ├── reference__header-pitch-detection.png
│ │ ├── reference__header-sound-classifier.png
│ │ └── reference__header-feature-extractor.png
│ ├── reference
│ │ ├── index.md
│ │ ├── sentiment.md
│ │ ├── kmeans.md
│ │ ├── cvae.md
│ │ ├── pix2pix.md
│ │ ├── unet.md
│ │ ├── style-transfer.md
│ │ ├── object-detector.md
│ │ ├── sketchrnn.md
│ │ ├── pitch-detection.md
│ │ ├── yolo.md
│ │ ├── sound-classifier.md
│ │ ├── dcgan.md
│ │ └── charrnn.md
│ ├── faq.md
│ ├── tutorials
│ │ ├── local-web-server.md
│ │ └── promises-and-callbacks.md
│ ├── _sidebar.md
│ ├── index.html
│ └── README.md
├── src
│ ├── Pix2pix
│ │ └── index_test.js
│ ├── KNNClassifier
│ │ └── index_test.js
│ ├── PitchDetection
│ │ └── index.test.js
│ ├── utils
│ │ ├── modelLoader.js
│ │ ├── callcallback.js
│ │ ├── sample.js
│ │ ├── io.js
│ │ ├── gettopkclasses.js
│ │ ├── Video.js
│ │ ├── p5PreloadHelper.js
│ │ ├── COCO_CLASSES.js
│ │ ├── random.js
│ │ ├── checkpointLoaderPix2pix.js
│ │ ├── checkpointLoader.js
│ │ ├── p5Utils.js
│ │ └── imageUtilities.js
│ ├── NeuralNetwork
│ │ ├── NeuralNetworkDefaults.js
│ │ ├── index_test.js
│ │ └── NeuralNetworkVis.js
│ ├── Sentiment
│ │ ├── index_test.js
│ │ └── index.js
│ ├── ObjectDetector
│ │ ├── index_test.js
│ │ ├── YOLO
│ │ │ ├── index_test.js
│ │ │ └── postprocess.js
│ │ ├── CocoSsd
│ │ │ └── index_test.js
│ │ └── index.js
│ ├── StyleTransfer
│ │ └── index_test.js
│ ├── KMeans
│ │ └── index_test.js
│ ├── SoundClassifier
│ │ ├── speechcommands.js
│ │ └── index.js
│ ├── FeatureExtractor
│ │ ├── index_test.js
│ │ └── index.js
│ ├── PoseNet
│ │ └── index_test.js
│ ├── index.js
│ ├── CharRNN
│ │ └── index_test.js
│ ├── SketchRNN
│ │ ├── models.js
│ │ └── index.js
│ ├── BodyPix
│ │ ├── BODYPIX_PALETTE.js
│ │ └── index_test.js
│ ├── FaceApi
│ │ └── index_test.js
│ ├── ImageClassifier
│ │ ├── doodlenet.js
│ │ ├── darknet.js
│ │ └── index_test.js
│ ├── Word2vec
│ │ ├── index_test.js
│ │ └── index.js
│ └── CVAE
│   └── index.js
├── assets
│ ├── bird.jpg
│ ├── header.png
│ ├── ml5-localhost.png
│ └── ml5-webpack-build.png
├── .vscode
│ └── settings.json
├── .travis.yml
├── scripts
│ ├── test-travis.sh
│ ├── updatePackageVersion.js
│ ├── updateReadme.js
│ └── updateDocVersions.js
├── .eslintrc.json
├── .gitignore
├── webpack.prod.babel.js
├── webpack.dev.babel.js
├── webpack.common.babel.js
├── LICENSE
├── .github
│ ├── PULL_REQUEST_TEMPLATE.md
│ └── ISSUE_TEMPLATE.md
├── webpack.test.babel.js
├── karma.conf.js
└── package.json
/.nojekyll:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/docs/.nojekyll:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/src/Pix2pix/index_test.js:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/src/KNNClassifier/index_test.js:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/assets/bird.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adaptive/ml5-library/development/assets/bird.jpg
--------------------------------------------------------------------------------
/assets/header.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adaptive/ml5-library/development/assets/header.png
--------------------------------------------------------------------------------
/assets/ml5-localhost.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adaptive/ml5-library/development/assets/ml5-localhost.png
--------------------------------------------------------------------------------
/docs/styleguide/contributor-notes.md:
--------------------------------------------------------------------------------
1 | # Contributor Notes (External)
2 |
3 | coming soon - notes for contributors
--------------------------------------------------------------------------------
/assets/ml5-webpack-build.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adaptive/ml5-library/development/assets/ml5-webpack-build.png
--------------------------------------------------------------------------------
/docs/styleguide/maintenance-notes.md:
--------------------------------------------------------------------------------
1 | # Maintenance Guidelines & DevOps (Internal)
2 |
3 | coming soon - notes for ml5 team
--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "prettier.trailingComma": "all",
3 | "prettier.printWidth": 100,
4 | "prettier.arrowParens": "avoid"
5 | }
--------------------------------------------------------------------------------
/docs/_media/GitHub-Mark-120px-plus.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adaptive/ml5-library/development/docs/_media/GitHub-Mark-120px-plus.png
--------------------------------------------------------------------------------
/docs/_media/reference__header-dcgan.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adaptive/ml5-library/development/docs/_media/reference__header-dcgan.png
--------------------------------------------------------------------------------
/docs/_media/reference__header-kmeans.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adaptive/ml5-library/development/docs/_media/reference__header-kmeans.png
--------------------------------------------------------------------------------
/docs/_media/reference__header-unet.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adaptive/ml5-library/development/docs/_media/reference__header-unet.png
--------------------------------------------------------------------------------
/docs/_media/reference__header-yolo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adaptive/ml5-library/development/docs/_media/reference__header-yolo.png
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: node_js
2 | node_js:
3 | - "8"
4 | - "10"
5 | git:
6 | depth: 5
7 | script:
8 | - yarn build
9 | - yarn test-travis
--------------------------------------------------------------------------------
/docs/_media/reference__header-bodypix.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adaptive/ml5-library/development/docs/_media/reference__header-bodypix.png
--------------------------------------------------------------------------------
/docs/_media/reference__header-charrnn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adaptive/ml5-library/development/docs/_media/reference__header-charrnn.png
--------------------------------------------------------------------------------
/docs/_media/reference__header-faceapi.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adaptive/ml5-library/development/docs/_media/reference__header-faceapi.png
--------------------------------------------------------------------------------
/docs/_media/reference__header-pix2pix.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adaptive/ml5-library/development/docs/_media/reference__header-pix2pix.png
--------------------------------------------------------------------------------
/docs/_media/reference__header-posenet.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adaptive/ml5-library/development/docs/_media/reference__header-posenet.jpg
--------------------------------------------------------------------------------
/docs/_media/reference__header-word2vec.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adaptive/ml5-library/development/docs/_media/reference__header-word2vec.png
--------------------------------------------------------------------------------
/docs/_media/reference__header-sentiment.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adaptive/ml5-library/development/docs/_media/reference__header-sentiment.png
--------------------------------------------------------------------------------
/docs/_media/reference__header-sketchrnn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adaptive/ml5-library/development/docs/_media/reference__header-sketchrnn.png
--------------------------------------------------------------------------------
/docs/_media/reference__header-styletransfer.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adaptive/ml5-library/development/docs/_media/reference__header-styletransfer.png
--------------------------------------------------------------------------------
/docs/_media/reference__header-imageClassifier.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adaptive/ml5-library/development/docs/_media/reference__header-imageClassifier.png
--------------------------------------------------------------------------------
/docs/_media/reference__header-knn-classifier.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adaptive/ml5-library/development/docs/_media/reference__header-knn-classifier.png
--------------------------------------------------------------------------------
/docs/_media/reference__header-neural-network.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adaptive/ml5-library/development/docs/_media/reference__header-neural-network.png
--------------------------------------------------------------------------------
/docs/_media/reference__header-pitch-detection.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adaptive/ml5-library/development/docs/_media/reference__header-pitch-detection.png
--------------------------------------------------------------------------------
/docs/_media/reference__header-sound-classifier.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adaptive/ml5-library/development/docs/_media/reference__header-sound-classifier.png
--------------------------------------------------------------------------------
/docs/_media/reference__header-feature-extractor.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adaptive/ml5-library/development/docs/_media/reference__header-feature-extractor.png
--------------------------------------------------------------------------------
/scripts/test-travis.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | if [[ $(node -v) = *v10* ]]; then
4 | karma start \
5 | --browsers='bs_chrome_mac' \
6 | --singleRun --reporters='dots,progress,BrowserStack'
7 | fi
--------------------------------------------------------------------------------
/.eslintrc.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": ["airbnb-base", "prettier"],
3 | "globals": {
4 | "fetch": false,
5 | "document": true
6 | },
7 | "rules":{
8 | "no-console":0
9 | },
10 | "env": {
11 | "browser": true,
12 | "jasmine": true
13 | }
14 | }
15 |
--------------------------------------------------------------------------------
/docs/styleguide/design-guidelines.md:
--------------------------------------------------------------------------------
1 | # Design Guidelines
2 |
3 | coming soon
4 |
5 | ## Colors
6 |
7 |
8 | ## Typography
9 |
10 |
11 | ## Logos
12 |
13 |
14 | ## Iconography
15 |
16 |
17 |
18 | ## Illustration
19 |
20 |
21 |
22 |
35 |
--------------------------------------------------------------------------------
/src/PitchDetection/index.test.js:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2018 ml5
2 | //
3 | // This software is released under the MIT License.
4 | // https://opensource.org/licenses/MIT
5 |
6 | const { pitchDetection } = ml5;
7 |
8 | describe('pitchDetection', () => {
9 | let pitch;
10 |
11 | // beforeAll(async () => {
12 | // });
13 |
14 | // it('instantiates a pitchDetection', async () => {
15 | // });
16 | });
17 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .env
2 | dev
3 | examples/es6/node_modules
4 | experiments/node_modules
5 | node_modules
6 | **/npm-debug.log
7 | *.DS_STORE
8 | experiments
9 | manual-test
10 | training/lstm/data/t
11 | .cache
12 | /public
13 | /static
14 | /dist
15 |
16 | website/translated_docs
17 | website/build/
18 | website/yarn.lock
19 | website/node_modules
20 |
21 | website/i18n/*
22 | !website/i18n/en.json
23 |
24 | yarn-error.log
25 |
--------------------------------------------------------------------------------
/src/utils/modelLoader.js:
--------------------------------------------------------------------------------
1 | // Returns true for absolute URLs (with a scheme) and protocol-relative URLs.
2 | function isAbsoluteURL(str) {
3 |   const pattern = new RegExp('^(?:[a-z]+:)?//', 'i');
4 |   return !!pattern.test(str);
5 | }
6 | 
7 | // Resolve a model path: absolute URLs pass through unchanged; relative paths
8 | // are resolved against the current page's pathname.
9 | function getModelPath(absoluteOrRelativeUrl) {
10 |   const modelJsonPath = isAbsoluteURL(absoluteOrRelativeUrl) ? absoluteOrRelativeUrl : window.location.pathname + absoluteOrRelativeUrl;
11 |   return modelJsonPath;
12 | }
13 | 
14 | export default {
15 |   isAbsoluteURL,
16 |   getModelPath,
17 | };
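18 | 
19 | // Example (illustrative): on a page whose pathname is '/sketch/', getModelPath('model.json')
20 | // returns '/sketch/model.json', while getModelPath('https://example.com/model.json') is returned unchanged.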
--------------------------------------------------------------------------------
/src/NeuralNetwork/NeuralNetworkDefaults.js:
--------------------------------------------------------------------------------
1 | const DEFAULTS = {
2 | task: 'regression',
3 | activationHidden: 'sigmoid',
4 | activationOutput: 'sigmoid',
5 | debug: false,
6 | learningRate: 0.25,
7 | inputs: 2,
8 | outputs: 1,
9 | noVal: null,
10 | hiddenUnits: 16,
11 | modelMetrics: ['accuracy'],
12 | modelLoss: 'meanSquaredError',
13 | modelOptimizer: null,
14 | batchSize: 64,
15 | epochs: 32,
16 | returnTensors: false,
17 | }
18 |
19 | export default DEFAULTS;
--------------------------------------------------------------------------------
/src/utils/callcallback.js:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2018 ml5
2 | //
3 | // This software is released under the MIT License.
4 | // https://opensource.org/licenses/MIT
5 | // Accepts a promise and an optional error-first callback; calls the callback with (undefined, result) on success or (error) on failure, and always returns the original promise.
6 | export default function callCallback(promise, callback) {
7 | if (callback) {
8 | promise
9 | .then((result) => {
10 | callback(undefined, result);
11 | return result;
12 | })
13 | .catch((error) => {
14 | callback(error);
15 | return error;
16 | });
17 | }
18 | return promise;
19 | }
20 |
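21 | // Usage sketch (illustrative): both styles work for the same method.
22 | //   callCallback(somePromise, (err, result) => { /* error-first callback style */ });
23 | //   callCallback(somePromise).then((result) => { /* promise style */ });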
--------------------------------------------------------------------------------
/webpack.prod.babel.js:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2018 ml5
2 | //
3 | // This software is released under the MIT License.
4 | // https://opensource.org/licenses/MIT
5 |
6 | import merge from 'webpack-merge';
7 | import common from './webpack.common.babel';
8 | import UglifyJSPlugin from 'uglifyjs-webpack-plugin';
9 |
10 | export default merge(common, {
11 | mode: 'production',
12 | devtool: 'source-map',
13 | output: {
14 | filename: 'ml5.min.js'
15 | },
16 | plugins: [
17 | new UglifyJSPlugin({
18 | sourceMap: true
19 | })
20 | ]
21 | })
22 |
--------------------------------------------------------------------------------
/src/utils/sample.js:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2018 ml5
2 | //
3 | // This software is released under the MIT License.
4 | // https://opensource.org/licenses/MIT
5 |
6 | // Utils for sampling
7 |
8 | // Sample an index from a probability distribution (values are assumed to sum to 1)
9 | const sampleFromDistribution = (input) => {
10 | const randomValue = Math.random();
11 | let sum = 0;
12 | let result;
13 | for (let j = 0; j < input.length; j += 1) {
14 | sum += input[j];
15 | if (randomValue < sum) {
16 | result = j;
17 | break;
18 | }
19 | }
20 | return result;
21 | };
22 |
23 | export default sampleFromDistribution;
24 |
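25 | // Example (illustrative): sampleFromDistribution([0.25, 0.5, 0.25]) returns index 1
26 | // about half the time; the input is assumed to be an array of probabilities summing to 1.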
--------------------------------------------------------------------------------
/src/Sentiment/index_test.js:
--------------------------------------------------------------------------------
1 | const { sentiment } = ml5;
2 |
3 | describe('Sentiment', ()=>{
4 | let model;
5 |
6 | beforeAll(async () => {
7 | jasmine.DEFAULT_TIMEOUT_INTERVAL = 10000;
8 | model = await sentiment('moviereviews').ready;
9 | });
10 |
11 | it("Model should be ready",()=> expect(model.ready).toBeTruthy());
12 |
13 | it("Happy has a sentiment score greater than 0.5", ()=>{
14 | expect(model.predict('Happy').score).toBeGreaterThan(0.5);
15 | });
16 |
17 | it("Terrible has a sentiment score less than 0.5", ()=>{
18 | expect(model.predict('Terrible').score).toBeLessThan(0.5);
19 | });
20 | });
21 |
--------------------------------------------------------------------------------
/webpack.dev.babel.js:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2018 ml5
2 | //
3 | // This software is released under the MIT License.
4 | // https://opensource.org/licenses/MIT
5 |
6 | import { join } from 'path';
7 | import merge from 'webpack-merge';
8 | import common from './webpack.common.babel';
9 | import HtmlWebpackPlugin from 'html-webpack-plugin';
10 |
11 | export default merge(common, {
12 | mode: 'development',
13 | devtool: 'inline-source-map',
14 | devServer: {
15 | watchContentBase: true,
16 | contentBase: join(__dirname, './dist'),
17 | disableHostCheck: true,
18 | },
19 | plugins: [
20 | new HtmlWebpackPlugin({
21 | title: 'ml5'
22 | })
23 | ]
24 | })
25 |
--------------------------------------------------------------------------------
/scripts/updatePackageVersion.js:
--------------------------------------------------------------------------------
1 | const fs = require('fs');
2 | const newVersionNumber = process.env.newversion;
3 |
4 | function checkVersionGiven(){
5 | if(newVersionNumber === undefined){
6 | console.log('🔥🔥🔥submit the new version number 🔥🔥🔥');
7 | process.exit(22);
8 | }
9 | }
10 |
11 | function updatePackageVersion(fpath){
12 | checkVersionGiven();
13 | let packageJson = fs.readFileSync(fpath);
14 | packageJson = JSON.parse(packageJson);
15 | packageJson.version = newVersionNumber;
16 |
17 |
18 | fs.writeFileSync(fpath, JSON.stringify(packageJson, null, 2));
19 |
20 | }
21 | updatePackageVersion('./package.json')
22 |
23 | // module.exports = updatePackageVersion;
--------------------------------------------------------------------------------
/src/ObjectDetector/index_test.js:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2019 ml5
2 | //
3 | // This software is released under the MIT License.
4 | // https://opensource.org/licenses/MIT
5 |
6 | const { objectDetector } = ml5;
7 |
8 | describe('ObjectDetector', () => {
9 | let detector;
10 |
11 | beforeAll(async () => {
12 | jasmine.DEFAULT_TIMEOUT_INTERVAL = 100000;
13 | detector = await objectDetector('CocoSsd');
14 | });
15 |
16 | it('throws error when a non image is trying to be detected', async () => {
17 | const notAnImage = 'not_an_image'
18 | try {
19 | await detector.detect(notAnImage);
20 | fail('Error should have been thrown');
21 | }
22 | catch (error) {
23 | expect(error.message).toBe('Detection subject not supported');
24 | }
25 | });
26 | });
27 |
--------------------------------------------------------------------------------
/webpack.common.babel.js:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2018 ml5
2 | //
3 | // This software is released under the MIT License.
4 | // https://opensource.org/licenses/MIT
5 |
6 | import { join, resolve } from 'path';
7 |
8 | const include = join(__dirname, 'src');
9 |
10 | export default {
11 | entry: ['babel-polyfill', './src/index.js'],
12 | output: {
13 | path: resolve(__dirname, 'dist'),
14 | publicPath: '/',
15 | libraryTarget: 'umd',
16 | filename: 'ml5.js',
17 | library: 'ml5',
18 | },
19 | module: {
20 | rules: [
21 | {
22 | enforce: 'pre',
23 | test: /\.js$/,
24 | exclude: /node_modules/,
25 | loader: 'eslint-loader',
26 | },
27 | {
28 | test: /\.js$/,
29 | loader: 'babel-loader',
30 | include,
31 | },
32 | ],
33 | },
34 | node: {
35 | fs: "empty"
36 | }
37 | };
38 |
--------------------------------------------------------------------------------
/src/utils/io.js:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2018 ml5
2 | //
3 | // This software is released under the MIT License.
4 | // https://opensource.org/licenses/MIT
5 |
6 | const saveBlob = async (data, name, type) => {
7 | const link = document.createElement('a');
8 | link.style.display = 'none';
9 | document.body.appendChild(link);
10 | const blob = new Blob([data], { type });
11 | link.href = URL.createObjectURL(blob);
12 | link.download = name;
13 | link.click();
14 | };
15 |
16 | const loadFile = async (path, callback) => fetch(path)
17 | .then(response => response.json())
18 | .then((json) => {
19 | if (callback) {
20 | callback(null, json);
21 | }
22 | return json;
23 | })
24 | .catch((error) => {
25 | if (callback) {
26 | callback(error);
27 | }
28 | console.error(`There has been a problem loading the file: ${error.message}`);
29 | throw error;
30 | });
31 |
32 | export {
33 | saveBlob,
34 | loadFile,
35 | };
36 |
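37 | // Usage sketch (illustrative):
38 | //   saveBlob(JSON.stringify(data), 'data.json', 'application/json'); // triggers a browser download
39 | //   loadFile('./data.json', (err, json) => { if (!err) console.log(json); });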
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2018 ML5.js
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
6 |
7 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
8 |
9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------------
/src/StyleTransfer/index_test.js:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2018 ml5
2 | //
3 | // This software is released under the MIT License.
4 | // https://opensource.org/licenses/MIT
5 |
6 |
7 | const { styleTransfer } = ml5;
8 |
9 | const STYLE_TRANSFER_MODEL = 'https://rawgit.com/ml5js/ml5-data-and-models/master/models/style-transfer/matta/';
10 | const STYLE_TRANSFER_DEFAULTS = {
11 | size: 200,
12 | };
13 |
14 | describe('styleTransfer', () => {
15 | let style;
16 |
17 | async function getImage() {
18 | const img = new Image();
19 | img.crossOrigin = true;
20 | img.src = 'https://ml5js.org/docs/assets/img/bird.jpg';
21 | await new Promise((resolve) => { img.onload = resolve; });
22 | return img;
23 | }
24 |
25 | beforeAll(async () => {
26 | // jasmine.DEFAULT_TIMEOUT_INTERVAL = 100000;
27 | // style = styleTransfer(STYLE_TRANSFER_MODEL);
28 | });
29 |
30 | it('instantiates styleTransfer', () => {
31 | // expect(style.size).toBe(STYLE_TRANSFER_DEFAULTS.size);
32 | });
33 |
34 | // it('styles an image', async () => {
35 | // const image = await getImage();
36 | // style.transfer(image, (err, result) => {
37 | // expect(result.src).Any(String);
38 | // });
39 | // });
40 | });
41 |
--------------------------------------------------------------------------------
/docs/reference/index.md:
--------------------------------------------------------------------------------
1 | # Reference
2 |
3 | Welcome to the ml5.js reference page! Here you can browse the various categories of functionality that ml5.js provides. We have categorized the functionality of ml5.js based on the types of input and output that you might be interested in working with.
4 |
5 | We currently have 4 categories:
6 |
7 | * **Helpers**:
8 | * The ml5 *helpers* category groups ml5 features that are broadly related to machine learning data: working with data, manipulating data, and training a model with data. Among the helpers is the [ml5.neuralNetwork](neural-network.md), which allows you to build and train your own neural network right in the browser. You can also explore the [ml5.featureExtractor](feature-extractor.md) to do [transfer learning](https://en.wikipedia.org/wiki/Transfer_learning); see the short example at the end of this page.
9 | * **Image**:
10 | * The ml5 *image* category groups ml5 features that are related to applying machine learning to images or video.
11 | * **Sound**
12 | * The ml5 *sound* category groups ml5 features that are related to applying machine learning to audio.
13 | * **Text**
14 | * The ml5 *text* category groups ml5 features that are related to applying machine learning to text.
15 |
16 | See the sidebar for more information.
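17 | 
18 | As a small taste of the *helpers* category, here is a minimal sketch of transfer learning with the [ml5.featureExtractor](feature-extractor.md). It assumes ml5 is loaded globally and that `video` is a webcam video element you have already created; the label and callbacks are illustrative only:
19 | 
20 | ```js
21 | // Load MobileNet as a feature extractor, then retrain it on your own label.
22 | const featureExtractor = ml5.featureExtractor('MobileNet', () => {
23 |   const classifier = featureExtractor.classification(video);
24 |   classifier.addImage(video, 'happy'); // collect a labeled example
25 |   classifier.train((loss) => {
26 |     // loss is null once training is done
27 |     if (loss === null) {
28 |       classifier.classify(video, (err, results) => console.log(results));
29 |     }
30 |   });
31 | });
32 | ```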
--------------------------------------------------------------------------------
/src/utils/gettopkclasses.js:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2018 ml5
2 | //
3 | // This software is released under the MIT License.
4 | // https://opensource.org/licenses/MIT
5 |
6 | export function getTopKClassesFromArray(values, topK, CLASSES) {
7 | const valuesAndIndices = [];
8 | for (let i = 0; i < values.length; i += 1) {
9 | valuesAndIndices.push({
10 | value: values[i],
11 | index: i,
12 | });
13 | }
14 | valuesAndIndices.sort((a, b) => b.value - a.value);
15 |
16 | const topkValues = new Float32Array(topK);
17 | const topkIndices = new Int32Array(topK);
18 | for (let i = 0; i < topK; i += 1) {
19 | topkValues[i] = valuesAndIndices[i].value;
20 | topkIndices[i] = valuesAndIndices[i].index;
21 | }
22 |
23 | const topClassesAndProbs = [];
24 | for (let i = 0; i < topkIndices.length; i += 1) {
25 | topClassesAndProbs.push({
26 | className: CLASSES[topkIndices[i]],
27 | probability: topkValues[i],
28 | });
29 | }
30 | return topClassesAndProbs;
31 | }
32 |
33 | export async function getTopKClassesFromTensor(logits, topK, CLASSES) {
34 | const values = await logits.data();
35 | return getTopKClassesFromArray(values, topK, CLASSES);
36 | }
37 |
38 | export default { getTopKClassesFromArray, getTopKClassesFromTensor }
39 |
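40 | // Example (illustrative): getTopKClassesFromArray([0.25, 0.5, 0.25], 2, ['cat', 'dog', 'bird'])
41 | // returns [{ className: 'dog', probability: 0.5 }, { className: 'cat', probability: 0.25 }].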
--------------------------------------------------------------------------------
/scripts/updateReadme.js:
--------------------------------------------------------------------------------
1 | const fs = require('fs');
2 |
3 | function getReadme(fpath){
4 |
5 | const readme = fs.readFileSync(fpath, 'utf8');
6 |
7 | return readme;
8 | }
9 |
10 | function readPackageJson(fpath){
11 | let output = fs.readFileSync(fpath);
12 | output = JSON.parse(output);
13 | return output;
14 | }
15 |
16 | function makeNewVersionString(newVersionNumber){
17 |
18 | const newVersionString = `
19 |
20 |
21 | * You can use the latest version (${newVersionNumber}) by adding it to the head section of your HTML document:
22 |
23 | **v${newVersionNumber}**
24 |
25 |
26 |
27 |
58 |
59 | * [Contributor Notes](/styleguide/contributor-notes.md)
60 | * [Maintenance Notes](/styleguide/maintenance-notes.md)
61 | * Style Guide
62 | * [Reference guidelines](/styleguide/reference-guidelines.md)
63 | * [Development guidelines](/styleguide/development-guidelines.md)
64 | * [Design guidelines](/styleguide/design-guidelines.md)
65 |
--------------------------------------------------------------------------------
/src/BodyPix/BODYPIX_PALETTE.js:
--------------------------------------------------------------------------------
1 | export default {
2 | // "none": {
3 | // "id": -1,
4 | // "color": [0, 0, 0]
5 | // },
6 | "leftFace": {
7 | "id": 0,
8 | "color": [110, 64, 170]
9 | },
10 | "rightFace": {
11 | "id": 1,
12 | "color": [106, 72, 183]
13 | },
14 | "rightUpperLegFront": {
15 | "id": 2,
16 | "color": [100, 81, 196]
17 | },
18 | "rightLowerLegBack": {
19 | "id": 3,
20 | "color": [92, 91, 206]
21 | },
22 | "rightUpperLegBack": {
23 | "id": 4,
24 | "color": [84, 101, 214]
25 | },
26 | "leftLowerLegFront": {
27 | "id": 5,
28 | "color": [75, 113, 221]
29 | },
30 | "leftUpperLegFront": {
31 | "id": 6,
32 | "color": [66, 125, 224]
33 | },
34 | "leftUpperLegBack": {
35 | "id": 7,
36 | "color": [56, 138, 226]
37 | },
38 | "leftLowerLegBack": {
39 | "id": 8,
40 | "color": [48, 150, 224]
41 | },
42 | "rightFeet": {
43 | "id": 9,
44 | "color": [40, 163, 220]
45 | },
46 | "rightLowerLegFront": {
47 | "id": 10,
48 | "color": [33, 176, 214]
49 | },
50 | "leftFeet": {
51 | "id": 11,
52 | "color": [29, 188, 205]
53 | },
54 | "torsoFront": {
55 | "id": 12,
56 | "color": [26, 199, 194]
57 | },
58 | "torsoBack": {
59 | "id": 13,
60 | "color": [26, 210, 182]
61 | },
62 | "rightUpperArmFront": {
63 | "id": 14,
64 | "color": [28, 219, 169]
65 | },
66 | "rightUpperArmBack": {
67 | "id": 15,
68 | "color": [33, 227, 155]
69 | },
70 | "rightLowerArmBack": {
71 | "id": 16,
72 | "color": [41, 234, 141]
73 | },
74 | "leftLowerArmFront": {
75 | "id": 17,
76 | "color": [51, 240, 128]
77 | },
78 | "leftUpperArmFront": {
79 | "id": 18,
80 | "color": [64, 243, 116]
81 | },
82 | "leftUpperArmBack": {
83 | "id": 19,
84 | "color": [79, 246, 105]
85 | },
86 | "leftLowerArmBack": {
87 | "id": 20,
88 | "color": [96, 247, 97]
89 | },
90 | "rightHand": {
91 | "id": 21,
92 | "color": [115, 246, 91]
93 | },
94 | "rightLowerArmFront": {
95 | "id": 22,
96 | "color": [134, 245, 88]
97 | },
98 | "leftHand": {
99 | "id": 23,
100 | "color": [155, 243, 88]
101 | }
102 | }
--------------------------------------------------------------------------------
/src/utils/checkpointLoaderPix2pix.js:
--------------------------------------------------------------------------------
1 | /* eslint max-len: "off" */
2 |
3 | import * as tf from '@tensorflow/tfjs';
4 |
5 | export default class CheckpointLoaderPix2pix {
6 | constructor(urlPath) {
7 | this.urlPath = urlPath;
8 | }
9 |
10 | getAllVariables() {
11 | return new Promise((resolve, reject) => {
12 | const weightsCache = {};
13 | if (this.urlPath in weightsCache) {
14 | resolve(weightsCache[this.urlPath]);
15 | return;
16 | }
17 |
18 | const xhr = new XMLHttpRequest();
19 | xhr.open('GET', this.urlPath, true);
20 | xhr.responseType = 'arraybuffer';
21 | xhr.onload = () => {
22 | if (xhr.status !== 200) {
23 | reject(new Error('missing model'));
24 | return;
25 | }
26 | const buf = xhr.response;
27 | if (!buf) {
28 | reject(new Error('invalid arraybuffer'));
29 | return;
30 | }
31 | // The checkpoint is a sequence of length-prefixed parts (4-byte big-endian lengths): [0] shapes JSON, [1] a float32 index table, [2] uint8-encoded weights.
32 | const parts = [];
33 | let offset = 0;
34 | while (offset < buf.byteLength) {
35 | const b = new Uint8Array(buf.slice(offset, offset + 4));
36 | offset += 4;
37 | const len = (b[0] << 24) + (b[1] << 16) + (b[2] << 8) + b[3]; // eslint-disable-line no-bitwise
38 | parts.push(buf.slice(offset, offset + len));
39 | offset += len;
40 | }
41 |
42 | const shapes = JSON.parse((new TextDecoder('utf8')).decode(parts[0]));
43 | const index = new Float32Array(parts[1]);
44 | const encoded = new Uint8Array(parts[2]);
45 |
46 | // decode using index
47 | const arr = new Float32Array(encoded.length);
48 | for (let i = 0; i < arr.length; i += 1) {
49 | arr[i] = index[encoded[i]];
50 | }
51 |
52 | const weights = {};
53 | offset = 0;
54 | for (let i = 0; i < shapes.length; i += 1) {
55 | const { shape } = shapes[i];
56 | const size = shape.reduce((total, num) => total * num);
57 | const values = arr.slice(offset, offset + size);
58 | const tfarr = tf.tensor1d(values, 'float32');
59 | weights[shapes[i].name] = tfarr.reshape(shape);
60 | offset += size;
61 | }
62 | weightsCache[this.urlPath] = weights;
63 | resolve(weights);
64 | };
65 | xhr.send(null);
66 | });
67 | }
68 | }
69 |
--------------------------------------------------------------------------------
/karma.conf.js:
--------------------------------------------------------------------------------
1 |
2 | module.exports = (config) => {
3 | config.set({
4 | client: {
5 | model: config.model
6 | },
7 | frameworks: ['jasmine'],
8 | files: [
9 | 'src/index.js',
10 | `src/${config.model ? config.model : '**'}/*_test.js`,
11 | `src/${config.model ? config.model : '**'}/**/*_test.js`,
12 | ],
13 | preprocessors: {
14 | 'src/index.js': ['webpack'],
15 | },
16 | webpack: {
17 | // TODO: This is duplication of the webpack.common.babel.js file, but they
18 | // use different import syntaxes so it's not easy to just require it here.
19 | // Maybe this could be put into a JSON file, but the include in the module
20 | // rules is dynamic.
21 | entry: ['babel-polyfill', './src/index.js'],
22 | output: {
23 | libraryTarget: 'umd',
24 | filename: 'ml5.js',
25 | library: 'ml5',
26 | },
27 | module: {
28 | rules: [
29 | {
30 | enforce: 'pre',
31 | test: /\.js$/,
32 | exclude: /node_modules/,
33 | loader: 'eslint-loader',
34 | },
35 | {
36 | test: /\.js$/,
37 | loader: 'babel-loader',
38 | include: require('path').resolve(__dirname, 'src'),
39 | },
40 | ],
41 | },
42 | // Don't minify the webpack build for better stack traces
43 | optimization: {
44 | minimize: false,
45 | },
46 | node: {
47 | fs: "empty"
48 | }
49 | },
50 | webpackMiddleware: {
51 | noInfo: true,
52 | stats: 'errors-only',
53 | },
54 | browserStack: {
55 | username: process.env.BROWSERSTACK_USERNAME,
56 | accessKey: process.env.BROWSERSTACK_ACCESS_KEY
57 | },
58 | captureTimeout: 120000,
59 | reportSlowerThan: 500,
60 | browserNoActivityTimeout: 180000,
61 | customLaunchers: {
62 | bs_chrome_mac: {
63 | base: 'BrowserStack',
64 | browser: 'chrome',
65 | browser_version: 'latest',
66 | os: 'OS X',
67 | os_version: 'High Sierra'
68 | },
69 | },
70 | reporters: ['mocha'],
71 | port: 9876,
72 | colors: true,
73 | logLevel: config.LOG_INFO,
74 | autoWatch: true,
75 | browsers: ['Chrome'],
76 | singleRun: false,
77 | concurrency: Infinity
78 | });
79 | };
80 |
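81 | // Note: when `config.model` is set (see `client` above), only that model's
82 | // *_test.js specs are loaded; otherwise every model's tests run.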
--------------------------------------------------------------------------------
/src/FaceApi/index_test.js:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2018 ml5
2 | //
3 | // This software is released under the MIT License.
4 | // https://opensource.org/licenses/MIT
5 |
6 | const {
7 | faceApi
8 | } = ml5;
9 |
10 | const FACEAPI_DEFAULTS = {
11 | withLandmarks: true,
12 | withDescriptors: true,
13 | MODEL_URLS: {
14 | Mobilenetv1Model: 'https://raw.githubusercontent.com/ml5js/ml5-data-and-models/face-api/models/faceapi/ssd_mobilenetv1_model-weights_manifest.json',
15 | FaceLandmarkModel: 'https://raw.githubusercontent.com/ml5js/ml5-data-and-models/face-api/models/faceapi/face_landmark_68_model-weights_manifest.json',
16 | FaceLandmark68TinyNet: 'https://raw.githubusercontent.com/ml5js/ml5-data-and-models/face-api/models/faceapi/face_landmark_68_tiny_model-weights_manifest.json',
17 | FaceRecognitionModel: 'https://raw.githubusercontent.com/ml5js/ml5-data-and-models/face-api/models/faceapi/face_recognition_model-weights_manifest.json',
18 | }
19 | }
20 |
21 |
22 | describe('faceApi', () => {
23 | let faceapi;
24 |
25 | async function getImage() {
26 | const img = new Image();
27 | img.crossOrigin = true;
28 | img.src = 'https://raw.githubusercontent.com/ml5js/ml5-examples/development/p5js/FaceApi/FaceApi_Image_Landmarks/assets/frida.jpg';
29 | await new Promise((resolve) => {
30 | img.onload = resolve;
31 | });
32 | return img;
33 | }
34 |
35 | // async function getCanvas() {
36 | // const img = await getImage();
37 | // const canvas = document.createElement('canvas');
38 | // canvas.width = img.width;
39 | // canvas.height = img.height;
40 | // canvas.getContext('2d').drawImage(img, 0, 0);
41 | // return canvas;
42 | // }
43 |
44 | beforeAll(async () => {
45 | jasmine.DEFAULT_TIMEOUT_INTERVAL = 15000;
46 | faceapi = await faceApi();
47 | });
48 |
49 | describe('landmarks', () => {
50 |
51 | it('Should create faceApi with all the defaults', async () => {
52 | expect(faceapi.config.withLandmarks).toBe(FACEAPI_DEFAULTS.withLandmarks);
53 | expect(faceapi.config.withDescriptors).toBe(FACEAPI_DEFAULTS.withDescriptors);
54 | });
55 |
56 | it('Should get landmarks for Frida', async () => {
57 | const img = await getImage();
58 | await faceapi.detectSingle(img)
59 | .then(results => {
60 | expect(results.landmarks).toEqual(jasmine.any(Object));
61 | })
62 | });
63 | });
64 | });
--------------------------------------------------------------------------------
/src/ImageClassifier/doodlenet.js:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2018 ml5
2 | //
3 | // This software is released under the MIT License.
4 | // https://opensource.org/licenses/MIT
5 |
6 | import * as tf from '@tensorflow/tfjs';
7 | import { getTopKClassesFromTensor } from '../utils/gettopkclasses';
8 | import DOODLENET_CLASSES from '../utils/DOODLENET_CLASSES';
9 |
10 | const DEFAULTS = {
11 | DOODLENET_URL: 'https://cdn.jsdelivr.net/gh/ml5js/ml5-data-and-models@master/models/doodlenet/model.json',
12 | IMAGE_SIZE_DOODLENET: 28,
13 | };
14 |
15 | function preProcess(img, size) {
16 | let image;
17 | if (!(img instanceof tf.Tensor)) {
18 | if (img instanceof HTMLImageElement
19 | || img instanceof HTMLVideoElement
20 | || img instanceof HTMLCanvasElement
21 | || img instanceof ImageData) {
22 | image = tf.browser.fromPixels(img);
23 | } else if (typeof img === 'object' && (img.elt instanceof HTMLImageElement
24 | || img.elt instanceof HTMLVideoElement
25 | || img.elt instanceof HTMLCanvasElement
26 | || img.elt instanceof ImageData)) {
27 | image = tf.browser.fromPixels(img.elt); // Handle p5.js image, video and canvas.
28 | }
29 | } else {
30 | image = img;
31 | }
32 | const normalized = tf.scalar(1).sub(image.toFloat().div(tf.scalar(255)));
33 | let resized = normalized;
34 | if (normalized.shape[0] !== size || normalized.shape[1] !== size) {
35 | resized = tf.image.resizeBilinear(normalized, [size, size]);
36 | }
37 |
38 | const [r, g, b] = tf.split(resized, 3, 3);
39 | const gray = (r.add(g).add(b)).div(tf.scalar(3)).floor(); // Get average r,g,b color value and round to 0 or 1
40 | const batched = gray.reshape([1, size, size, 1]);
41 | return batched;
42 | }
43 |
44 | export class Doodlenet {
45 | constructor() {
46 | this.imgSize = DEFAULTS.IMAGE_SIZE_DOODLENET;
47 | }
48 |
49 | async load() {
50 | this.model = await tf.loadLayersModel(DEFAULTS.DOODLENET_URL);
51 |
52 | // Warmup the model.
53 | const result = tf.tidy(() => this.model.predict(tf.zeros([1, this.imgSize, this.imgSize, 1])));
54 | await result.data();
55 | result.dispose();
56 | }
57 |
58 | async classify(img, topk = 10) {
59 | const logits = tf.tidy(() => {
60 | const imgData = preProcess(img, this.imgSize);
61 | const predictions = this.model.predict(imgData);
62 | return predictions;
63 | });
64 | const classes = await getTopKClassesFromTensor(logits, topk, DOODLENET_CLASSES);
65 | logits.dispose();
66 | return classes;
67 | }
68 | }
69 |
70 | export async function load() {
71 | const doodlenet = new Doodlenet();
72 | await doodlenet.load();
73 | return doodlenet;
74 | }
75 |
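76 | // Usage sketch (illustrative):
77 | //   const net = await load();
78 | //   const results = await net.classify(someCanvas); // top 10 doodle classes by default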
--------------------------------------------------------------------------------
/src/BodyPix/index_test.js:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2018 ml5
2 | //
3 | // This software is released under the MIT License.
4 | // https://opensource.org/licenses/MIT
5 |
6 | const { bodyPix } = ml5;
7 |
8 | const BODYPIX_DEFAULTS = {
9 | "multiplier": 0.75,
10 | "outputStride": 16,
11 | "segmentationThreshold": 0.5
12 | };
13 |
14 | describe('bodyPix', () => {
15 | let bp;
16 |
17 | async function getImage() {
18 | const img = new Image();
19 | img.crossOrigin = true;
20 | img.src = 'https://cdn.jsdelivr.net/gh/ml5js/ml5-data-and-models@master/tests/images/harriet_128x128.jpg';
21 | await new Promise((resolve) => { img.onload = resolve; });
22 | return img;
23 | }
24 |
25 | async function getCanvas() {
26 | const img = await getImage();
27 | const canvas = document.createElement('canvas');
28 | canvas.width = img.width;
29 | canvas.height = img.height;
30 | canvas.getContext('2d').drawImage(img, 0, 0);
31 | return canvas;
32 | }
33 |
34 | async function getImageData() {
35 | const arr = new Uint8ClampedArray(40000);
36 |
37 | // Iterate through every pixel
38 | for (let i = 0; i < arr.length; i += 4) {
39 | arr[i + 0] = 0; // R value
40 | arr[i + 1] = 190; // G value
41 | arr[i + 2] = 0; // B value
42 | arr[i + 3] = 255; // A value
43 | }
44 |
45 | // Initialize a new ImageData object
46 | const img = new ImageData(arr, 200);
47 | return img;
48 | }
49 |
50 | beforeAll(async () => {
51 | jasmine.DEFAULT_TIMEOUT_INTERVAL = 5000;
52 | bp = await bodyPix();
53 | });
54 |
55 | it('Should create bodyPix with all the defaults', async () => {
56 | expect(bp.config.multiplier).toBe(BODYPIX_DEFAULTS.multiplier);
57 | expect(bp.config.outputStride).toBe(BODYPIX_DEFAULTS.outputStride);
58 | expect(bp.config.segmentationThreshold).toBe(BODYPIX_DEFAULTS.segmentationThreshold);
59 | });
60 |
61 | it('segment takes ImageData', async () => {
62 | const img = await getImageData();
63 | const results = await bp.segment(img);
64 | // 200 * 50 == 10,000 * 4 == 40,000 the size of the array
65 | expect(results.segmentation.width).toBe(200);
66 | expect(results.segmentation.height).toBe(50);
67 | });
68 |
69 | describe('segmentation', () => {
70 | it('Should segment an image of a Harriet Tubman with a width and height of 128', async () => {
71 | const img = await getImage();
72 | await bp.segment(img)
73 | .then(results => {
74 | expect(results.segmentation.width).toBe(128);
75 | expect(results.segmentation.height).toBe(128);
76 | })
77 | });
78 | 
79 | });
80 | });
81 | 
--------------------------------------------------------------------------------
/docs/styleguide/development-guidelines.md:
--------------------------------------------------------------------------------
1 | # Development Guidelines
2 |
3 | ## Principles for ml5.js source code
4 |
5 | Here are some principles that we try to uphold while developing `ml5-library`:
6 |
7 | | Guideline | description |
8 | | ------------- | ------------ |
9 | | **Clarity over Complexity** | We strive to make using and maintaining ml5 as approachable as possible. This means that we prefer to define sensible `defaults` (more on that later), simple short function names, and hiding data wrangling steps from the user. |
10 | | **Readability over Fanciness** | We're all for supporting the latest and greatest, but the reality is that the maintainers of ml5 come from a wide range of backgrounds and skill levels. To promote contributions from all levels, we favor readable code over the most efficient or streamlined. |
11 | | **Property and function names** | A guideline drawn from Processing is that function and property names should not be more than 2 words mashed together in camelCase. We try our best to adhere to this, except in cases where we are aligning as closely as possible to the API or pretrained model we've wrapped up. This is totally TBD. |
12 | | **modelNames** | Our general rule of thumb is to use camelCase for models -- e.g. `ml5.bodyPix()` or `ml5.poseNet()`. In some cases, the rules aren't entirely clear, but this is what we strive for. |
13 | | **Indentation & Code Formatting** | The easiest thing to do here is use a code formatter. You can see our settings in the [.vscode/settings.json](https://github.com/ml5js/ml5-library/tree/development/.vscode) file. |
14 |
15 |
16 |
17 | ## Principles for ml5.js examples
18 |
19 | In addition to the principles above, we try our best to follow these guidelines when writing our `ml5-examples`:
20 |
21 | | Guideline | description |
22 | | ------------- | ------------ |
23 | | **Beginner friendly first** | We try to make sure the examples are as beginner friendly as possible. For example, this means using `for loops` rather than *fancy (but lovely) javascript array methods* when possible, or sometimes doing things in multiple steps (see the short example at the end of this page). |
24 | | **With as little HTML markup as possible** | We try to focus as much of the example in javascript as possible. That being said, it is not always possible to avoid HTML markup or advantageous to do so. When possible, add elements with javascript. |
25 |
26 |
27 | ## Valuing contributions
28 |
29 | We use the all-contributors bot to add contributors to the various ml5 repositories.
30 | 
31 | When a community member contributes in a way the ml5 team considers a "contribution" (flagging an issue in a helpful way, making a PR, hosting an event or workshop, etc.), we add them by posting a comment in the respective PR or GitHub issue:
32 |
33 | ```
34 | @all-contributors please add @<username> for <contribution type>
35 | ```
36 |
37 | All of the key words for the all-contributors bot can be found here: https://allcontributors.org/docs/en/emoji-key
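38 | 
39 | To illustrate the "beginner friendly first" principle above, here is the kind of code we prefer in examples. Both snippets are illustrative (they assume a `results` array from some model), not taken from a specific example:
40 | 
41 | ```js
42 | // Preferred in examples: a plain for loop, one step at a time
43 | let total = 0;
44 | for (let i = 0; i < results.length; i += 1) {
45 |   total += results[i].confidence;
46 | }
47 | 
48 | // Usually avoided in examples, even though it is lovely:
49 | // const total = results.reduce((sum, r) => sum + r.confidence, 0);
50 | ```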
--------------------------------------------------------------------------------
/docs/reference/sentiment.md:
--------------------------------------------------------------------------------
1 | # Sentiment
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 | ## Description
10 |
11 | Sentiment is a model trained to predict the sentiment of any given text. The default model, currently 'moviereviews', is trained on IMDB reviews that have been truncated to a maximum of 200 words; only the 20,000 most frequently used words in the reviews are included.
12 |
13 | ## Quickstart
14 |
15 | ```js
16 | // Create a new Sentiment method
17 | const sentiment = ml5.sentiment('movieReviews', modelReady);
18 |
19 | // When the model is loaded
20 | function modelReady() {
21 | // model is ready
22 | console.log("Model Loaded!");
23 | }
24 |
25 | // make a prediction (only call this once the model has loaded)
26 | const prediction = sentiment.predict('Happy');
27 | console.log(prediction); // e.g. { score: 0.93 } (values near 1 are positive)
28 | ```
29 |
30 |
31 | ## Usage
32 |
33 | ### Initialize
34 |
35 | ```js
36 | const sentiment = ml5.sentiment(model, ?callback);
37 | ```
38 |
39 | #### Parameters
40 | * **model**: REQUIRED. The name of the model to use ('moviereviews' is the model currently provided). You can also use a path to a `manifest.json` file via a relative or absolute path.
41 | * **callback**: OPTIONAL. A callback function that is called once the model has loaded. If no callback is provided, it will return a promise that will be resolved once the model has loaded.
42 |
43 |
44 | ### Properties
45 |
46 | ***
47 | #### .ready
48 | > Boolean value that specifies if the model has loaded.
49 | ***
50 |
51 | ***
52 | #### .model
53 | > The model being used.
54 | ***
55 |
56 |
57 | ### Methods
58 |
59 |
60 | ***
61 | #### .predict()
62 | > Given a string of text, scores its sentiment between 0 ("negative") and 1 ("positive").
63 |
64 | ```js
65 | sentiment.predict(text)
66 | ```
67 |
68 | 📥 **Inputs**
69 |
70 | * **text**: Required. String. A string of text to predict
71 |
72 |
73 | 📤 **Outputs**
74 |
75 | * **Object**: An object with a `score` property that rates the sentiment of the given text between 0 ("negative") and 1 ("positive"), e.g. `{ score: 0.93 }`.
76 |
77 | ***
78 |
79 |
80 | ## Examples
81 |
82 | **p5.js**
83 | * [Sentiment_Interactive](https://github.com/ml5js/ml5-examples/tree/development/p5js/Sentiment/Sentiment_Interactive)
84 |
85 | **p5 web editor**
86 | * [Sentiment_Interactive](https://editor.p5js.org/ml5/sketches/Sentiment_Interactive)
87 |
88 | **plain javascript**
89 | * [Sentiment_Interactive](https://github.com/ml5js/ml5-examples/tree/development/javascript/Sentiment/Sentiment_Interactive)
90 |
91 | ## Demo
92 |
93 | No demos yet - contribute one today!
94 |
95 | ## Tutorials
96 |
97 | No tutorials yet - contribute one today!
98 |
99 |
100 | ## Acknowledgements
101 |
102 | **Contributors**:
103 | * Itay Niv
104 |
105 | **Credits**:
106 | * Paper Reference | Website URL | Github Repo | Book reference | etc
107 |
108 |
109 | ## Source Code
110 |
111 | [/src/Sentiment/](https://github.com/ml5js/ml5-library/tree/development/src/Sentiment)
112 |
113 |
--------------------------------------------------------------------------------
/src/utils/checkpointLoader.js:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2018 ml5
2 | //
3 | // This software is released under the MIT License.
4 | // https://opensource.org/licenses/MIT
5 |
6 | import * as tf from '@tensorflow/tfjs';
7 |
8 | const MANIFEST_FILE = 'manifest.json';
9 |
10 | export default class CheckpointLoader {
11 | constructor(urlPath) {
12 | this.urlPath = urlPath;
13 | if (this.urlPath.charAt(this.urlPath.length - 1) !== '/') {
14 | this.urlPath += '/';
15 | }
16 | }
17 |
18 | async loadManifest() {
19 | return new Promise((resolve, reject) => {
20 | const xhr = new XMLHttpRequest();
21 | xhr.open('GET', this.urlPath + MANIFEST_FILE);
22 |
23 | xhr.onload = () => {
24 | this.checkpointManifest = JSON.parse(xhr.responseText);
25 | resolve();
26 | };
27 | xhr.onerror = (error) => {
28 | reject();
29 | throw new Error(`${MANIFEST_FILE} not found at ${this.urlPath}. ${error}`);
30 | };
31 | xhr.send();
32 | });
33 | }
34 |
35 |
36 | async getCheckpointManifest() {
37 | if (this.checkpointManifest == null) {
38 | await this.loadManifest();
39 | }
40 | return this.checkpointManifest;
41 | }
42 |
43 | async getAllVariables() {
44 | if (this.variables != null) {
45 | return Promise.resolve(this.variables);
46 | }
47 | await this.getCheckpointManifest();
48 | const variableNames = Object.keys(this.checkpointManifest);
49 | const variablePromises = variableNames.map(v => this.getVariable(v));
50 | return Promise.all(variablePromises).then((variables) => {
51 | this.variables = {};
52 | for (let i = 0; i < variables.length; i += 1) {
53 | this.variables[variableNames[i]] = variables[i];
54 | }
55 | return this.variables;
56 | });
57 | }
58 | getVariable(varName) {
59 | if (!(varName in this.checkpointManifest)) {
60 | throw new Error(`Cannot load non-existent variable ${varName}`);
61 | }
62 | const variableRequestPromiseMethod = (resolve) => {
63 | const xhr = new XMLHttpRequest();
64 | xhr.responseType = 'arraybuffer';
65 | const fname = this.checkpointManifest[varName].filename;
66 | xhr.open('GET', this.urlPath + fname);
67 | xhr.onload = () => {
68 | if (xhr.status === 404) {
69 | throw new Error(`Not found variable ${varName}`);
70 | }
71 | const values = new Float32Array(xhr.response);
72 | const tensor = tf.Tensor.make(this.checkpointManifest[varName].shape, { values });
73 | resolve(tensor);
74 | };
75 | xhr.onerror = (error) => {
76 | throw new Error(`Could not fetch variable ${varName}: ${error}`);
77 | };
78 | xhr.send();
79 | };
80 | if (this.checkpointManifest == null) {
81 | return new Promise((resolve) => {
82 | this.loadManifest().then(() => {
83 | new Promise(variableRequestPromiseMethod).then(resolve);
84 | });
85 | });
86 | }
87 | return new Promise(variableRequestPromiseMethod);
88 | }
89 | }
90 |
--------------------------------------------------------------------------------
/src/ObjectDetector/index.js:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2019 ml5
2 | //
3 | // This software is released under the MIT License.
4 | // https://opensource.org/licenses/MIT
5 |
6 | /*
7 | ObjectDetection
8 | */
9 |
10 | import YOLO from './YOLO/index';
11 | import CocoSsd from './CocoSsd/index';
12 |
13 | class ObjectDetector {
14 | /**
15 | * @typedef {Object} options
16 | * @property {number} filterBoxesThreshold - Optional. default 0.01
17 | * @property {number} IOUThreshold - Optional. default 0.4
18 | * @property {number} classProbThreshold - Optional. default 0.4
19 | */
20 | /**
21 | * Create ObjectDetector model. Works on video and images.
22 | * @param {string} modelNameOrUrl - The name or the URL of the model to use. Current model name options
23 | * are: 'YOLO' and 'CocoSsd'.
24 | * @param {Object} options - Optional. A set of options.
25 | * @param {function} callback - Optional. A callback function that is called once the model has loaded.
26 | */
27 | constructor(modelNameOrUrl, video, options, callback) {
28 |
29 | this.video = video;
30 | this.modelNameOrUrl = modelNameOrUrl;
31 | this.options = options || {};
32 | this.callback = callback;
33 |
34 | switch (modelNameOrUrl) {
35 | case "yolo":
36 | this.model = new YOLO(this.video, {
37 | disableDeprecationNotice: true,
38 | ...this.options },
39 | callback
40 | );
41 | return this;
42 | case "cocossd":
43 | this.model = new CocoSsd(this.video, this.options, callback);
44 | return this;
45 | default:
46 | // use cocossd as default
47 | this.model = new CocoSsd(this.video, this.options, callback);
48 | return this;
49 | }
50 | }
51 |
52 | }
53 |
54 | const objectDetector = (modelName, videoOrOptionsOrCallback, optionsOrCallback, cb) => {
55 |
56 | let video;
57 | let options = {};
58 | let callback = cb;
59 |
60 | let model = modelName;
61 | if (typeof model !== 'string') {
62 | throw new Error('Please specify a model to use. E.g: "YOLO"');
63 | } else if (model.indexOf('http') === -1) {
64 | model = modelName.toLowerCase();
65 | }
66 |
67 | if (videoOrOptionsOrCallback instanceof HTMLVideoElement) {
68 | video = videoOrOptionsOrCallback;
69 | } else if (
70 | typeof videoOrOptionsOrCallback === 'object' &&
71 | videoOrOptionsOrCallback.elt instanceof HTMLVideoElement
72 | ) {
73 | video = videoOrOptionsOrCallback.elt; // Handle a p5.js video element
74 | } else if (typeof videoOrOptionsOrCallback === 'object') {
75 | options = videoOrOptionsOrCallback;
76 | } else if (typeof videoOrOptionsOrCallback === 'function') {
77 | callback = videoOrOptionsOrCallback;
78 | }
79 |
80 | if (typeof optionsOrCallback === 'object') {
81 | options = optionsOrCallback;
82 | } else if (typeof optionsOrCallback === 'function') {
83 | callback = optionsOrCallback;
84 | }
85 |
86 | const instance = new ObjectDetector(model, video, options, callback);
87 |
88 | return instance.model.callback ? instance.model : instance.model.ready;
89 |
90 | }
91 |
92 | export default objectDetector;
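93 | 
94 | // Usage sketch (illustrative): with no callback, the factory returns a promise that
95 | // resolves to the loaded model:
96 | //   const detector = await objectDetector('CocoSsd');
97 | //   const results = await detector.detect(imageElement);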
--------------------------------------------------------------------------------
/src/utils/p5Utils.js:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2018 ml5
2 | //
3 | // This software is released under the MIT License.
4 | // https://opensource.org/licenses/MIT
5 |
6 | class P5Util {
7 | constructor() {
8 | this.p5Instance = window;
9 | }
10 |
11 | /**
12 | * Set p5 instance globally.
13 | * @param {Object} p5Instance
14 | */
15 | setP5Instance(p5Instance) {
16 | this.p5Instance = p5Instance;
17 | }
18 |
19 | /**
20 | * This function will check if the p5 is in the environment
21 | * Either it is in the p5Instance mode OR it is in the window
22 | * @returns {boolean} if it is in p5
23 | */
24 | checkP5() {
25 |     // p5 is present either as an instance exposing loadImage(), or as the p5 constructor on the window
26 |     if ((typeof this.p5Instance !== 'undefined' &&
27 |       typeof this.p5Instance.loadImage === 'function') ||
28 |       (typeof this.p5Instance.p5 !== 'undefined' &&
29 |       typeof this.p5Instance.p5.Image !== 'undefined' &&
30 |       typeof this.p5Instance.p5.Image === 'function')) return true;
31 |     return false;
32 | }
33 |
34 | /**
35 | * Convert a canvas to Blob
36 | * @param {HTMLCanvasElement} inputCanvas
37 | * @returns {Blob} blob object
38 | */
39 | /* eslint class-methods-use-this: ["error", { "exceptMethods": ["getBlob"] }] */
40 | getBlob(inputCanvas) {
41 | return new Promise((resolve) => {
42 | inputCanvas.toBlob((blob) => {
43 | resolve(blob);
44 | });
45 | });
46 | };
47 |
48 | /**
49 | * Load image in async way.
50 | * @param {String} url
51 | */
52 | loadAsync(url) {
53 | return new Promise((resolve) => {
54 | this.p5Instance.loadImage(url, (img) => {
55 | resolve(img);
56 | });
57 | });
58 | };
59 |
60 | /**
61 | * convert raw bytes to blob object
62 | * @param {Array} raws
63 | * @param {number} x
64 | * @param {number} y
65 | * @returns {Blob}
66 | */
67 | async rawToBlob(raws, x, y) {
68 | const arr = Array.from(raws)
69 | const canvas = document.createElement('canvas'); // Consider using offScreenCanvas when it is ready?
70 | const ctx = canvas.getContext('2d');
71 |
72 | canvas.width = x;
73 | canvas.height = y;
74 |
75 | const imgData = ctx.createImageData(x, y);
76 | const { data } = imgData;
77 |
78 | for (let i = 0; i < x * y * 4; i += 1 ) data[i] = arr[i];
79 | ctx.putImageData(imgData, 0, 0);
80 |
81 | const blob = await this.getBlob(canvas);
82 | return blob;
83 | };
84 |
85 | /**
86 |    * Convert a Blob to a p5.Image
87 |    * @param {Blob} blob
88 |    * @returns {Object} a p5.Image, or null if p5 is not available
89 | */
90 | async blobToP5Image(blob) {
91 | if (this.checkP5()) {
92 | const p5Img = await this.loadAsync(URL.createObjectURL(blob));
93 | return p5Img;
94 | }
95 | return null;
96 | };
97 |
98 | }
99 |
100 | const p5Utils = new P5Util();
101 |
102 | export default p5Utils;
103 |
--------------------------------------------------------------------------------
/docs/tutorials/promises-and-callbacks.md:
--------------------------------------------------------------------------------
1 | # Promises and Callback support in ml5
2 |
3 | ml5.js is heavily inspired by the syntax, patterns and style of the [p5.js](https://p5js.org/) library. However, there are several differences in how asynchronous operations are handled by ml5.js. ml5.js supports both error-first callbacks and Promises in all methods.
4 |
5 | ## Using Callbacks
6 |
7 | In [p5.js](https://p5js.org/), [callbacks](https://developer.mozilla.org/en-US/docs/Glossary/Callback_function) are passed as arguments to functions that often perform some asynchronous operation. For example, [p5.js](https://p5js.org/) defines the [**loadJSON()**](https://p5js.org/reference/#/p5/loadJSON) function as the following:
8 |
9 | ```javascript
10 | loadJSON("http://example.com/data.json", function(results) {
11 | // Do something with the results
12 | });
13 | ```
14 |
15 | Notice that the results from the callback in [p5.js](https://p5js.org/) are given as the only argument to the function. There is no error argument in the callback.
16 |
17 | ml5.js, on the other hand, uses a pattern referred to as an error-first callback:
18 |
19 | > With this pattern, a callback function is passed to the method as an argument. When the operation either completes or an error is raised, the callback function is called with the Error object (if any) passed as the first argument. If no error was raised, the first argument will be passed as null. [Taken from the Node.js documentation](https://nodejs.org/api/errors.html#errors_error_first_callbacks)
20 |
21 | For example if you are using the **imageClassifier()** method, you will need to construct it in the following way:
22 |
23 | ```javascript
24 | // Pass a callback function to constructor
25 | const classifier = ml5.imageClassifier('MobileNet', function(err, model) {
26 | console.log('Model Loaded!');
27 | });
28 |
29 | // Make a prediction with the selected image and pass a callback function with two arguments
30 | classifier.predict(image, function(err, results) {
31 | // Check for errors. If no errors, then do something with the results
32 | });
33 | ```
34 |
35 | Error-first callbacks are a convention common to many JavaScript libraries, and one that we have chosen to adopt; the JavaScript language itself does not enforce this pattern. Keep in mind that most ml5.js methods and functions are asynchronous (machine learning models can take significant amounts of time to process inputs and generate outputs!), so if you want to use callbacks, they must follow the error-first pattern.
36 |
37 | ## Using Promises
38 |
39 | ml5.js also supports [Promises](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Promise). If no callback is provided to any asynchronous function then a Promise is returned.
40 |
41 | With Promises, the image classification example can be used in the following way:
42 |
43 | ```javascript
44 | // No callback needs to be passed to use Promises.
45 | ml5
46 | .imageClassifier("MobileNet")
47 | .then(classifier => classifier.predict(image))
48 | .then(results => {
49 | // Do something with the results
50 | });
51 | ```
52 |
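The returned Promises also work with the newer `async`/`await` syntax. A minimal sketch of the same example, written inside an `async` function:

```javascript
// Equivalent to the Promise chain above, written with async/await.
async function classifyImage(image) {
  const classifier = await ml5.imageClassifier("MobileNet");
  const results = await classifier.predict(image);
  // Do something with the results
  return results;
}
```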
53 | For video tutorials about Promises, see this [Coding Train playlist](https://www.youtube.com/playlist?list=PLRqwX-V7Uu6bKLPQvPRNNE65kBL62mVfx). There is also a [video tutorial about the ES6 arrow notation (**=>**)](https://youtu.be/mrYMzpbFz18).
54 |
--------------------------------------------------------------------------------
/src/Word2vec/index_test.js:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2018 ml5
2 | //
3 | // This software is released under the MIT License.
4 | // https://opensource.org/licenses/MIT
5 |
6 | /* eslint no-loop-func: 0 */
7 | const { tf, word2vec } = ml5;
8 |
9 | const W2V_MODEL_URL = 'https://raw.githubusercontent.com/ml5js/ml5-data-and-training/master/models/wordvecs/common-english/wordvecs1000.json';
10 |
11 | describe('word2vec', () => {
12 | let word2vecInstance;
13 | let numTensorsBeforeAll;
14 | let numTensorsBeforeEach;
15 | beforeAll((done) => {
16 | jasmine.DEFAULT_TIMEOUT_INTERVAL = 60000;
17 | numTensorsBeforeAll = tf.memory().numTensors;
18 | word2vecInstance = word2vec(W2V_MODEL_URL, done);
19 | });
20 |
21 | // afterAll(() => {
22 | // word2vecInstance.dispose();
23 | // const numTensorsAfterAll = tf.memory().numTensors;
24 | // if (numTensorsBeforeAll !== numTensorsAfterAll) {
25 | // throw new Error(`Leaking Tensors (${numTensorsAfterAll} vs ${numTensorsBeforeAll})`);
26 | // }
27 | // });
28 |
29 | beforeEach(() => {
30 | numTensorsBeforeEach = tf.memory().numTensors;
31 | });
32 |
33 | afterEach(() => {
34 | const numTensorsAfterEach = tf.memory().numTensors;
35 | if (numTensorsBeforeEach !== numTensorsAfterEach) {
36 | throw new Error(`Leaking Tensors (${numTensorsAfterEach} vs ${numTensorsBeforeEach})`);
37 | }
38 | });
39 |
40 | it('creates a new instance', () => {
41 | expect(word2vecInstance).toEqual(jasmine.objectContaining({
42 | modelLoaded: true,
43 | modelSize: 1000,
44 | }));
45 | });
46 |
47 | describe('getRandomWord', () => {
48 | it('returns a random word', () => {
49 | word2vecInstance.getRandomWord()
50 | .then(word => expect(typeof word).toEqual('string'));
51 | });
52 | });
53 |
54 | describe('nearest', () => {
55 | it('returns a sorted array of nearest words', () => {
56 | for (let i = 0; i < 100; i += 1) {
57 | word2vecInstance.getRandomWord()
58 | .then(word => word2vecInstance.nearest(word))
59 | .then((nearest) => {
60 | let currentDistance = 0;
61 | for (let { word, distance: nextDistance } of nearest) {
62 | expect(typeof word).toEqual('string');
63 | expect(nextDistance).toBeGreaterThan(currentDistance);
64 | currentDistance = nextDistance;
65 | }
66 | })
67 | }
68 | });
69 |
70 | it('returns a list of the right length', () => {
71 | for (let i = 0; i < 100; i += 1) {
72 | word2vecInstance.getRandomWord()
73 | .then(word => word2vecInstance.nearest(word, i))
74 | .then(nearest => expect(nearest.length).toEqual(i));
75 | }
76 | });
77 | });
78 | describe('add', () => {
79 | it('cat + dog = horse', () => {
80 | word2vecInstance.add(['cat', 'dog'], 1)
81 | .then(result => expect(result[0].word).toBe('horse'));
82 | });
83 | });
84 |
85 | describe('subtract', () => {
86 | it('cat - dog = fish', () => {
87 | word2vecInstance.subtract(['cat', 'dog'], 1)
88 | .then(result => expect(result[0].word).toBe('fish'));
89 | });
90 | });
91 |
92 | describe('average', () => {
93 |     it('moon & sun = earth', () => {
94 | word2vecInstance.average(['moon', 'sun'], 1)
95 | .then(result => expect(result[0].word).toBe('earth'));
96 | });
97 | });
98 | });
99 |
--------------------------------------------------------------------------------
/src/ImageClassifier/darknet.js:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2018 ml5
2 | //
3 | // This software is released under the MIT License.
4 | // https://opensource.org/licenses/MIT
5 |
6 | import * as tf from '@tensorflow/tfjs';
7 | import { getTopKClassesFromTensor } from '../utils/gettopkclasses';
8 | import IMAGENET_CLASSES_DARKNET from '../utils/IMAGENET_CLASSES_DARKNET';
9 |
10 | const DEFAULTS = {
11 | DARKNET_URL: 'https://cdn.jsdelivr.net/gh/ml5js/ml5-data-and-models@master/models/darknetclassifier/darknetreference/model.json',
12 | DARKNET_TINY_URL: 'https://cdn.jsdelivr.net/gh/ml5js/ml5-data-and-models@master/models/darknetclassifier/darknettiny/model.json',
13 | IMAGE_SIZE_DARKNET: 256,
14 | IMAGE_SIZE_DARKNET_TINY: 224,
15 | };
16 |
17 | function preProcess(img, size) {
18 | let image;
19 | if (!(img instanceof tf.Tensor)) {
20 | if (img instanceof HTMLImageElement
21 | || img instanceof HTMLVideoElement
22 | || img instanceof HTMLCanvasElement
23 | || img instanceof ImageData) {
24 | image = tf.browser.fromPixels(img);
25 | } else if (typeof img === 'object' && (img.elt instanceof HTMLImageElement
26 | || img.elt instanceof HTMLVideoElement
27 | || img.elt instanceof HTMLCanvasElement
28 | || img.elt instanceof ImageData)) {
29 | image = tf.browser.fromPixels(img.elt); // Handle p5.js image and video.
30 | }
31 | } else {
32 | image = img;
33 | }
34 | const normalized = image.toFloat().div(tf.scalar(255));
35 | let resized = normalized;
36 | if (normalized.shape[0] !== size || normalized.shape[1] !== size) {
37 | const alignCorners = true;
38 | resized = tf.image.resizeBilinear(normalized, [size, size], alignCorners);
39 | }
40 | const batched = resized.reshape([1, size, size, 3]);
41 | return batched;
42 | }
43 |
44 | export class Darknet {
45 | constructor(version) {
46 | this.version = version;
47 | switch (this.version) {
48 | case 'reference':
49 | this.imgSize = DEFAULTS.IMAGE_SIZE_DARKNET;
50 | break;
51 | case 'tiny':
52 | this.imgSize = DEFAULTS.IMAGE_SIZE_DARKNET_TINY;
53 | break;
54 | default:
55 | break;
56 | }
57 | }
58 |
59 | async load() {
60 | switch (this.version) {
61 | case 'reference':
62 | this.model = await tf.loadLayersModel(DEFAULTS.DARKNET_URL);
63 | break;
64 | case 'tiny':
65 | this.model = await tf.loadLayersModel(DEFAULTS.DARKNET_TINY_URL);
66 | break;
67 | default:
68 | break;
69 | }
70 |
71 | // Warmup the model.
72 | const result = tf.tidy(() => this.model.predict(tf.zeros([1, this.imgSize, this.imgSize, 3])));
73 | await result.data();
74 | result.dispose();
75 | }
76 |
77 | async classify(img, topk = 3) {
78 | const logits = tf.tidy(() => {
79 | const imgData = preProcess(img, this.imgSize);
80 | const predictions = this.model.predict(imgData);
81 | return tf.softmax(predictions);
82 | });
83 | const classes = await getTopKClassesFromTensor(logits, topk, IMAGENET_CLASSES_DARKNET);
84 | logits.dispose();
85 | return classes;
86 | }
87 | }
88 |
89 | export async function load(version) {
90 | if (version !== 'reference' && version !== 'tiny') {
91 | throw new Error('Please select a version: darknet-reference or darknet-tiny');
92 | }
93 |
94 | const darknet = new Darknet(version);
95 | await darknet.load();
96 | return darknet;
97 | }
98 |
--------------------------------------------------------------------------------
/docs/reference/kmeans.md:
--------------------------------------------------------------------------------
1 | # KMeans Clustering
2 |
3 |
4 |
5 |
6 |
7 |
8 | ## Description
9 |
10 | KMeans is an unsupervised machine learning algorithm that groups data into a chosen number (k) of clusters. Read more about it [here](https://en.wikipedia.org/wiki/K-means_clustering).
11 |
12 | ## Quickstart
13 |
14 | ```js
15 | const data = [{x: 0, y:0}, {x:0, y:1},{x:1, y:0}, {x:1, y:1}]
16 | const options = {
17 | 'k': 3,
18 | 'maxIter': 4,
19 | 'threshold': 0.5,
20 | };
21 | // Initialize the KMeans clustering
22 | const kmeans = ml5.kmeans(data, options, clustersCalculated);
23 |
24 | // When the model is loaded
25 | function clustersCalculated() {
26 | console.log('Points Clustered!');
27 | console.log(kmeans.dataset)
28 | }
29 |
30 | ```
31 |
32 |
33 | ## Usage
34 |
35 | ### Initialize
36 |
37 | ```js
38 | const kmeans = ml5.kmeans(data, ?options, ?callback);
39 | ```
40 |
41 | #### Parameters
42 | * **data**: REQUIRED. JSON object | Data URL. Can be a CSV or JSON dataset. Your data might look like:
43 | * csv:
44 |     ```csv
45 | x1,y1
46 | 1,2
47 | 3,4
48 | 5,6
49 | ```
50 | * json:
51 | ```js
52 | [{x: 0, y:0}, {x:0, y:1},{x:1, y:0}, {x:1, y:1}]
53 | ```
54 | * **options**: OPTIONAL. Sets the options including:
55 | * `k`: the number of clusters
56 | * `maxIter`: Max number of iterations to try before forcing convergence.
57 |   * `threshold`: Threshold for updated centroid distance before declaring convergence.
58 | * **callback**: OPTIONAL. A callback function that is called once the kmeans clusters have been calculated.
59 |
60 |
61 | ### Properties
62 |
63 |
64 | ***
65 | #### .config
66 | > *Object*: object containing the configuration of the kmeans
67 | ***
68 |
69 | ***
70 | #### .dataset
71 | > **Array**: an array of objects containing the original data where each object is a "row" of data with a property called `centroid` indicating which cluster this point belongs to.
72 | ***
73 | ***
74 | #### .dataTensor
75 | > **Tensor**: a TensorFlow tensor representing the `.dataset` property
76 | ***
77 | ***
78 | #### .centroids
79 | > **Tensor**: a TensorFlow tensor representing the cluster centroids
80 | ***
81 |
82 |
83 |
84 | ### Methods
85 |
86 |
87 | * `ml5.kmeans()` calculates the KMeans clusters of the input data (see Usage above); a sketch of reading the results follows below.
88 |
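For example, once the callback fires you can read each point's cluster assignment from the `.dataset` property described above. A minimal sketch:

```js
// When clustering is done, every "row" of the original data
// carries a `centroid` property with its assigned cluster index.
function clustersCalculated() {
  kmeans.dataset.forEach((row, i) => {
    console.log(`point ${i} belongs to cluster ${row.centroid}`);
  });
}
```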
89 |
90 | ## Examples
91 |
92 | **p5.js**
93 | * [KMeans_imageSegmentation](https://github.com/ml5js/ml5-examples/tree/development/p5js/KMeans/KMeans_imageSegmentation/)
94 |
95 | **p5 web editor**
96 | * [KMeans_imageSegmentation](https://editor.p5js.org/ml5/sketches/KMeans_imageSegmentation/)
97 |
98 | **plain javascript**
99 | * coming soon
100 |
101 | **d3.js**
102 | * [KMeans_GaussianClusterDemo](https://github.com/ml5js/ml5-examples/tree/development/d3/KMeans/KMeans_GaussianClusterDemo)
103 |
104 | ## Demo
105 |
106 | No demos yet - contribute one today!
107 |
108 | ## Tutorials
109 |
110 | No tutorials yet - contribute one today!
111 |
112 |
113 | ## Acknowledgements
114 |
115 | **Contributors**:
116 | * [Jared Wilber](https://www.jwilber.me/)
117 |
118 | **Credits**:
119 | * Paper Reference | Website URL | Github Repo | Book reference | etc
120 |
121 | ## Source Code
122 |
123 | * [/src/KMeans](https://github.com/ml5js/ml5-library/tree/development/src/KMeans)
124 |
125 |
126 |
--------------------------------------------------------------------------------
/src/SoundClassifier/index.js:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2019 ml5
2 | //
3 | // This software is released under the MIT License.
4 | // https://opensource.org/licenses/MIT
5 |
6 | /*
7 | Sound Classifier using pre-trained networks
8 | */
9 |
10 | import * as tf from '@tensorflow/tfjs';
11 | import * as speechCommands from './speechcommands';
12 | import callCallback from '../utils/callcallback';
13 |
14 | const MODEL_OPTIONS = ['speechcommands18w'];
15 | class SoundClassifier {
16 | /**
17 |    * Create a SoundClassifier.
18 |    * @param {string} modelNameOrUrl - The name or the URL of the model to use. Current name options
19 | * are: 'SpeechCommands18w'.
20 | * @param {object} options - An object with options.
21 | * @param {function} callback - A callback to be called when the model is ready.
22 | */
23 | constructor(modelNameOrUrl, options, callback) {
24 | this.model = null;
25 | this.options = options;
26 | if (typeof modelNameOrUrl === 'string') {
27 | if (MODEL_OPTIONS.includes(modelNameOrUrl)) {
28 | this.modelName = modelNameOrUrl;
29 | this.modelUrl = null;
30 | switch (this.modelName) {
31 | case 'speechcommands18w':
32 | this.modelToUse = speechCommands;
33 | break;
34 | default:
35 | this.modelToUse = null;
36 | }
37 | } else {
38 | // Default to speechCommands for now
39 | this.modelToUse = speechCommands;
40 | this.modelUrl = modelNameOrUrl;
41 | }
42 | }
43 | // Load the model
44 |     this.ready = callCallback(this.loadModel(options), callback);
45 | }
46 |
47 | async loadModel(options) {
48 | this.model = await this.modelToUse.load(options, this.modelUrl);
49 | return this;
50 | }
51 |
52 | async classifyInternal(numberOfClasses, callback) {
53 | // Wait for the model to be ready
54 | await this.ready;
55 | await tf.nextFrame();
56 |
57 | return this.model.classify(numberOfClasses, callback);
58 | }
59 |
60 | /**
61 | * Classifies the audio from microphone and takes a callback to handle the results
62 | * @param {function | number} numOrCallback -
63 | * takes any of the following params
64 | * @param {function} cb - a callback function that handles the results of the function.
65 |    * @return {Promise} a promise that resolves with the results, or the result of the given callback, cb.
66 | */
67 | async classify(numOrCallback = null, cb) {
68 | let numberOfClasses = this.topk;
69 | let callback;
70 |
71 | if (typeof numOrCallback === 'number') {
72 | numberOfClasses = numOrCallback;
73 | } else if (typeof numOrCallback === 'function') {
74 | callback = numOrCallback;
75 | }
76 |
77 | if (typeof cb === 'function') {
78 | callback = cb;
79 | }
80 | return this.classifyInternal(numberOfClasses, callback);
81 | }
82 | }
83 |
84 | const soundClassifier = (modelName, optionsOrCallback, cb) => {
85 | let options = {};
86 | let callback = cb;
87 |
88 | let model = modelName;
89 | if (typeof model !== 'string') {
90 | throw new Error('Please specify a model to use. E.g: "SpeechCommands18w"');
91 | } else if (model.indexOf('http') === -1) {
92 | model = modelName.toLowerCase();
93 | }
94 |
95 | if (typeof optionsOrCallback === 'object') {
96 | options = optionsOrCallback;
97 | } else if (typeof optionsOrCallback === 'function') {
98 | callback = optionsOrCallback;
99 | }
100 |
101 | const instance = new SoundClassifier(model, options, callback);
102 | return callback ? instance : instance.ready;
103 | };
104 |
105 | export default soundClassifier;
106 |
--------------------------------------------------------------------------------
/docs/reference/cvae.md:
--------------------------------------------------------------------------------
1 | # CVAE: Conditional Variational Autoencoder
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 | ## Description
10 |
11 | An autoencoder is a neural network that learns how to encode data (like the pixels of an image) into a smaller representation. This is akin to image compression (although classic image compression algorithms are better!). A Variational Autoencoder (VAE) takes this idea one step further and is trained to generate new images in the style of its training data by sprinkling in a little bit of randomness. A Conditional Variational Autoencoder (CVAE) extends this idea with the ability to be more specific about what is generated. From [Two Minute Papers](https://www.youtube.com/watch?v=Rdpbnd0pCiI), the author explains that:
12 |
13 | *"Autoencoders are neural networks that are capable of creating sparse representations of the input data and can therefore be used for image compression. There are denoising autoencoders that after learning these sparse representations, can be presented with noisy images. What is even better is a variant that is called the variational autoencoder that not only learns these sparse representations, but can also draw new images as well. We can, for instance, ask it to create new handwritten digits and we can actually expect the results to make sense!"*
14 |
15 |
16 | ## Quickstart
17 |
18 | ```js
19 | const cvae = ml5.CVAE('model/quick_draw/manifest.json', modelReady);
20 |
21 | function modelReady() {
22 | // generate an image of an airplane
23 | cvae.generate('airplane', gotImage);
24 | }
25 |
26 | function gotImage(error, result) {
27 | if (error) {
28 | console.log(error);
29 | return;
30 | }
31 | // log the result
32 | console.log(result);
33 | }
34 | ```
35 |
36 |
37 | ## Usage
38 |
39 | ### Initialize
40 |
41 | ```js
42 | const cvae = ml5.CVAE(model, ?callback)
43 | ```
44 |
45 | #### Parameters
46 | * **model**: REQUIRED. The url path to your model. Can be an absolute or relative path.
47 | * **callback**: REQUIRED. A function to run once the model has been loaded.
48 |
49 |
50 | ### Properties
51 |
52 | ***
53 | #### .ready
54 | > *BOOLEAN*. Boolean value that specifies if the model has loaded.
55 | ***
56 |
57 |
58 | ### Methods
59 |
60 |
61 | ***
62 | #### .generate(label, callback);
63 | > Given a label, will generate an image.
64 |
65 | ```js
66 | cvae.generate(label, callback);
67 | ```
68 |
69 | 📥 **Inputs**
70 |
71 | * **label**: REQUIRED. String. A label of the feature you want to generate.
72 | * **callback**: REQUIRED. Function. A function to handle the results of ".generate()". Likely a function to do something with the generated image data.
73 |
74 | 📤 **Outputs**
75 |
76 | * **Object**: Returns "raw", "blob", and "tensor". If p5.js is available, a "p5Image" will be returned as well.
77 |
78 | ***
79 |
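In a p5.js sketch, for instance, you might draw the generated image directly. A minimal sketch, assuming p5.js is available so the result includes the `p5Image` field named above:

```js
function gotImage(error, result) {
  if (error) {
    console.error(error);
    return;
  }
  // p5 is available, so the result includes a p5.Image (see outputs above)
  image(result.p5Image, 0, 0);
}
```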
80 |
81 |
82 |
83 |
84 |
85 | ## Examples
86 |
87 | **p5.js**
88 | * [CVAE_QuickDraw](https://github.com/ml5js/ml5-examples/tree/development/p5js/CVAE/CVAE_QuickDraw)
89 |
90 | **p5 web editor**
91 | * [CVAE_QuickDraw](https://editor.p5js.org/ml5/sketches/CVAE_QuickDraw)
92 |
93 | **plain javascript**
94 | * [CVAE_QuickDraw](https://github.com/ml5js/ml5-examples/tree/development/javascript/CVAE/CVAE_QuickDraw)
95 |
96 |
97 | ## Demo
98 |
99 | No demos yet - contribute one today!
100 |
101 | ## Tutorials
102 |
103 | No tutorials yet - contribute one today!
104 |
105 | ## Acknowledgements
106 |
107 | **Contributors**:
108 | * Wenhe Li & Dingsu (Derek) Wang
109 |
110 | **Credits**:
111 | * Paper Reference | Website URL | Github Repo | Book reference | etc
112 |
113 |
114 | ## Source Code
115 |
116 | [/src/CVAE/](https://github.com/ml5js/ml5-library/tree/development/src/CVAE)
117 |
--------------------------------------------------------------------------------
/docs/reference/pix2pix.md:
--------------------------------------------------------------------------------
1 | # Pix2Pix
2 |
3 |
4 |
5 |
6 |
7 |
8 | Image: A drawing converted to the Pokémon character Pikachu, using Pix2Pix trained on Pikachu images. Trained by [Yining Shi](https://1023.io).
9 |
10 | ## Description
11 |
12 | Image-to-image translation with conditional adversarial nets, or pix2pix, is a machine learning technique developed by
13 | [Isola et al](https://github.com/phillipi/pix2pix) that learns how to map input images to output images.
14 |
15 | *The pix2pix model works by training on pairs of images such as building facade labels to building facades, and then attempts to generate the corresponding output image from any input image you give it. [Source](https://affinelayer.com/pixsrv/)*
16 |
17 | The original pix2pix TensorFlow implementation was made by [affinelayer](https://github.com/affinelayer/pix2pix-tensorflow).
18 | This version is heavily based on [Christopher Hesse's TensorFlow.js implementation](https://github.com/affinelayer/pix2pix-tensorflow/tree/master/server).
19 |
20 | ## Quickstart
21 |
22 | ```js
23 | // Create a pix2pix model using a pre-trained network
24 | const pix2pix = ml5.pix2pix("models/customModel.pict", modelLoaded);
25 |
26 | // When the model is loaded
27 | function modelLoaded() {
28 | console.log("Model Loaded!");
29 | }
30 |
31 | // Transfer using a canvas
32 | pix2pix.transfer(canvas, function(err, result) {
33 | console.log(result);
34 | });
35 | ```
36 |
37 |
38 | ## Usage
39 |
40 | ### Initialize
41 |
42 | ```js
43 | const pix2pix = ml5.pix2pix(model, ?callback);
44 | ```
45 |
46 | #### Parameters
47 | * **model**: REQUIRED. The path for a valid model.
48 | * **callback**: OPTIONAL. A function to run once the model has been loaded. If no callback is provided, it will return a promise that will be resolved once the model has loaded.
49 |
50 |
51 | ### Properties
52 |
53 | ***
54 | #### .ready
55 | > Boolean to check if the model has loaded
56 | ***
57 |
58 |
59 | ### Methods
60 |
61 |
62 | ***
63 | #### .transfer()
64 | > Given a canvas element, applies image-to-image translation using the provided model. Returns an image.
65 |
66 | ```js
67 | pix2pix.transfer(canvas, ?callback)
68 | ```
69 |
70 | 📥 **Inputs**
71 |
72 | * **canvas**: Required. An HTML canvas element.
73 | * **callback**: Optional. A function to run once the model has made the transfer. If no callback is provided, it will return a promise that will be resolved once the model has made the transfer.
74 |
75 | 📤 **Outputs**
76 |
77 | * **Image**: returns an HTMLImageObject
78 |
79 | ***
80 |
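Since `.transfer()` returns a promise when no callback is given, the same transfer can be written in promise form. A minimal sketch, using the `canvas` from the quickstart above:

```js
// Promise form: no callback, so .transfer() resolves with the result image.
pix2pix.transfer(canvas).then(result => {
  // Per the outputs above, result is an HTML image element
  console.log(result);
});
```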
81 |
82 | ## Examples
83 |
84 |
85 | **p5.js**
86 | * [Pix2Pix_callback](https://github.com/ml5js/ml5-examples/tree/development/p5js/Pix2Pix/Pix2Pix_callback)
87 | * [Pix2Pix_promise](https://github.com/ml5js/ml5-examples/tree/development/p5js/Pix2Pix/Pix2Pix_promise)
88 |
89 | **p5 web editor**
90 | * [Pix2Pix_callback](https://editor.p5js.org/ml5/sketches/Pix2Pix_callback)
91 | * [Pix2Pix_promise](https://editor.p5js.org/ml5/sketches/Pix2Pix_promise)
92 |
93 | **plain javascript**
94 | * [Pix2Pix_callback](https://github.com/ml5js/ml5-examples/tree/development/javascript/Pix2Pix/Pix2Pix_callback)
95 | * [Pix2Pix_promise](https://github.com/ml5js/ml5-examples/tree/development/javascript/Pix2Pix/Pix2Pix_promise)
96 |
97 |
98 | ## Demo
99 |
100 | No demos yet - contribute one today!
101 |
102 | ## Tutorials
103 |
104 | No tutorials yet - contribute one today!
105 |
106 | ## Acknowledgements
107 |
108 | **Contributors**:
109 | * Yining Shi
110 |
111 | **Credits**:
112 | * Paper Reference | Website URL | Github Repo | Book reference | etc
113 |
114 | ## Source Code
115 |
116 | [/src/Pix2pix](https://github.com/ml5js/ml5-library/tree/development/src/Pix2pix)
117 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "ml5",
3 | "version": "0.4.3",
4 | "description": "A friendly machine learning library for the web.",
5 | "main": "dist/ml5.min.js",
6 | "directories": {
7 | "examples": "examples",
8 | "dist": "dist"
9 | },
10 | "scripts": {
11 | "commit": "git-cz",
12 | "prebuild": "rimraf dist",
13 | "start": "webpack-dev-server --open --config webpack.dev.babel.js",
14 | "manual-test": "webpack-dev-server --open --config webpack.test.babel.js",
15 | "build": "webpack --config webpack.prod.babel.js",
16 | "test": "./node_modules/karma/bin/karma start karma.conf.js ",
17 | "test:single": "./node_modules/karma/bin/karma start karma.conf.js --single-run",
18 | "test-travis": "./scripts/test-travis.sh",
19 | "serve:docs": "docsify serve docs",
20 | "update:readme": "node ./scripts/updateReadme.js",
21 | "update:docs": "node ./scripts/updateDocVersions.js $oldversion",
22 | "publish:npm": "npm run build && npm publish",
23 | "contributors:add": "all-contributors add",
24 | "contributors:generate": "all-contributors generate"
25 | },
26 | "repository": {
27 | "type": "git",
28 | "url": "git+https://github.com/ml5js/ml5-library.git"
29 | },
30 | "keywords": [
31 | "machine learning"
32 | ],
33 | "author": "NYU ITP (https://github.com/ml5js)",
34 | "license": "ISC",
35 | "bugs": {
36 | "url": "https://github.com/ml5js/ml5-library/issues"
37 | },
38 | "homepage": "https://github.com/ml5js/ml5-library#readme",
39 | "devDependencies": {
40 | "all-contributors-cli": "^6.10.0",
41 | "babel-cli": "6.26.0",
42 | "babel-core": "6.26.0",
43 | "babel-loader": "7.1.4",
44 | "babel-plugin-transform-object-rest-spread": "^6.26.0",
45 | "babel-plugin-transform-runtime": "^6.23.0",
46 | "babel-polyfill": "6.26.0",
47 | "babel-preset-env": "1.6.1",
48 | "babel-register": "6.26.0",
49 | "commitizen": "4.0.3",
50 | "cz-conventional-changelog": "2.1.0",
51 | "docsify-cli": "^4.3.0",
52 | "eslint": "4.18.2",
53 | "eslint-config-airbnb-base": "12.1.0",
54 | "eslint-config-prettier": "^4.1.0",
55 | "eslint-loader": "2.0.0",
56 | "eslint-plugin-import": "2.9.0",
57 | "extract-text-webpack-plugin": "4.0.0-beta.0",
58 | "ghooks": "2.0.2",
59 | "html-webpack-plugin": "^3.0.7",
60 | "jasmine-core": "3.1.0",
61 | "karma": "2.0.0",
62 | "karma-browserstack-launcher": "^1.5.1",
63 | "karma-chrome-launcher": "2.2.0",
64 | "karma-jasmine": "1.1.1",
65 | "karma-mocha-reporter": "^2.2.5",
66 | "karma-safari-launcher": "1.0.0",
67 | "karma-webpack": "3.0.0",
68 | "npm-run-all": "^4.1.2",
69 | "regenerator-runtime": "0.11.1",
70 | "rimraf": "2.6.2",
71 | "semantic-release": "^15.13.26",
72 | "uglifyjs-webpack-plugin": "^1.2.5",
73 | "webpack": "4.1.1",
74 | "webpack-cli": "2.0.10",
75 | "webpack-dev-server": "3.1.11",
76 | "webpack-merge": "^4.1.2"
77 | },
78 | "config": {
79 | "commitizen": {
80 | "path": "node_modules/cz-conventional-changelog"
81 | }
82 | },
83 | "files": [
84 | "dist",
85 | "README.md"
86 | ],
87 | "babel": {
88 | "presets": [
89 | [
90 | "env"
91 | ]
92 | ],
93 | "plugins": [
94 | "transform-runtime",
95 | "transform-object-rest-spread"
96 | ]
97 | },
98 | "dependencies": {
99 | "@magenta/sketch": "0.2.0",
100 | "@tensorflow-models/body-pix": "1.1.2",
101 | "@tensorflow-models/coco-ssd": "^2.0.0",
102 | "@tensorflow-models/knn-classifier": "1.2.1",
103 | "@tensorflow-models/mobilenet": "2.0.3",
104 | "@tensorflow-models/posenet": "2.1.3",
105 | "@tensorflow-models/speech-commands": "0.3.9",
106 | "@tensorflow/tfjs": "~1.2.1",
107 | "@tensorflow/tfjs-vis": "^1.1.0",
108 | "events": "^3.0.0",
109 | "face-api.js": "0.20.1"
110 | }
111 | }
112 |
--------------------------------------------------------------------------------
/src/NeuralNetwork/NeuralNetworkVis.js:
--------------------------------------------------------------------------------
1 | // import * as tf from '@tensorflow/tfjs';
2 | import * as tfvis from '@tensorflow/tfjs-vis';
3 | // https://js.tensorflow.org/api_vis/latest/#render.barchart
4 |
5 | class NeuralNetworkVis {
6 |
7 | constructor() {
8 | // TODO:
9 | this.config = {
10 | height: 300,
11 | };
12 |
13 | // store tfvis here for now so people can access it
14 | // through ml5?
15 | this.tfvis = tfvis;
16 | }
17 |
18 | /**
19 | * creates a scatterplot from 1 input variable and 1 output variable
20 | * @param {*} inputLabel
21 | * @param {*} outputLabel
22 | * @param {*} data
23 | */
24 | scatterplot(inputLabel, outputLabel, data) {
25 |
26 | const values = data.map(item => {
27 | return {
28 | x: item.xs[inputLabel],
29 | y: item.ys[outputLabel]
30 | }
31 | });
32 |
33 | const visOptions = {
34 | name: 'debug mode'
35 | }
36 | const chartOptions = {
37 | xLabel: 'X',
38 | yLabel: 'Y',
39 | height: this.config.height
40 | }
41 |
42 | tfvis.render.scatterplot(visOptions, values, chartOptions)
43 |
44 | }
45 |
46 | /**
47 | * creates a scatterplot from all input variables and all output variables
48 | * @param {*} inputLabels
49 | * @param {*} outputLabels
50 | * @param {*} data
51 | */
52 | scatterplotAll(inputLabels, outputLabels, data) {
53 | let values = [];
54 |
55 | inputLabels.forEach(inputLabel => {
56 | outputLabels.forEach(outputLabel => {
57 |
58 | const val = data.map(item => {
59 | return {
60 | x: item.xs[inputLabel],
61 | y: item.ys[outputLabel]
62 | }
63 | });
64 |
65 | values = [...values, ...val];
66 | })
67 | })
68 |
69 | const visOptions = {
70 | name: 'debug mode'
71 | }
72 |
73 | const chartOptions = {
74 | xLabel: 'X',
75 | yLabel: 'Y',
76 | height: this.config.height
77 | }
78 |
79 | tfvis.render.scatterplot(visOptions, values, chartOptions)
80 | }
81 |
82 | /**
83 | * creates a barchart from 1 input label and 1 output label
84 | * @param {*} inputLabel
85 | * @param {*} outputLabel
86 | * @param {*} data
87 | */
88 | barchart(inputLabel, outputLabel, data) {
89 |
90 | const values = data.map(item => {
91 | return {
92 | value: item.xs[inputLabel],
93 | index: item.ys[outputLabel]
94 | }
95 | });
96 |
97 | const chartOptions = {
98 | xLabel: 'label',
99 | yLabel: 'value',
100 | height: this.config.height
101 | }
102 |
104 |     // Render to visor
105 |     const surface = {
106 |       name: 'Bar chart'
107 |     };
108 |     tfvis.render.barchart(surface, values, chartOptions);
109 | }
110 |
111 | /**
112 | * create a confusion matrix
113 | * @param {*} inputLabels
114 | * @param {*} outputLabels
115 | * @param {*} data
116 | */
117 | // confusionMatrix(inputLabels, outputLabels, data) {
118 |
119 | // }
120 |
121 |
122 | /**
123 | * Visualize the training of the neural net
124 | */
125 | trainingVis(){
126 | return tfvis.show.fitCallbacks({
127 | name: 'Training Performance'
128 | },
129 | ['loss', 'accuracy'], {
130 | height: this.config.height,
131 | callbacks: ['onEpochEnd']
132 | }
133 | )
134 | }
135 |
136 |
137 | }
138 |
139 | export default NeuralNetworkVis
--------------------------------------------------------------------------------
/docs/reference/unet.md:
--------------------------------------------------------------------------------
1 | # UNET
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 | ## Description
10 |
11 | The U-Net is a convolutional neural network that was developed for biomedical image segmentation at the Computer Science Department of the University of Freiburg, Germany.[1] The network is based on the fully convolutional network [2] and its architecture was modified and extended to work with fewer training images and to yield more precise segmentations.
12 |
13 | UNET allows you to segment an image.
14 |
15 | The ml5 unet `face` model allows you to, for example, remove the background from a video of the upper body of a person.
16 |
17 |
18 | ## Quickstart
19 |
20 | ```js
21 | // load your model...
22 | const uNet = ml5.uNet('face');
23 |
24 | // assuming you have an HTMLVideo feed...
25 | uNet.segment(video, gotResult);
26 |
27 | function gotResult(error, result) {
28 | // if there's an error return it
29 | if (error) {
30 | console.error(error);
31 | return;
32 | }
33 | // log your result
34 | console.log(result)
35 | }
36 | ```
37 |
38 |
39 | ## Usage
40 |
41 | ### Initialize
42 |
43 | ```js
44 | const unet = ml5.uNet(model)
45 | // OR
46 | const unet = ml5.uNet(model, ?callback)
47 | ```
48 |
49 | #### Parameters
50 | * **model**: A string to the path of the JSON model.
51 | * **callback**: Optional. A callback function that is called once the model has loaded. If no callback is provided, it will return a promise that will be resolved once the model has loaded.
52 |
53 |
54 | ### Properties
55 |
56 |
57 | ***
58 | #### .ready
59 | > *Boolean*. Boolean value that specifies if the model has loaded.
60 | ***
61 |
62 |
63 | ### Methods
64 |
65 |
70 | ***
71 | #### .segment()
72 | > segments the image
73 |
74 | ```js
75 | unet.segment(?video, ?callback);
76 | ```
77 |
78 | 📥 **Inputs**
79 | * **video**: Optional. An HTML video element or a p5 video element.
80 | * **callback**: Optional. A function to run once the model has been loaded.
81 |
82 | 📤 **Outputs**
83 |
84 | * **Object**: Returns an Object.
85 | ```js
86 | {
87 |   segmentation: mask,
88 | blob: {
89 | featureMask: *Blob*,
90 | backgroundMask: *Blob*
91 | },
92 | tensor: {
93 | featureMask: *Tensor*,
94 | backgroundMask: *Tensor*,
95 | },
96 | raw: {
97 | featureMask: *ImageData*,
98 | backgroundMask: *ImageData*
99 | },
100 | // returns if p5 is available
101 | featureMask: *p5Image*,
102 | backgroundMask: *p5Image*,
103 | mask: *p5Image*
104 | };
105 | ```
106 |
107 | ***
108 |
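In a p5.js sketch, for example, you might keep segmenting the video and draw the most recent mask. A minimal sketch, assuming p5 is available so the `p5Image` fields listed above exist:

```js
let segmentationImage;

function gotResult(error, result) {
  if (error) {
    console.error(error);
    return;
  }
  // p5 is available, so result.backgroundMask is a p5.Image
  segmentationImage = result.backgroundMask;
  uNet.segment(video, gotResult); // request the next segmentation
}

function draw() {
  if (segmentationImage) {
    image(segmentationImage, 0, 0, width, height);
  }
}
```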
109 |
110 | ## Examples
111 |
112 | **p5.js**
113 | * [UNET_webcam](https://github.com/ml5js/ml5-examples/tree/development/p5js/UNET/UNET_webcam)
114 |
115 | **p5 web editor**
116 | * [UNET_webcam](https://editor.p5js.org/ml5/sketches/UNET_webcam)
117 |
118 | **plain javascript**
119 | * [UNET_webcam](https://github.com/ml5js/ml5-examples/tree/development/javascript/UNET/UNET_webcam)
120 |
121 |
122 | ## Demo
123 |
124 | No demos yet - contribute one today!
125 |
126 | ## Tutorials
127 |
128 | No tutorials yet - contribute one today!
129 |
130 |
131 | ## Acknowledgements
132 |
133 | **Contributors**:
134 | * Developed by [Zaid Alyafeai](https://github.com/zaidalyafeai)
135 | * Additional contributions by [Joey Lee](https://github.com/joeyklee)
136 |
137 | **Credits**:
138 | * UNET 'face' was trained by [Zaid Alyafeai](https://github.com/zaidalyafeai) using [mut1ny - Face/Head segmentation dataset](http://www.mut1ny.com/face-headsegmentation-dataset).
139 |
140 | ## Source Code
141 |
142 | * [/src/UNET/](https://github.com/ml5js/ml5-library/tree/development/src/UNET)
143 |
--------------------------------------------------------------------------------
/src/ImageClassifier/index_test.js:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2018 ml5
2 | //
3 | // This software is released under the MIT License.
4 | // https://opensource.org/licenses/MIT
5 |
6 | const {
7 | imageClassifier
8 | } = ml5;
9 |
10 | const TM_URL = 'https://storage.googleapis.com/tm-models/WfgKPytY/model.json';
11 |
12 | const DEFAULTS = {
13 | learningRate: 0.0001,
14 | hiddenUnits: 100,
15 | epochs: 20,
16 | numClasses: 2,
17 | batchSize: 0.4,
18 | topk: 3,
19 | alpha: 1,
20 | version: 2,
21 | };
22 |
23 | async function getImage() {
24 | const img = new Image();
25 | img.crossOrigin = true;
26 | img.src = 'https://cdn.jsdelivr.net/gh/ml5js/ml5-library@development/assets/bird.jpg';
27 | await new Promise((resolve) => {
28 | img.onload = resolve;
29 | });
30 | return img;
31 | }
32 |
33 | async function getCanvas() {
34 | const img = await getImage();
35 | const canvas = document.createElement('canvas');
36 | canvas.width = img.width;
37 | canvas.height = img.height;
38 | canvas.getContext('2d').drawImage(img, 0, 0);
39 | return canvas;
40 | }
41 |
42 | describe('imageClassifier', () => {
43 | let classifier;
44 |
45 | /**
46 | * Test imageClassifier with teachable machine
47 | */
48 | // Teachable machine model
49 | describe('with Teachable Machine model', () => {
50 |
51 | beforeAll(async () => {
52 | jasmine.DEFAULT_TIMEOUT_INTERVAL = 15000;
53 | classifier = await imageClassifier(TM_URL, undefined, {});
54 | });
55 |
56 | describe('instantiate', () => {
57 | it('Should create a classifier with all the defaults', async () => {
58 | expect(classifier.modelUrl).toBe(TM_URL);
59 | });
60 | });
61 |
62 | });
63 |
64 |
65 |
66 | /**
67 | * Test imageClassifier with Mobilenet
68 | */
69 | describe('imageClassifier with Mobilenet', () => {
70 |
71 | beforeAll(async () => {
72 | jasmine.DEFAULT_TIMEOUT_INTERVAL = 15000;
73 | classifier = await imageClassifier('MobileNet', undefined, {});
74 | });
75 |
76 | describe('instantiate', () => {
77 |
78 | it('Should create a classifier with all the defaults', async () => {
79 | expect(classifier.version).toBe(DEFAULTS.version);
80 | expect(classifier.alpha).toBe(DEFAULTS.alpha);
81 | expect(classifier.topk).toBe(DEFAULTS.topk);
82 | expect(classifier.ready).toBeTruthy();
83 | });
84 | })
85 |
86 | describe('classify', () => {
87 |
88 | it('Should classify an image of a Robin', async () => {
89 | const img = await getImage();
90 | await classifier.classify(img)
91 | .then(results => expect(results[0].label).toBe('robin, American robin, Turdus migratorius'));
92 | });
93 |
94 | it('Should support p5 elements with an image on .elt', async () => {
95 | const img = await getImage();
96 | await classifier.classify({
97 | elt: img
98 | })
99 | .then(results => expect(results[0].label).toBe('robin, American robin, Turdus migratorius'));
100 | });
101 |
102 | it('Should support HTMLCanvasElement', async () => {
103 | const canvas = await getCanvas();
104 | await classifier.classify(canvas)
105 | .then(results => expect(results[0].label).toBe('robin, American robin, Turdus migratorius'));
106 | });
107 |
108 | it('Should support p5 elements with canvas on .canvas', async () => {
109 | const canvas = await getCanvas();
110 | await classifier.classify({
111 | canvas
112 | })
113 | .then(results => expect(results[0].label).toBe('robin, American robin, Turdus migratorius'));
114 | });
115 |
116 | it('Should support p5 elements with canvas on .elt', async () => {
117 | const canvas = await getCanvas();
118 | await classifier.classify({
119 | elt: canvas
120 | })
121 | .then(results => expect(results[0].label).toBe('robin, American robin, Turdus migratorius'));
122 | });
123 | });
124 |
125 | });
126 |
127 | })
--------------------------------------------------------------------------------
/docs/reference/style-transfer.md:
--------------------------------------------------------------------------------
1 | # StyleTransfer
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 | ## Description
10 |
11 | Style Transfer is a machine learning technique that allows you to transfer the style of one image onto another. This is a two-step process: first you train a model on one particular style, and then you can apply this style to another image.
12 |
13 | You can train your own style transfer model by following [this tutorial](https://github.com/ml5js/training-styletransfer).
14 |
15 | This implementation is heavily based on [fast-style-transfer-deeplearnjs](https://github.com/reiinakano/fast-style-transfer-deeplearnjs) by [Reiichiro Nakano](https://github.com/reiinakano).
16 | The [original TensorFlow implementation](https://github.com/lengstrom/fast-style-transfer) was developed by [Logan Engstrom](https://github.com/lengstrom).
17 |
18 | ## Quickstart
19 |
20 | ```js
21 | // Create a new Style Transfer Instance
22 | const style = ml5.styleTransfer("data/myModel/", modelLoaded);
23 |
24 | // When the model is loaded
25 | function modelLoaded() {
26 | console.log("Model Loaded!");
27 | }
28 | // Grab an img element and generate a new image.
29 | style.transfer(document.getElementById("img"), function(err, resultImg) {
30 |   document.getElementById("img").src = resultImg.src;
31 | });
32 | ```
33 |
34 |
35 | ## Usage
36 |
37 | ### Initialize
38 |
39 | ```js
40 | const styletransfer = ml5.styleTransfer(model, ?callback)
41 | // OR
42 | const styletransfer = ml5.styleTransfer(model, ?video, ?callback)
43 | ```
44 |
45 | #### Parameters
46 | * **model**: The path to Style Transfer model.
47 | * **video**: Optional. An HTML video element or a p5 video element.
48 | * **callback**: Optional. A function to be called once the model is loaded. If no callback is provided, it will return a promise that will be resolved once the model has loaded.
49 |
50 |
51 | ### Properties
52 |
53 |
54 | ***
55 | #### .ready
56 | > *Boolean*. Boolean value that specifies if the model has loaded.
57 | ***
58 |
59 |
60 | ### Methods
61 |
62 |
63 | ***
64 | #### .transfer()
65 | > Apply style transfer to an input.
66 |
67 | ```js
68 | styletransfer.transfer(?callback)
69 | // OR
70 | styletransfer.transfer(input, ?callback)
71 | ```
72 |
73 | 📥 **Inputs**
74 |
75 | * **input**: An HTML video or image element, or a p5 image or video element. If no input is provided, the default is to use the video element given in the constructor.
76 | * **callback**: Optional. A function to run once the model has made the transfer. If no callback is provided, it will return a promise that will be resolved once the model has made the transfer.
77 |
78 | 📤 **Outputs**
79 |
80 | * **Image**: Returns an HTML img element.
81 |
82 | ***
83 |
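If a video element was passed to the constructor, `.transfer()` can be called with only a callback and will use that video as its input. A minimal sketch:

```js
// Uses the video element given in the constructor as the input.
styletransfer.transfer(function(err, resultImg) {
  if (err) {
    console.error(err);
    return;
  }
  document.body.appendChild(resultImg); // resultImg is an HTML img element
});
```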
84 |
85 | ## Examples
86 |
87 |
88 | **p5.js**
89 | * [StyleTransfer_Image](https://github.com/ml5js/ml5-examples/tree/development/p5js/StyleTransfer/StyleTransfer_Image)
90 | * [StyleTransfer_Video](https://github.com/ml5js/ml5-examples/tree/development/p5js/StyleTransfer/StyleTransfer_Video)
91 |
92 | **p5 web editor**
93 | * [StyleTransfer_Image](https://editor.p5js.org/ml5/sketches/StyleTransfer_Image)
94 | * [StyleTransfer_Video](https://editor.p5js.org/ml5/sketches/StyleTransfer_Video)
95 |
96 | **plain javascript**
97 | * [StyleTransfer_Image](https://github.com/ml5js/ml5-examples/tree/development/javascript/StyleTransfer/StyleTransfer_Image)
98 | * [StyleTransfer_Video](https://github.com/ml5js/ml5-examples/tree/development/javascript/StyleTransfer/StyleTransfer_Video)
99 |
100 | ## Demo
101 |
102 | No demos yet - contribute one today!
103 |
104 | ## Tutorials
105 |
106 | No tutorials yet - contribute one today!
107 |
108 | ## Acknowledgements
109 |
110 | **Contributors**:
111 | * Yining Shi
112 |
113 | **Credits**:
114 | * Paper Reference | Website URL | Github Repo | Book reference | etc
115 |
116 | ## Source Code
117 |
118 | * [/src/StyleTransfer/](https://github.com/ml5js/ml5-library/tree/development/src/StyleTransfer)
119 |
--------------------------------------------------------------------------------
/docs/reference/object-detector.md:
--------------------------------------------------------------------------------
1 | # Object Detector
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 | ## Description
10 |
11 | Real-time object detection system using either [YOLO](https://pjreddie.com/darknet/yolo/) or [CocoSsd](https://github.com/tensorflow/tfjs-models/tree/master/coco-ssd) model.
12 |
13 | ## Quickstart
14 |
15 | ```js
16 | const video = document.getElementById("video");
17 |
18 | // Create an objectDetector instance
19 | const objectDetector = ml5.objectDetector('cocossd', {}, modelLoaded);
20 |
21 | // When the model is loaded
22 | function modelLoaded() {
23 | console.log("Model Loaded!");
24 | }
25 |
26 | // Detect objects in the video element
27 | objectDetector.detect(video, function(err, results) {
28 | console.log(results); // Will output bounding boxes of detected objects
29 | });
30 | ```
31 |
32 |
33 | ## Usage
34 |
35 | ### Initialize
36 |
37 | ```js
38 | const objectDetector = ml5.objectDetector(modelNameOrUrl);
39 | // OR
40 | const objectDetector = ml5.objectDetector(modelNameOrUrl, ?options, ?callback)
41 | ```
42 |
43 | #### Parameters
44 | * **modelNameOrUrl**: A String value of a valid model OR a url to a `model.json` that contains a pre-trained model. Models available are: ['cocossd'](https://github.com/tensorflow/tfjs-models/tree/master/coco-ssd), ['yolo'](https://pjreddie.com/darknet/yolo/)
45 | * **options**: Optional. An object describing the model's accuracy and performance trade-offs. For YOLO these are: `{ filterBoxesThreshold: 0.01, IOUThreshold: 0.4, classProbThreshold: 0.4 }`
46 | * **callback**: Optional. A function to run once the model has been loaded. If no callback is provided, it will return a promise that will be resolved once the model has loaded.
47 |
48 | ### Methods
49 |
50 |
51 | ***
52 | #### .detect()
53 | > Given an image or video, returns an array of objects containing class names, bounding boxes and probabilities.
54 |
55 | ```js
56 | objectDetector.detect(input, ?callback)
57 | ```
58 |
59 | 📥 **Inputs**
60 |
61 | * **input**: An HTML video or image element, or a p5 image or video element. If no input is provided, the default is to use the video given in the constructor.
62 | * **callback**: A function to run once the model has made the prediction. If no callback is provided, it will return a promise that will be resolved once the model has made a prediction.
63 |
64 | 📤 **Outputs**
65 |
66 | * **Object**: returns an array of objects containing class names, bounding boxes and probabilities.
67 |
68 | ***
69 |
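Each detection in the returned array carries a class name, a bounding box, and a probability. A minimal sketch of iterating over the results (the `img` input and the field names below follow the ml5 examples and are assumptions; check your model's actual output to confirm them):

```js
objectDetector.detect(img, function(err, results) {
  if (err) {
    console.error(err);
    return;
  }
  results.forEach(detection => {
    // label, confidence and box coordinates are assumed field names
    const { label, confidence, x, y, width, height } = detection;
    console.log(`${label} (${(confidence * 100).toFixed(1)}%) at ${x}, ${y}, ${width}x${height}`);
  });
});
```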
70 |
71 | ## Examples
72 |
73 |
74 | **p5.js**
75 | * [COCOSSD_Video](https://github.com/ml5js/ml5-examples/tree/development/p5js/ObjectDetector/COCOSSD_Video)
76 | * [COCOSSD_single_image](https://github.com/ml5js/ml5-examples/tree/development/p5js/ObjectDetector/COCOSSD_single_image)
77 | * [YOLO_single_image](https://github.com/ml5js/ml5-examples/tree/development/p5js/ObjectDetector/YOLO_single_image)
78 | * [YOLO_Video](https://github.com/ml5js/ml5-examples/tree/development/p5js/ObjectDetector/YOLO_Video)
79 |
80 | **p5 web editor**
81 | * [YOLO_single_image](https://editor.p5js.org/ml5/sketches/YOLO_single_image)
82 | * [YOLO_webcam](https://editor.p5js.org/ml5/sketches/YOLO_webcam)
83 |
84 | **plain javascript**
85 | * [COCOSSD_single_image](https://github.com/ml5js/ml5-examples/tree/development/javascript/ObjectDetector/COCOSSD_single_image)
86 | * [COCOSSD_webcam](https://github.com/ml5js/ml5-examples/tree/development/javascript/ObjectDetector/COCOSSD_webcam)
87 | * [YOLO_single_image](https://github.com/ml5js/ml5-examples/tree/development/javascript/ObjectDetector/YOLO_single_image)
88 | * [YOLO_webcam](https://github.com/ml5js/ml5-examples/tree/development/javascript/ObjectDetector/YOLO_webcam)
89 |
90 | ## Demo
91 |
92 | No demos yet - contribute one today!
93 |
94 | ## Tutorials
95 |
96 | No tutorials yet - contribute one today!
97 |
98 | ## Acknowledgements
99 |
100 | **Contributors**:
101 | * Cristobal Valenzuela
102 |
103 | **Credits**:
104 | * Paper Reference | Website URL | Github Repo | Book reference | etc
105 |
106 | ## Source Code
107 |
108 | * [/src/ObjectDetector](https://github.com/ml5js/ml5-library/tree/development/src/ObjectDetector)
109 |
--------------------------------------------------------------------------------
/docs/reference/sketchrnn.md:
--------------------------------------------------------------------------------
1 | # SketchRnn
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 | ## Description
10 |
11 | SketchRNN is a recurrent neural network model trained on millions of doodles collected from the [Quick, Draw! game](https://quickdraw.withgoogle.com/). The SketchRNN model can create new drawings (from a list of categories) based on an initial path.
12 |
13 | The original paper and implementation of SketchRNN was made in TensorFlow and ported to [Magenta.js](https://magenta.tensorflow.org/get-started/#magenta-js) by [David Ha](https://twitter.com/hardmaru). The ml5.js implementation was ported by [Reiichiro Nakano](https://github.com/reiinakano).
14 |
15 | The ml5 library includes [a list of supported SketchRNN models](https://github.com/ml5js/ml5-library/blob/master/src/SketchRNN/models.js).
16 |
17 | ## Quickstart
18 |
19 | ```js
20 | // Create a new SketchRNN Instance
21 | const model = ml5.sketchRNN("cat", modelReady);
22 |
23 | // When the model is loaded
24 | function modelReady() {
25 | console.log("SketchRNN Model Loaded!");
26 | }
27 | // Reset the model's current state
28 | model.reset();
29 | // Generate a new stroke
30 | model.generate(gotSketch);
31 |
32 | function gotSketch(err, result) {
33 | // Do something with the result
34 | }
35 | ```
36 |
37 |
38 | ## Usage
39 |
40 | ### Initialize
41 |
42 | ```js
43 | const sketchrnn = ml5.sketchRNN(model, ?callback)
44 | ```
45 |
46 | #### Parameters
47 | * **model**: The name of the model to use.
48 | * **callback**: Optional. A function to be called once the model is loaded. If no callback is provided, it will return a promise that will be resolved once the model has loaded.
49 |
50 | ### Properties
51 |
52 | ***
53 | #### .ready
54 | > *Boolean*. Boolean value that specifies if the model has loaded.
55 | ***
56 |
57 |
58 | ### Methods
59 |
60 |
61 | ***
62 | #### .reset()
63 | > Reset the model's current state
64 |
65 | ```js
66 | sketchrnn.reset()
67 | ```
68 |
69 | 📥 **Inputs**
70 |
71 | * n/a
72 |
73 | 📤 **Outputs**
74 |
75 | * n/a
76 |
77 | ***
78 |
79 |
80 |
81 |
84 | ***
85 | #### .generate()
86 | > Generates a new sample with the current state.
87 |
88 | ```js
89 | sketchrnn.generate(?seed, ?options, ?callback)
90 | ```
91 |
92 | 📥 **Inputs**
93 | * **seed**: Optional. A seed to be passed to the model before generating a new stroke.
94 | * **options**: Optional. An object describing the options of the model.
95 | * **callback**: Optional. A function that will return a generated stroke. If no callback is provided, it will return a promise that will be resolved with a generated stroke.
96 |
97 | 📤 **Outputs**
98 |
99 | * **Object**: an object describing the next stroke segment: the pen offsets and the pen state, e.g. `{dx, dy, pen}`, where `pen` is one of `'down'`, `'up'`, or `'end'`.
100 |
101 | ***
102 |
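In a p5.js sketch, the generated strokes can be accumulated into a drawing by moving a "pen" by each offset. A minimal sketch, assuming the `{dx, dy, pen}` result shape described above:

```js
let x = 100;
let y = 100;

function gotSketch(err, stroke) {
  if (err) {
    console.error(err);
    return;
  }
  if (stroke.pen === 'down') {
    line(x, y, x + stroke.dx, y + stroke.dy); // draw only while the pen is down
  }
  x += stroke.dx;
  y += stroke.dy;
  if (stroke.pen !== 'end') {
    model.generate(gotSketch); // request the next stroke segment
  }
}
```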
103 |
104 |
105 |
106 |
107 | ## Examples
108 |
109 | **p5.js**
110 | * [SketchRNN_basic](https://github.com/ml5js/ml5-examples/tree/development/p5js/SketchRNN/SketchRNN_basic)
111 | * [SketchRNN_interactive](https://github.com/ml5js/ml5-examples/tree/development/p5js/SketchRNN/SketchRNN_interactive)
112 |
113 | **p5 web editor**
114 | * [SketchRNN_basic](https://editor.p5js.org/ml5/sketches/SketchRNN_basic)
115 | * [SketchRNN_interactive](https://editor.p5js.org/ml5/sketches/SketchRNN_interactive)
116 |
117 | **plain javascript**
118 | * [SketchRNN_basic](https://github.com/ml5js/ml5-examples/tree/development/javascript/SketchRNN/_basic)
119 | * [SketchRNN_interactive](https://github.com/ml5js/ml5-examples/tree/development/javascript/SketchRNN/SketchRNN_interactive)
120 |
121 |
122 | ## Demo
123 |
124 | No demos yet - contribute one today!
125 |
126 | ## Tutorials
127 |
128 | No tutorials yet - contribute one today!
129 |
130 |
131 | ## Acknowledgements
132 |
133 | **Contributors**:
134 | * Name 1
135 | * Name 2
136 |
137 | **Credits**:
138 | * Paper Reference | Website URL | Github Repo | Book reference | etc
139 |
140 |
141 |
142 | ## Source Code
143 |
144 | * [/src/SketchRNN/](https://github.com/ml5js/ml5-library/tree/development/src/SketchRNN)
145 |
146 |
--------------------------------------------------------------------------------
/docs/reference/pitch-detection.md:
--------------------------------------------------------------------------------
1 | # PitchDetection
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 | ## Description
10 |
11 | A pitch detection algorithm is a way of estimating the pitch or fundamental frequency of an audio signal. This method allows you to use a pre-trained machine learning model to estimate the pitch of an audio stream.
12 |
13 | At present ml5.js only supports the CREPE model. This model is a direct port of [github.com/marl/crepe](https://github.com/marl/crepe) and only works with direct input from the browser microphone.
14 |
15 | ## Quickstart
16 |
17 | ```js
18 | const audioContext = new AudioContext();
19 | // const MicStream = MicStream
20 | const pitch = ml5.pitchDetection(
21 | "./model/",
22 | audioContext,
23 | MicStream,
24 | modelLoaded
25 | );
26 |
27 | // When the model is loaded
28 | function modelLoaded() {
29 | console.log("Model Loaded!");
30 | }
31 |
32 | pitch.getPitch(function(err, frequency) {
33 | console.log(frequency);
34 | });
35 | ```
36 |
37 |
38 | ## Usage
39 |
40 | ### Initialize
41 |
42 | ```js
43 | const detector = ml5.pitchDetection(model, audioContext, stream, callback);
44 | ```
45 |
46 | #### Parameters
47 | * **model**: REQUIRED. The path to the trained model. Only [CREPE](https://github.com/marl/crepe) is available for now. Case insensitive.
48 | * **audioContext**: REQUIRED. The browser audioContext to use.
49 | * **stream MediaStream**: REQUIRED. The media stream to use.
50 | * **callback**: Optional. A callback to be called once the model has loaded. If no callback is provided, it will return a promise that will be resolved once the model has loaded.
51 |
52 | ### Properties
53 |
54 |
55 | ***
56 | #### .audioContext
57 | > the AudioContext instance. Contains sampleRate, currentTime, state, baseLatency.
58 | ***
59 |
60 | ***
61 | #### .model
62 | > the pitch detection model.
63 | ***
64 |
65 | ***
66 | #### .results
67 | > the current pitch prediction results from the classification model.
68 | ***
69 |
70 | ***
71 | #### .running
72 | > a boolean value stating whether the model instance is running or not.
73 | ***
74 |
75 | ***
76 | #### .stream
77 | > the MediaStream instance. Contains an id and a boolean `active` value.
78 | ***
79 |
80 |
81 |
82 | ### Methods
83 |
84 |
85 | ***
86 | #### .getPitch()
87 | > gets the pitch.
88 |
89 | ```js
90 | detector.getPitch(?callback)
91 | ```
92 |
93 | 📥 **Inputs**
94 |
95 | * **callback**: Optional. A function to be called when the model has generated content. If no callback is provided, it will return a promise that will be resolved once the model has predicted the pitch.
96 |
97 | 📤 **Outputs**
98 |
99 | * **Number**: The frequency of the detected pitch, in Hz.
100 |
101 | ***
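102 | 
103 | Since `.getPitch()` resolves a single estimate per call, a common pattern is to call it again from inside its own callback to track pitch continuously. A minimal sketch, assuming the `detector` instance from above:
104 | 
105 | ```js
106 | function getPitchLoop() {
107 |   detector.getPitch((err, frequency) => {
108 |     if (frequency) {
109 |       console.log(frequency); // a frequency in Hz when a pitch is detected
110 |     }
111 |     getPitchLoop(); // request the next estimate
112 |   });
113 | }
114 | getPitchLoop();
115 | ```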
102 |
103 |
104 | ## Examples
105 |
106 | **p5.js**
107 | * [PitchDetection](https://github.com/ml5js/ml5-examples/tree/development/p5js/PitchDetection/PitchDetection)
108 | * [PitchDetection_Game](https://github.com/ml5js/ml5-examples/tree/development/p5js/PitchDetection/PitchDetection_Game)
109 | * [PitchDetection_Piano](https://github.com/ml5js/ml5-examples/tree/development/p5js/PitchDetection/PitchDetection_Piano)
110 |
111 | **p5 web editor**
112 | * [PitchDetection](https://editor.p5js.org/ml5/sketches/PitchDetection)
113 | * [PitchDetection_Game](https://editor.p5js.org/ml5/sketches/PitchDetection_Game)
114 | * [PitchDetection_Piano](https://editor.p5js.org/ml5/sketches/PitchDetection_Piano)
115 |
116 | **plain javascript**
117 | * [PitchDetection](https://github.com/ml5js/ml5-examples/tree/development/javascript/PitchDetection/PitchDetection)
118 | * [PitchDetection_Game](https://github.com/ml5js/ml5-examples/tree/development/javascript/PitchDetection/PitchDetection_Game)
119 | * [PitchDetection_Piano](https://github.com/ml5js/ml5-examples/tree/development/javascript/PitchDetection/PitchDetection_Piano)
120 |
121 | ## Demo
122 |
123 | No demos yet - contribute one today!
124 |
125 | ## Tutorials
126 |
127 | No tutorials yet - contribute one today!
128 |
129 | ## Acknowledgements
130 |
131 | **Contributors**:
132 | * Hannah Davis
133 |
134 | **Credits**:
135 | * Paper Reference | Website URL | Github Repo | Book reference | etc
136 |
137 | ## Source Code
138 |
139 | [/src/PitchDetection](https://github.com/ml5js/ml5-library/tree/development/src/PitchDetection)
140 |
--------------------------------------------------------------------------------
/docs/reference/yolo.md:
--------------------------------------------------------------------------------
1 | # YOLO
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 | ## Description
10 |
11 | Note: the YOLO module is being deprecated; please refer to the [ObjectDetector](object-detector.md) module instead.
12 |
13 | You only look once ([YOLO](https://pjreddie.com/darknet/yolo/)) is a state-of-the-art, real-time object detection system.
14 |
15 | From the [creators](https://pjreddie.com/darknet/yolo/) website:
16 |
17 | *Prior detection systems repurpose classifiers or localizers to perform detection. They apply the model to an image at multiple locations and scales. High scoring regions of the image are considered detections.*
18 |
19 | *We use a totally different approach. We apply a single neural network to the full image. This network divides the image into regions and predicts bounding boxes and probabilities for each region. These bounding boxes are weighted by the predicted probabilities. [Source](https://pjreddie.com/darknet/yolo/)*
20 |
21 | This implementation is heavily derived from [ModelDepot](https://github.com/ModelDepot/tfjs-yolo-tiny).
22 |
23 | ## Quickstart
24 |
25 | ```js
26 | const video = document.getElementById("video");
27 |
28 | // Create a YOLO method
29 | const yolo = ml5.YOLO(video, modelLoaded);
30 |
31 | // When the model is loaded
32 | function modelLoaded() {
33 | console.log("Model Loaded!");
34 | }
35 |
36 | // Detect objects in the video element
37 | yolo.detect(function(err, results) {
38 | console.log(results); // Will output bounding boxes of detected objects
39 | });
40 | ```
41 |
42 |
43 | ## Usage
44 |
45 | ### Initialize
46 |
47 | ```js
48 | const yolo = ml5.YOLO();
49 | // OR
50 | const yolo = ml5.YOLO(video);
51 | // OR
52 | const yolo = ml5.YOLO(video, ?options, ?callback)
53 | // OR
54 | const yolo = ml5.YOLO(?options, ?callback)
55 | ```
56 |
57 | #### Parameters
58 | * **video**: Optional. An HTML video element or a p5 video element.
59 | * **options**: Optional. An object describing the model's accuracy and performance trade-offs. The available options and their defaults are: `{ filterBoxesThreshold: 0.01, IOUThreshold: 0.4, classProbThreshold: 0.4 }`
60 | * **callback**: Optional. A function to run once the model has been loaded. If no callback is provided, it will return a promise that will be resolved once the model has loaded.
61 |
62 |
63 | ### Properties
64 |
65 |
66 | ***
67 | #### .isPredicting
68 | > *Boolean*. Boolean value that specifies if the model is currently predicting.
69 | ***
70 |
71 |
72 | ***
73 | #### .modelReady
74 | > *Boolean*. Boolean value that specifies if the model has loaded.
75 | ***
76 |
77 |
78 | ### Methods
79 |
80 |
81 | ***
82 | #### .detect()
83 | > Given an image or video, returns an array of objects containing class names, bounding boxes and probabilities.
84 |
85 | ```js
86 | yolo.detect(input, ?callback)
87 | // OR
88 | yolo.detect(?callback)
89 | ```
90 |
91 | 📥 **Inputs**
92 |
93 | * **input**: An HTML video or image element or a p5 image or video element. If no input is provided, the default is to use the video given in the constructor.
94 | * **callback**: A function to run once the model has made the prediction. If no callback is provided, it will return a promise that will be resolved once the model has made a prediction.
95 |
96 | 📤 **Outputs**
97 |
98 | * **Array**: returns an array of objects containing class names, bounding boxes, and probabilities.
99 |
100 | ***
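101 | 
102 | For illustration, a sketch of iterating over the detections. The property names here (`label`, `confidence`, and a normalized `x`/`y`/`w`/`h` bounding box) are assumptions to verify against your release:
103 | 
104 | ```js
105 | yolo.detect(video, (err, results) => {
106 |   if (err) {
107 |     console.error(err);
108 |     return;
109 |   }
110 |   results.forEach(obj => {
111 |     // scale the normalized box back up to the video's size (assumed shape)
112 |     console.log(obj.label, obj.confidence, obj.x * video.width, obj.y * video.height);
113 |   });
114 | });
115 | ```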
101 |
102 |
103 | ## Examples
104 |
105 |
106 | **p5.js**
107 | * [YOLO_single_image](https://github.com/ml5js/ml5-examples/tree/development/p5js/YOLO/YOLO_single_image)
108 | * [YOLO_webcam](https://github.com/ml5js/ml5-examples/tree/development/p5js/YOLO/YOLO_webcam)
109 |
110 | **p5 web editor**
111 | * [YOLO_single_image](https://editor.p5js.org/ml5/sketches/YOLO_single_image)
112 | * [YOLO_webcam](https://editor.p5js.org/ml5/sketches/YOLO_webcam)
113 |
114 | **plain javascript**
115 | * [YOLO_single_image](https://github.com/ml5js/ml5-examples/tree/development/javascript/YOLO/YOLO_single_image)
116 | * [YOLO_webcam](https://github.com/ml5js/ml5-examples/tree/development/javascript/YOLO/YOLO_webcam)
117 |
118 | ## Demo
119 |
120 | No demos yet - contribute one today!
121 |
122 | ## Tutorials
123 |
124 | No tutorials yet - contribute one today!
125 |
126 | ## Acknowledgements
127 |
128 | **Contributors**:
129 | * Cristobal Valenzuela
130 |
131 | **Credits**:
132 | * Paper Reference | Website URL | Github Repo | Book reference | etc
133 |
134 | ## Source Code
135 |
136 | * [/src/YOLO](https://github.com/ml5js/ml5-library/tree/development/src/YOLO)
137 |
--------------------------------------------------------------------------------
/src/Sentiment/index.js:
--------------------------------------------------------------------------------
1 | import * as tf from '@tensorflow/tfjs';
2 | import callCallback from '../utils/callcallback';
3 | import modelLoader from '../utils/modelLoader';
4 |
5 | /**
6 | * Initializes the Sentiment demo.
7 | */
8 |
9 | const OOV_CHAR = 2;
10 | const PAD_CHAR = 0;
11 |
12 | function padSequences(sequences, maxLen, padding = 'pre', truncating = 'pre', value = PAD_CHAR) {
13 | return sequences.map((seq) => {
14 | // Perform truncation.
15 | if (seq.length > maxLen) {
16 | if (truncating === 'pre') {
17 | seq.splice(0, seq.length - maxLen);
18 | } else {
19 | seq.splice(maxLen, seq.length - maxLen);
20 | }
21 | }
22 | // Perform padding.
23 | if (seq.length < maxLen) {
24 | const pad = [];
25 | for (let i = 0; i < maxLen - seq.length; i += 1) {
26 | pad.push(value);
27 | }
28 | if (padding === 'pre') {
29 | // eslint-disable-next-line no-param-reassign
30 | seq = pad.concat(seq);
31 | } else {
32 | // eslint-disable-next-line no-param-reassign
33 | seq = seq.concat(pad);
34 | }
35 | }
36 | return seq;
37 | });
38 | }
39 |
40 | class Sentiment {
41 | /**
42 | * Create Sentiment model. Currently the supported model name is 'moviereviews'. ml5 may support different models in the future.
43 | * @param {String} modelName - A string to the path of the JSON model.
44 | * @param {function} callback - Optional. A callback function that is called once the model has loaded. If no callback is provided, it will return a promise that will be resolved once the model has loaded.
45 | */
46 | constructor(modelName, callback) {
47 | /**
48 | * Promise that resolves with this instance once the model has loaded.
49 | * @type {Promise<Sentiment>}
50 | * @public
51 | */
52 | this.ready = callCallback(this.loadModel(modelName), callback);
53 | }
54 |
55 | /**
56 | * Loads the model and its metadata from the given model name or URL.
57 | */
58 |
59 | async loadModel(modelName) {
60 |
61 | const movieReviews = {
62 | model: null,
63 | metadata: null,
64 | }
65 |
66 | if (modelName.toLowerCase() === 'moviereviews') {
67 |
68 | movieReviews.model = 'https://storage.googleapis.com/tfjs-models/tfjs/sentiment_cnn_v1/model.json';
69 | movieReviews.metadata = 'https://storage.googleapis.com/tfjs-models/tfjs/sentiment_cnn_v1/metadata.json';
70 |
71 | } else if (modelLoader.isAbsoluteURL(modelName)) {
72 | const modelPath = modelLoader.getModelPath(modelName);
73 |
74 | movieReviews.model = `${modelPath}/model.json`;
75 | movieReviews.metadata = `${modelPath}/metadata.json`;
76 |
77 | } else {
78 | console.error('problem loading model');
79 | return this;
80 | }
81 |
82 |
83 | /**
84 | * The model being used.
85 | * @type {model}
86 | * @public
87 | */
88 | this.model = await tf.loadLayersModel(movieReviews.model);
89 | const metadataJson = await fetch(movieReviews.metadata);
90 | const sentimentMetadata = await metadataJson.json();
91 |
92 | this.indexFrom = sentimentMetadata.index_from;
93 | this.maxLen = sentimentMetadata.max_len;
94 |
95 | this.wordIndex = sentimentMetadata.word_index;
96 | this.vocabularySize = sentimentMetadata.vocabulary_size;
97 |
98 | return this;
99 | }
100 |
101 | /**
102 | * Scores the sentiment of given text with a value between 0 ("negative") and 1 ("positive").
103 | * @param {String} text - string of text to predict.
104 | * @returns {{score: Number}}
105 | */
106 | predict(text) {
107 | // Convert to lower case and remove all punctuations.
108 | const inputText =
109 | text.trim().toLowerCase().replace(/[.,?!]/g, '').split(' ');
110 | // Convert the words to a sequence of word indices.
111 |
112 | const sequence = inputText.map((word) => {
113 | let wordIndex = this.wordIndex[word] + this.indexFrom;
114 | // treat unknown words (which produce a NaN index) and out-of-vocabulary indices as OOV
115 | if (!Number.isFinite(wordIndex) || wordIndex > this.vocabularySize) {
116 | wordIndex = OOV_CHAR;
117 | }
118 | return wordIndex;
119 | });
119 |
120 | // Perform truncation and padding.
121 | const paddedSequence = padSequences([sequence], this.maxLen);
122 | const input = tf.tensor2d(paddedSequence, [1, this.maxLen]);
123 | const predictOut = this.model.predict(input);
124 | const score = predictOut.dataSync()[0];
125 | predictOut.dispose();
126 | input.dispose();
127 |
128 | return {
129 | score
130 | };
131 | }
132 | }
133 |
134 | const sentiment = (modelName, callback) => new Sentiment(modelName, callback);
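135 | 
136 | // Example usage (a sketch, not part of the module):
137 | //   const s = sentiment('movieReviews', () => {
138 | //     const { score } = s.predict('This movie was wonderful!');
139 | //     console.log(score); // between 0 (negative) and 1 (positive)
140 | //   });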
135 |
136 | export default sentiment;
--------------------------------------------------------------------------------
/docs/reference/sound-classifier.md:
--------------------------------------------------------------------------------
1 | # SoundClassifier
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 | ## Description
10 |
11 | The ml5.soundClassifier() method allows you to classify audio. With the right pre-trained models, you can detect whether a certain noise was made (e.g. a clapping sound or a whistle) or a certain word was said (e.g. up, down, yes, no). At this moment, ml5.soundClassifier() supports your own custom pre-trained speech-command models as well as the built-in "SpeechCommands18w" model, which can recognize the ten digits from "zero" to "nine", "up", "down", "left", "right", "go", "stop", "yes", and "no", plus the additional categories "unknown word" and "background noise".
12 |
13 | **Train your own sound classifier model with Teachable Machine**: If you'd like to train your own custom sound classification model, try [Google's Teachable Machine - coming soon!](https://teachablemachine.withgoogle.com/io19)
14 |
15 | ## Quickstart
16 |
17 | ```js
18 | // Options for the SpeechCommands18w model; the default probabilityThreshold is 0
19 | const options = { probabilityThreshold: 0.7 };
20 | const classifier = ml5.soundClassifier('SpeechCommands18w', options, modelReady);
21 |
22 | function modelReady() {
23 | // classify sound
24 | classifier.classify(gotResult);
25 | }
26 |
27 | function gotResult(error, result) {
28 | if (error) {
29 | console.log(error);
30 | return;
31 | }
32 | // log the result
33 | console.log(result);
34 | }
35 | ```
36 |
37 |
38 | ## Usage
39 |
40 | ### Initialize
41 |
42 | ```js
43 | const soundclassifier = ml5.soundClassifier(?model, ?options, ?callback)
44 | ```
45 |
46 | By default, the soundClassifier will listen to the default microphone.
47 |
48 | #### Parameters
49 | * **model**: Optional. Model name or URL path to a `model.json`. Here are some options:
50 | * `SpeechCommands18w`: loads the 18w speech commands
51 | ```js
52 | const classifier = ml5.soundClassifier('SpeechCommands18w', modelReady);
53 | ```
54 | * Custom model made in Google's Teachable Machine:
55 | ```js
56 | const classifier = ml5.soundClassifier('path/to/model.json', modelReady);
57 | ```
58 | * **options**: Optional. An object describing the model's accuracy and performance. The available parameters are:
59 | 
60 | ```
61 | {
62 |   probabilityThreshold: 0.7 // the default probabilityThreshold is 0
63 | }
64 | ```
65 | * **callback**: Optional. A function to run once the model has been loaded.
66 |
67 | ### Properties
68 |
69 |
70 | ***
71 | #### .model
72 | > *Object*. The model.
73 | ***
74 |
75 |
76 | ### Methods
77 |
78 |
79 | ***
80 | #### .classify()
81 | > Given live audio input from the microphone, returns classification results via the callback.
82 |
83 | ```js
84 | soundclassifier.classify(callback);
85 | ```
86 |
87 | 📥 **Inputs**
88 | * **callback**: A function to handle the results of the classification
89 |
90 | 📤 **Outputs**
91 | * **Array**: Returns an array of objects, each containing a `label` and a `confidence` score.
92 |
93 | ***
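94 | 
95 | Note that `.classify()` keeps listening to the microphone and invokes the callback each time something is recognized, so a single call is enough for continuous classification. A small sketch of reading the top result (the exact result shape is assumed from the output described above):
96 | 
97 | ```js
98 | classifier.classify((error, results) => {
99 |   if (error) {
100 |     console.error(error);
101 |     return;
102 |   }
103 |   // results[0] is assumed to be the highest-confidence label
104 |   console.log(results[0].label, results[0].confidence);
105 | });
106 | ```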
94 |
95 |
96 | ## Examples
97 |
98 | **p5.js**
99 | * [SoundClassification_speechcommand](https://github.com/ml5js/ml5-examples/tree/development/p5js/SoundClassification/SoundClassification_speechcommand)
100 | * [SoundClassification_speechcommand_load](https://github.com/ml5js/ml5-examples/tree/development/p5js/SoundClassification/SoundClassification_speechcommand_load)
101 |
102 | **p5 web editor**
103 | * [SoundClassification_speechcommand](https://editor.p5js.org/ml5/sketches/SoundClassification_speechcommand)
104 | * [SoundClassification_speechcommand_load](https://editor.p5js.org/ml5/sketches/SoundClassification_speechcommand_load)
105 |
106 | **plain javascript**
107 | * [SoundClassification_speechcommand](https://github.com/ml5js/ml5-examples/tree/development/javascript/SoundClassification/SoundClassification_speechcommand)
108 | * [SoundClassification_speechcommand_load](https://github.com/ml5js/ml5-examples/tree/development/javascript/SoundClassification/SoundClassification_speechcommand_load)
109 |
110 |
111 |
112 | ## Demo
113 |
114 | No demos yet - contribute one today!
115 |
116 | ## Tutorials
117 |
118 | ### ml5.js: Sound Classification via CodingTrain
119 |
120 |
121 | ## Acknowledgements
122 |
123 | **Contributors**:
124 | * Yining Shi
125 |
126 | **Credits**:
127 | * Paper Reference | Website URL | Github Repo | Book reference | etc
128 |
129 |
130 |
131 |
132 | ## Source Code
133 |
134 | * [/src/SoundClassifier/](https://github.com/ml5js/ml5-library/tree/development/src/SoundClassifier)
135 |
--------------------------------------------------------------------------------
/src/CVAE/index.js:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2018 ml5
2 | //
3 | // This software is released under the MIT License.
4 | // https://opensource.org/licenses/MIT
5 |
6 | /* eslint prefer-destructuring: ["error", {AssignmentExpression: {array: false}}] */
7 | /* eslint no-await-in-loop: "off" */
8 | /*
9 | * CVAE: Run a conditional variational autoencoder on a pre-trained model
10 | */
11 |
12 | import * as tf from '@tensorflow/tfjs';
13 | import callCallback from '../utils/callcallback';
14 |
15 | class Cvae {
16 | /**
17 | * Create a Conditional Variational Autoencoder (CVAE).
18 | * @param {String} modelPath - Required. The url path to your model.
19 | * @param {function} callback - Required. A function to run once the model has been loaded.
20 | */
21 | constructor(modelPath, callback) {
22 | /**
23 | * Boolean value that specifies if the model has loaded.
24 | * @type {boolean}
25 | * @public
26 | */
27 | this.ready = false;
28 | this.model = {};
29 | this.latentDim = tf.randomUniform([1, 16]);
30 | this.modelPath = modelPath;
31 | this.modelPathPrefix = '';
32 |
33 | this.jsonLoader().then(val => {
34 | this.modelPathPrefix = this.modelPath.split('manifest.json')[0]
35 | this.ready = callCallback(this.loadCVAEModel(this.modelPathPrefix+val.model), callback);
36 | this.labels = val.labels;
37 | // build a zero-filled vector of length labels.length + 1, e.g. [0, 0, 0, ...]
38 | this.labelVector = Array(this.labels.length+1).fill(0);
39 | });
40 | }
41 |
42 | // load tfjs model that is converted by tensorflowjs with graph and weights
43 | async loadCVAEModel(modelPath) {
44 | this.model = await tf.loadLayersModel(modelPath);
45 | return this;
46 | }
47 |
48 | /**
49 | * Generate a random result.
50 | * @param {String} label - A label of the feature you want to generate
51 | * @param {function} callback - A function to handle the results of ".generate()". Likely a function to do something with the generated image data.
52 | * @return {raw: ImageData, src: Blob, image: p5.Image}
53 | */
54 | async generate(label, callback) {
55 | return callCallback(this.generateInternal(label), callback);
56 | }
57 |
58 | loadAsync(url){
59 | return new Promise((resolve, reject) => {
60 | if(!this.ready) reject();
61 | loadImage(url, (img) => {
62 | resolve(img);
63 | });
64 | });
65 | };
66 |
67 | getBlob(inputCanvas) {
68 | return new Promise((resolve, reject) => {
69 | if (!this.ready) reject();
70 |
71 | inputCanvas.toBlob((blob) => {
72 | resolve(blob);
73 | });
74 | });
75 | }
76 |
77 | checkP5() {
78 | if (typeof window !== 'undefined' && window.p5 && this
79 | && window.p5.Image && typeof window.p5.Image === 'function') return true;
80 | return false;
81 | }
82 |
83 | async generateInternal(label) {
84 | const res = tf.tidy(() => {
85 | this.latentDim = tf.randomUniform([1, 16]);
86 | const cursor = this.labels.indexOf(label);
87 | if (cursor < 0) {
88 | console.log('Wrong input of the label!');
89 | return [undefined, undefined]; // invalid input just return;
90 | }
91 |
92 | this.labelVector = this.labelVector.map(() => 0); // clear vector
93 | this.labelVector[cursor+1] = 1;
94 |
95 | const input = tf.tensor([this.labelVector]);
96 |
97 | const temp = this.model.predict([this.latentDim, input]);
98 | return temp.reshape([temp.shape[1], temp.shape[2], temp.shape[3]]);
99 | });
100 |
101 |
102 | const raws = await tf.browser.toPixels(res);
103 | res.dispose();
104 |
105 | const canvas = document.createElement('canvas'); // consider using OffscreenCanvas
106 | const ctx = canvas.getContext('2d');
107 | const [height, width] = res.shape; // tensor shape is [height, width, channels]
108 | canvas.width = width;
109 | canvas.height = height;
110 | const imgData = ctx.createImageData(width, height);
111 | const data = imgData.data;
112 | for (let i = 0; i < width * height * 4; i += 1) data[i] = raws[i];
113 | ctx.putImageData(imgData, 0, 0);
114 |
115 | const src = URL.createObjectURL(await this.getBlob(canvas));
116 | let image;
117 | /* global loadImage */
118 | if (this.checkP5()) image = await this.loadAsync(src);
119 | return { src, raws, image };
120 | }
121 |
122 | async jsonLoader() {
123 | return new Promise((resolve, reject) => {
124 | const xhr = new XMLHttpRequest();
125 | xhr.open('GET', this.modelPath);
126 |
127 | xhr.onload = () => {
128 | const json = JSON.parse(xhr.responseText);
129 | resolve(json);
130 | };
131 | xhr.onerror = (error) => {
132 | reject(error);
133 | };
134 | xhr.send();
135 | });
136 | }
137 | }
138 |
139 | const CVAE = (model, callback) => new Cvae(model, callback);
140 |
141 |
142 | export default CVAE;
143 |
--------------------------------------------------------------------------------
/src/utils/imageUtilities.js:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2018 ml5
2 | //
3 | // This software is released under the MIT License.
4 | // https://opensource.org/licenses/MIT
5 |
6 | import * as tf from '@tensorflow/tfjs';
7 | import p5Utils from './p5Utils';
8 |
9 | // Resize video elements
10 | const processVideo = (input, size, callback = () => {}) => {
11 | const videoInput = input;
12 | const element = document.createElement('video');
13 | videoInput.onplay = () => {
14 | const stream = videoInput.captureStream();
15 | element.srcObject = stream;
16 | element.width = size;
17 | element.height = size;
18 | element.autoplay = true;
19 | element.playsinline = true;
20 | element.muted = true;
21 | callback();
22 | };
23 | return element;
24 | };
25 |
26 | // Converts a tf tensor to a DOM img element
27 | const array3DToImage = (tensor) => {
28 | const [imgHeight, imgWidth] = tensor.shape;
29 | const data = tensor.dataSync();
30 | const canvas = document.createElement('canvas');
31 | canvas.width = imgWidth;
32 | canvas.height = imgHeight;
33 | const ctx = canvas.getContext('2d');
34 | const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height);
35 |
36 | for (let i = 0; i < imgWidth * imgHeight; i += 1) {
37 | const j = i * 4;
38 | const k = i * 3;
39 | imageData.data[j + 0] = Math.floor(256 * data[k + 0]);
40 | imageData.data[j + 1] = Math.floor(256 * data[k + 1]);
41 | imageData.data[j + 2] = Math.floor(256 * data[k + 2]);
42 | imageData.data[j + 3] = 255;
43 | }
44 | ctx.putImageData(imageData, 0, 0);
45 |
46 | // Create img HTML element from canvas
47 | const dataUrl = canvas.toDataURL();
48 | const outputImg = document.createElement('img');
49 | outputImg.src = dataUrl;
50 | outputImg.style.width = imgWidth;
51 | outputImg.style.height = imgHeight;
52 | tensor.dispose();
53 | return outputImg;
54 | };
55 |
56 | // Static Method: crop the image
57 | const cropImage = (img) => {
58 | const size = Math.min(img.shape[0], img.shape[1]);
59 | const centerHeight = img.shape[0] / 2;
60 | const beginHeight = centerHeight - (size / 2);
61 | const centerWidth = img.shape[1] / 2;
62 | const beginWidth = centerWidth - (size / 2);
63 | return img.slice([beginHeight, beginWidth, 0], [size, size, 3]);
64 | };
65 |
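66 | // Mirror an image horizontally. Accepts DOM elements, ImageData, or p5 wrappers;
67 | // returns a p5.Graphics when running inside p5, otherwise a plain canvas.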
66 | const flipImage = (img) => {
67 | // accepts an image, bitmap, video, or canvas (plain DOM or p5-wrapped)
68 | let imgWidth;
69 | let imgHeight;
70 | let inputImg;
71 |
72 | if (img instanceof HTMLImageElement ||
73 | img instanceof HTMLCanvasElement ||
74 | img instanceof HTMLVideoElement ||
75 | img instanceof ImageData) {
76 | inputImg = img;
77 | } else if (typeof img === 'object' &&
78 | (img.elt instanceof HTMLImageElement ||
79 | img.elt instanceof HTMLCanvasElement ||
80 | img.elt instanceof HTMLVideoElement ||
81 | img.elt instanceof ImageData)) {
82 |
83 | inputImg = img.elt; // Handle p5.js image
84 | } else if (typeof img === 'object' &&
85 | img.canvas instanceof HTMLCanvasElement) {
86 | inputImg = img.canvas; // Handle p5.js image
87 | } else {
88 | inputImg = img;
89 | }
90 |
91 | if (inputImg instanceof HTMLVideoElement) {
92 | // prefer the video's intrinsic dimensions when available
93 | imgWidth = inputImg.videoWidth || inputImg.width;
94 | imgHeight = inputImg.videoHeight || inputImg.height;
95 | } else {
96 | imgWidth = inputImg.width;
97 | imgHeight = inputImg.height;
98 | }
99 |
100 |
101 | if (p5Utils.checkP5()) {
102 | const p5Canvas = p5Utils.p5Instance.createGraphics(imgWidth, imgHeight);
103 | p5Canvas.push()
104 | p5Canvas.translate(imgWidth, 0);
105 | p5Canvas.scale(-1, 1);
106 | p5Canvas.image(img, 0, 0, imgWidth, imgHeight);
107 | p5Canvas.pop()
108 |
109 | return p5Canvas;
110 | }
111 | const canvas = document.createElement('canvas');
112 | canvas.width = imgWidth;
113 | canvas.height = imgHeight;
114 |
115 | const ctx = canvas.getContext('2d');
116 | ctx.drawImage(inputImg, 0, 0, imgWidth, imgHeight);
117 | ctx.translate(imgWidth, 0);
118 | ctx.scale(-1, 1);
119 | ctx.drawImage(canvas, imgWidth * -1, 0, imgWidth, imgHeight);
120 | return canvas;
121 |
122 | }
123 |
124 | // Static Method: image to tf tensor
125 | function imgToTensor(input, size = null) {
126 | return tf.tidy(() => {
127 | let img = tf.browser.fromPixels(input);
128 | if (size) {
129 | img = tf.image.resizeBilinear(img, size);
130 | }
131 | const croppedImage = cropImage(img);
132 | const batchedImage = croppedImage.expandDims(0);
133 | return batchedImage.toFloat().div(tf.scalar(127)).sub(tf.scalar(1));
134 | });
135 | }
136 |
137 | function isInstanceOfSupportedElement(subject) {
138 | return (subject instanceof HTMLVideoElement
139 | || subject instanceof HTMLImageElement
140 | || subject instanceof HTMLCanvasElement
141 | || subject instanceof ImageData)
142 | }
143 |
144 | export {
145 | array3DToImage,
146 | processVideo,
147 | cropImage,
148 | imgToTensor,
149 | isInstanceOfSupportedElement,
150 | flipImage
151 | };
152 |
--------------------------------------------------------------------------------
/docs/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | ml5 - A friendly machine learning library for the web.
6 |
7 |
8 |
12 |
13 |
14 |
15 |
16 |
75 |
76 |
77 |
104 |
105 |
--------------------------------------------------------------------------------
/docs/reference/dcgan.md:
--------------------------------------------------------------------------------
7 | 
8 |
9 | ## Description
10 |
11 | You can use neural networks to generate new content. A Generative Adversarial Network (GAN) is a machine learning architecture in which two neural networks compete as adversaries. One network, the "generator", makes new images. The other, the "discriminator", tries to guess whether an image is "fake" (made by the generator) or "real" (from the training data). Once the discriminator can no longer guess correctly, the model is trained! A DCGAN is a Deep Convolutional Generative Adversarial Network.
12 |
13 | ml5.js provides a few default pre-trained models for DCGAN, but you may consider training your own DCGAN to generate images of things you're interested in.
14 | ## Quickstart
15 |
16 | ```js
17 | const dcgan = ml5.DCGAN('model/geo/manifest.json', modelReady);
18 |
19 | // When the model is loaded
20 | function modelReady() {
21 | // Generate a new image
22 | dcgan.generate(gotImage);
23 | }
24 |
25 | function gotImage(err, result) {
26 | if (err) {
27 | console.log(err);
28 | return;
29 | }
30 | // The generated image is in the result
31 | console.log(result);
32 | }
33 | ```
34 |
35 |
36 | ## Usage
37 |
38 | ### Initialize
39 |
40 | ```js
41 | const dcgan = ml5.DCGAN(modelPath, callback)
42 | ```
43 |
44 | #### Parameters
45 | * **modelPath**: REQUIRED. This will be a JSON object called `manifest.json` that contains information about your pre-trained GAN and the url to the `model.json` file that contains your pre-trained model. The `model` property can also point to an absolute URL e.g. `"https://raw.githubusercontent.com/ml5js/ml5-data-and-models/master/models/dcgan/face/model.json"`
46 |
47 | ```json
48 | {
49 | "description": "Aerial Images of Santiago, Chile 64x64 (16 MB)",
50 | "model": "model/geo/model.json",
51 | "modelSize": 64,
52 | "modelLatentDim": 128
53 | }
54 | ```
55 | * **callback**: Required. A function to run once the model has been loaded.
56 |
57 |
58 | ### Properties
59 |
60 |
61 |
62 | ***
63 | #### .modelReady
64 | > *Boolean*. Boolean value that specifies if the model has loaded.
65 | ***
66 |
67 |
68 | ***
69 | #### .model
70 | > *Object*. An object that specifies the model properties
71 | ***
72 |
73 | ***
74 | #### .modelPath
75 | > *String*. The name of the model being used to generate images
76 | ***
77 |
78 |
79 | ### Methods
80 |
81 |
82 | ***
83 | #### .generate()
84 | > Generates a new image from the model, optionally from a given latent-space vector.
85 |
86 | ```js
87 | dcgan.generate(callback, ?latentVector);
88 | ```
89 |
90 | 📥 **Inputs**
91 |
92 | * **callback**: REQUIRED. Function. A function to handle the results of ".generate()". Likely a function to do something with the generated image data.
93 | * **latentVector**: OPTIONAL. An array. A vector to explore the latent space of the model. If no latentVector is given, then a random "location" in the latent space is returned.
94 |
95 | 📤 **Outputs**
96 |
97 | * **Object**: Returns "raw", "blob", and "tensor". If p5.js is available, a "p5Image" will be returned as well.
98 |
99 | ***
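100 | 
101 | For example, a minimal sketch of generating from a fixed latent vector, assuming the `modelLatentDim` of 128 from the manifest above and values sampled uniformly in [0, 1):
102 | 
103 | ```js
104 | const latentDim = 128; // must match modelLatentDim in your manifest.json
105 | const latentVector = Array.from({ length: latentDim }, () => Math.random());
106 | dcgan.generate(gotImage, latentVector);
107 | ```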
100 |
101 |
102 | ## Examples
103 |
104 | **p5.js**
105 | * [DCGAN_LatentVector](https://github.com/ml5js/ml5-examples/tree/development/p5js/DCGAN/DCGAN_LatentVector)
106 | * [DCGAN_LatentVector_RandomWalk](https://github.com/ml5js/ml5-examples/tree/development/p5js/DCGAN/DCGAN_LatentVector_RandomWalk)
107 | * [DCGAN_LatentVector_Slider](https://github.com/ml5js/ml5-examples/tree/development/p5js/DCGAN/DCGAN_LatentVector_Slider)
108 | * [DCGAN_Random](https://github.com/ml5js/ml5-examples/tree/development/p5js/DCGAN/DCGAN_Random)
109 |
110 | **p5 web editor**
111 | * [DCGAN_LatentVector](https://editor.p5js.org/ml5/sketches/DCGAN_LatentVector)
112 | * [DCGAN_LatentVector_RandomWalk](https://editor.p5js.org/ml5/sketches/DCGAN_LatentVector_RandomWalk)
113 | * [DCGAN_LatentVector_Slider](https://editor.p5js.org/ml5/sketches/DCGAN_LatentVector_Slider)
114 | * [DCGAN_Random](https://editor.p5js.org/ml5/sketches/DCGAN_Random)
115 |
116 |
117 | **plain javascript**
118 | * [DCGAN_LatentVector](https://github.com/ml5js/ml5-examples/tree/development/javascript/DCGAN/DCGAN_LatentVector)
119 | * [DCGAN_LatentVector_RandomWalk](https://github.com/ml5js/ml5-examples/tree/development/javascript/DCGAN/DCGAN_LatentVector_RandomWalk)
120 | * [DCGAN_LatentVector_Slider](https://github.com/ml5js/ml5-examples/tree/development/javascript/DCGAN/DCGAN_LatentVector_Slider)
121 | * [DCGAN_Random](https://github.com/ml5js/ml5-examples/tree/development/javascript/DCGAN/DCGAN_Random)
122 |
123 |
124 |
125 | ## Demo
126 |
127 | No demos yet - contribute one today!
128 |
129 | ## Tutorials
130 |
131 | No tutorials yet - contribute one today!
132 |
133 | ## Acknowledgements
134 |
135 | **Contributors**:
136 | * YG Zhang & Rui An
137 | * Additional contributions by Joey Lee
138 |
139 | **Credits**:
140 | * Paper Reference | Website URL | Github Repo | Book reference | etc
141 |
142 |
143 | ## Source Code
144 |
145 | * [/src/DCGAN](https://github.com/ml5js/ml5-library/tree/development/src/DCGAN)
146 |
--------------------------------------------------------------------------------
/src/SketchRNN/index.js:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2018 ml5
2 | //
3 | // This software is released under the MIT License.
4 | // https://opensource.org/licenses/MIT
5 |
6 | /* eslint prefer-destructuring: ['error', {AssignmentExpression: {array: false}}] */
7 | /* eslint no-await-in-loop: 'off' */
8 | /*
9 | SketchRNN
10 | */
11 |
12 | import * as ms from '@magenta/sketch';
13 | import callCallback from '../utils/callcallback';
14 | import modelPaths from './models';
15 | import modelLoader from '../utils/modelLoader';
16 |
17 | // const PATH_START_LARGE = 'https://storage.googleapis.com/quickdraw-models/sketchRNN/large_models/';
18 | // const PATH_START_SMALL = 'https://storage.googleapis.com/quickdraw-models/sketchRNN/models/';
19 | // const PATH_END = '.gen.json';
20 |
21 |
22 | const DEFAULTS = {
23 | modelPath: 'https://storage.googleapis.com/quickdraw-models/sketchRNN/large_models/',
24 | modelPath_large: 'https://storage.googleapis.com/quickdraw-models/sketchRNN/large_models/',
25 | modelPath_small: 'https://storage.googleapis.com/quickdraw-models/sketchRNN/models/',
26 | PATH_END: '.gen.json',
27 | temperature: 0.65,
28 | pixelFactor: 3.0,
29 | }
30 |
31 | class SketchRNN {
32 | /**
33 | * Create SketchRNN.
34 | * @param {String} model - The name of the sketch model to be loaded.
35 | * The names can be found in the models.js file
36 | * @param {function} callback - Optional. A callback function that is called once the model has loaded. If no callback is provided, it will return a promise
37 | * that will be resolved once the model has loaded.
38 | */
39 | constructor(model, callback, large = true) {
40 | let checkpointUrl = model;
41 |
42 | this.config = {
43 | temperature: 0.65,
44 | pixelFactor: 3.0,
45 | modelPath: DEFAULTS.modelPath,
46 | modelPath_small: DEFAULTS.modelPath_small,
47 | modelPath_large: DEFAULTS.modelPath_large,
48 | PATH_END: DEFAULTS.PATH_END,
49 | };
50 |
51 |
52 | if(modelLoader.isAbsoluteURL(checkpointUrl) === true){
53 | const modelPath = modelLoader.getModelPath(checkpointUrl);
54 | this.config.modelPath = modelPath;
55 |
56 | } else if(modelPaths.has(checkpointUrl)) {
57 | checkpointUrl = (large ? this.config.modelPath : this.config.modelPath_small) + checkpointUrl + this.config.PATH_END;
58 | this.config.modelPath = checkpointUrl;
59 | } else {
60 | console.log('no model found!');
61 | return this;
62 | }
63 |
64 | this.model = new ms.SketchRNN(this.config.modelPath);
65 | this.penState = this.model.zeroInput();
66 | this.ready = callCallback(this.model.initialize(), callback);
67 | }
68 |
69 | async generateInternal(options, strokes) {
70 | const temperature = +options.temperature || this.config.temperature;
71 | const pixelFactor = +options.pixelFactor || this.config.pixelFactor;
72 |
73 | await this.ready;
74 | if (!this.rnnState) {
75 | this.rnnState = this.model.zeroState();
76 | this.model.setPixelFactor(pixelFactor);
77 | }
78 |
79 | if (Array.isArray(strokes) && strokes.length) {
80 | this.rnnState = this.model.updateStrokes(strokes, this.rnnState);
81 | }
82 | this.rnnState = this.model.update(this.penState, this.rnnState);
83 | const pdf = this.model.getPDF(this.rnnState, temperature);
84 | this.penState = this.model.sample(pdf);
85 | const result = {
86 | dx: this.penState[0],
87 | dy: this.penState[1],
88 | };
89 | if (this.penState[2] === 1) {
90 | result.pen = 'down';
91 | } else if (this.penState[3] === 1) {
92 | result.pen = 'up';
93 | } else if (this.penState[4] === 1) {
94 | result.pen = 'end';
95 | }
96 | return result;
97 | }
98 |
99 | async generate(optionsOrSeedOrCallback, seedOrCallback, cb) {
100 | let callback;
101 | let options;
102 | let seedStrokes;
103 |
104 | if (typeof optionsOrSeedOrCallback === 'function') {
105 | options = {};
106 | seedStrokes = [];
107 | callback = optionsOrSeedOrCallback;
108 | } else if (Array.isArray(optionsOrSeedOrCallback)) {
109 | options = {};
110 | seedStrokes = optionsOrSeedOrCallback;
111 | callback = seedOrCallback;
112 | } else if (typeof seedOrCallback === 'function') {
113 | options = optionsOrSeedOrCallback || {};
114 | seedStrokes = [];
115 | callback = seedOrCallback;
116 | } else {
117 | options = optionsOrSeedOrCallback || {};
118 | seedStrokes = seedOrCallback || [];
119 | callback = cb;
120 | }
121 |
122 | const strokes = seedStrokes.map(s => {
123 | const up = s.pen === 'up' ? 1 : 0;
124 | const down = s.pen === 'down' ? 1 : 0;
125 | const end = s.pen === 'end' ? 1 : 0;
126 | return [s.dx, s.dy, down, up, end];
127 | });
128 | return callCallback(this.generateInternal(options, strokes), callback);
129 | }
130 |
131 | reset() {
132 | this.penState = this.model.zeroInput();
133 | if (this.rnnState) {
134 | this.rnnState = this.model.zeroState();
135 | }
136 | }
137 | }
138 |
139 | const sketchRNN = (model, callback, large = true) => new SketchRNN(model, callback, large);
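140 | 
141 | // Example usage (a sketch, not part of the module):
142 | //   const model = sketchRNN('cat', async () => {
143 | //     const stroke = await model.generate();
144 | //     console.log(stroke.dx, stroke.dy, stroke.pen); // pen is 'down', 'up', or 'end'
145 | //   });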
140 |
141 | export default sketchRNN;
142 |
--------------------------------------------------------------------------------
/docs/styleguide/reference-guidelines.md:
--------------------------------------------------------------------------------
1 | # NameOfFeature
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 | ## Description
10 |
11 | Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod
12 | tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam,
13 | quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo
14 | consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse
15 | cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non
16 | proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
17 |
18 | ## Quickstart
19 |
20 | ```js
21 | // Initialize the magicFeature
22 | const magic = ml5.magicFeature('sparkles', modelLoaded);
23 |
24 | // When the model is loaded
25 | function modelLoaded() {
26 | console.log('Model Loaded!');
27 | }
28 |
29 | // Make some sparkles
30 | magic.makeSparkles(100, function(err, results) {
31 | console.log(results);
32 | });
33 | ```
34 |
35 |
36 | ## Usage
37 |
38 | ### Initialize
39 |
40 | ```js
41 | const magic = ml5.magicFeature(requiredInput, ?optionalInput1, ?optionalInput2)
42 | ```
43 |
44 | #### Parameters
45 | * **requiredInput**: REQUIRED. Notice there is no question mark in front of the input.
46 | * **optionalInput1**: OPTIONAL. Notice the `?` indicates an optional parameter.
47 | * **optionalInput2**: OPTIONAL. A description of some kind of object with some properties. Notice the `?` indicates an optional parameter.
48 |
49 | ```
50 | {
51 | sparkleCount: 100,
52 | delightFactor: 1.0,
53 | party: true
54 | }
55 | ```
56 |
57 | ### Properties
58 |
59 |
60 |
65 | ***
66 | #### .property1
67 | > *String*. A description of the property associated with the new model instance.
68 | ***
69 |
72 |
73 | ***
74 | #### .property2
75 | > *Object*. A description of the property associated with the new model instance.
76 | ***
77 |
78 | ***
79 | #### .property3
80 | > *Object*. A description of the property associated with the new model instance.
81 | ***
82 |
83 |
84 | ### Methods
85 |
86 |
91 | ***
92 | #### .makeSparkles()
93 | > Given a number, will make magicSparkles
94 |
95 | ```js
96 | classifier.makeSparkles(?numberOfSparkles, ?callback)
97 | ```
98 |
99 | 📥 **Inputs**
100 |
101 | * **numberOfSparkles**: Optional. Number. The number of sparkles you want to return.
102 | * **callback**: Optional. Function. A function to handle the results of `.makeSparkles()`. Likely a function to do something with the results of makeSparkles.
103 |
104 | 📤 **Outputs**
105 |
106 | * **Object**: Returns an array of objects. Each object contains `{something, anotherThing}`.
107 |
108 | ***
109 |
112 |
113 |
114 |
117 | ***
118 | #### .makeDisappear()
119 | > Given an image, will make objects in the image disappear
120 |
121 | ```js
122 | classifier.makeDisappear(input, ?numberOfObjects, ?callback)
123 | ```
124 |
125 | 📥 **Inputs**
126 | * **input**: REQUIRED. HTMLImageElement | HTMLVideoElement | ImageData | HTMLCanvasElement. The image or video you want to run the function on.
127 | * **numberOfObjects**: Optional. Number. The number of objects you want to disappear.
128 | * **callback**: Optional. Function. A function to handle the results of `.makeDisappear()`. Likely a function to do something with the results of the image where objects have disappeared.
129 |
130 | 📤 **Outputs**
131 |
132 | * **Image**: Returns an image.
133 |
134 | ***
135 |
138 |
139 |
140 | ## Examples
141 |
142 | **p5.js**
143 | * [Example 1]()
144 | * [Example 2]()
145 |
146 | **p5 web editor**
147 | * [Example 1]()
148 | * [Example 2]()
149 |
150 | **plain javascript**
151 | * [Example 1]()
152 | * [Example 2]()
153 |
154 | ## Demo
155 |
156 | No demos yet - contribute one today!
157 |
158 | ## Tutorials
159 |
160 | ### MagicFeature Tutorial 1 via CodingTrain
161 |
162 |
163 | ### MagicFeature Tutorial 2 via CodingTrain
164 |
165 |
166 |
167 | ## Acknowledgements
168 |
169 | **Contributors**:
170 | * Name 1
171 | * Name 2
172 |
173 | **Credits**:
174 | * Paper Reference | Website URL | Github Repo | Book reference | etc
175 |
176 | ## Source Code
177 |
178 | * [/src/MagicFeature]()
179 |
180 |
181 |
--------------------------------------------------------------------------------
/docs/README.md:
--------------------------------------------------------------------------------
1 | # ml5.js - Friendly Machine Learning for the Web
2 |
3 | Welcome to the ml5.js documentation. Here you'll find everything you need to get up and running with ml5.
4 |
5 | ## Getting Started {docsify-ignore}
6 |
7 |
8 | Take a ride on the Coding Train to watch Dan Shiffman's ["A Beginner's Guide to Machine Learning with ml5.js"](https://www.youtube.com/watch?v=jmznx0Q1fP0). Here Dan explains what ml5.js is and where it all comes from.
9 |
10 | ml5.js is machine learning _for the web_ in your web browser. Through some clever and exciting advancements, the folks building [TensorFlow.js](https://www.tensorflow.org/js) figured out that it is possible to use the web browser's built-in graphics processing unit (GPU) to do calculations that would otherwise run very slowly on the central processing unit (CPU). A really nice explanation of what is happening with GPUs can be found here: [Why are shaders fast?](https://thebookofshaders.com/01/). ml5 strives to make all these new developments in machine learning on the web more approachable for everyone.
11 |
12 |
13 | ### Quickstart
14 |
15 | The fastest ways to get started exploring the creative possibilities of ml5.js are to:
16 |
17 | 1. Download an ml5.js project boilerplate. You can download a zip file here: [ml5 project boilerplate](https://github.com/ml5js/ml5-boilerplate/releases). Or...
18 | 2. Open this [p5 web editor sketch with ml5.js added](https://editor.p5js.org/ml5/sketches/oYweRi2H7).
19 | 3. You can also copy and paste the cdn link to the ml5 library here:
20 |
21 | ```
22 | <script src="https://unpkg.com/ml5@0.4.3/dist/ml5.min.js"></script>
23 | ```
24 |
25 | ***
26 | #### Quickstart: Plain JavaScript
27 |
28 | Reference the [latest version](https://unpkg.com/ml5@0.4.3/dist/ml5.min.js) of ml5.js using a script tag in an HTML file as below:
29 |
30 |
31 | In an **index.html** file, copy and paste the following and open up that file in your web browser.
32 |
33 | ```HTML
34 | <!DOCTYPE html>
35 | <html>
36 |   <head>
37 |     <title>Getting Started with ml5.js</title>
38 |     <script src="https://unpkg.com/ml5@0.4.3/dist/ml5.min.js"></script>
39 |   </head>
40 |   <body>
41 |     <script>
42 |       // ml5 is now available; this just confirms the library loaded
43 |       console.log('ml5 version:', ml5.version);
44 |     </script>
45 |   </body>
46 | </html>
51 | ```
52 |
53 | ***
54 |
55 | ***
56 | #### Quickstart: Powered with p5.js
57 |
58 | If you're familiar with [p5.js](https://p5js.org/), ml5.js has been designed to play very nicely with p5. You can use the following boilerplate code to get started:
59 |
60 |
61 | In an **index.html** file, copy and paste the following and open up that file in your web browser.
62 |
63 | ```html
64 | <!DOCTYPE html>
65 | <html>
66 |   <head>
67 |     <title>Getting Started with ml5.js</title>
68 |     <script src="https://cdnjs.cloudflare.com/ajax/libs/p5.js/0.9.0/p5.min.js"></script>
69 |     <script src="https://unpkg.com/ml5@0.4.3/dist/ml5.min.js"></script>
70 |   </head>
71 |   <body>
72 |     <script>
73 |       // p5 calls setup() once the page has loaded
74 |       function setup() {
75 |         createCanvas(400, 400);
76 |         console.log('ml5 version:', ml5.version);
77 |       }
78 |       function draw() {
79 |         background(200);
80 |       }
81 |     </script>
82 |   </body>
83 | </html>
96 | ```
97 |
98 | ***
99 |
100 |
101 |
102 |
103 |
104 | ## Join Our Community {docsify-ignore}
105 |
106 | Coming soon
107 |
108 |
109 | ## Contribute to ml5.js {docsify-ignore}
110 |
111 | ml5 is an open source project that values all contributions. ml5 contributions often take the shape of workshops, design contributions, helping to answer people's questions on Github, flagging bugs in code, fixing bugs, adding new features, and more.
112 |
113 | If you'd like to contribute, you're welcome to browse through the issues in our [Github](https://github.com/ml5js/ml5-library/issues) or create a new issue. If you're still unsure of where to start, feel free to ping us at [@ml5js on twitter](https://twitter.com/ml5js) or email hello@ml5js.org.
114 |
115 | ## Support {docsify-ignore}
116 |
117 | ml5 is always on the look out for grants and funding to support the maintenance and development of the ml5 project (including our educational and community based initiatives). If you are an educational institution, grant funding organization, or otherwise interested in funding the ml5 community of students, researchers, artists, educators, designers, and developers, we'd love to hear from you.
118 |
119 | Feel free to reach out at hello@ml5js.org.
120 |
121 | ## Acknowledgements {docsify-ignore}
122 |
123 | ml5.js is supported by the time and dedication of open source developers from all over the world. Funding and support is generously provided by a [Google Education grant](https://edu.google.com/why-google/our-commitment/?modal_active=none%2F) at [NYU's ITP/IMA program](https://itp.nyu.edu/).
124 |
125 | Many thanks to [BrowserStack](https://www.browserstack.com/) for providing testing support.
126 |
127 |
128 |
--------------------------------------------------------------------------------
/docs/reference/charrnn.md:
--------------------------------------------------------------------------------
1 | # CharRNN
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 | ## Description
10 |
11 | RNNs and LSTMs (Long Short-Term Memory networks) are a type of neural network architecture useful for working with sequential data (like characters in text or the musical notes of a song) where the order of that sequence matters. This class allows you to run a model pre-trained on a body of text to generate new text.
12 |
13 | You can train your own models using this tutorial or use this set of pre-trained models.
14 |
15 | ## Quickstart
16 |
17 | ```js
18 | // Create the character level generator with a pre trained model
19 | const rnn = ml5.charRNN("models/bolaño/", modelLoaded);
20 |
21 | // When the model is loaded
22 | function modelLoaded() {
23 | console.log("Model Loaded!");
24 | }
25 |
26 | // Generate content
27 | rnn.generate({ seed: "the meaning of pizza is" }, function(err, results) {
28 | console.log(results);
29 | });
30 | ```
31 |
32 |
33 | ## Usage
34 |
35 | ### Initialize
36 |
37 | ```js
38 | const charrnn = ml5.charRNN(model, ?callback)
39 | ```
40 |
41 | #### Parameters
42 | * **model**: REQUIRED. An absolute or relative path to the charRNN model files.
43 | * **callback**: OPTIONAL. A callback to be called once the model has loaded. If no callback is provided, it will return a promise that will be resolved once the model has loaded.
44 |
45 | ### Properties
46 |
47 |
48 | ***
49 | #### .ready
50 | > Boolean value that specifies if the model has loaded.
51 | ***
52 |
53 | ***
54 | #### .state
55 | > The current state of the model.
56 | ***
57 |
58 | ***
59 | #### .model
60 | > The pre-trained charRNN model.
61 | ***
62 |
63 | ***
64 | #### .vocabSize
65 | > The vocabulary size (or total number of possible characters).
66 | ***
67 |
68 |
69 | ### Methods
70 |
71 |
72 | ***
73 | #### .generate()
74 | > Generates content in a stateless manner, based on some initial text (known as a "seed"). Returns a string.
75 |
76 | ```js
77 | charrnn.generate(options, ?callback)
78 | ```
79 |
80 | 📥 **Inputs**
81 |
82 | * **options**: REQUIRED. An object specifying the seed, length, and temperature parameters. The default length is 20, the default temperature is 0.5, and the default seed is a random character from the model. The object should look like this:
83 | ```
84 | {
85 | seed: 'The meaning of pizza is',
86 | length: 20,
87 | temperature: 0.5
88 | }
89 | ```
90 | * **callback**: Optional. Function. A function to be called when the model has generated content. If no callback is provided, it will return a promise that will be resolved once the model has generated new content.
91 |
92 | 📤 **Outputs**
93 |
94 | * **Object**: Returns an object: `{sample, state}`.
95 |
96 | ***
97 |
98 |
99 | ***
100 | #### .predict()
101 | > Predicts the next character based on the model's current state and a given temperature.
102 |
103 | ```js
104 | charrnn.predict(temperature, ?callback)
105 | ```
106 |
107 | 📥 **Inputs**
108 | * **temperature**: REQUIRED. A number that controls the randomness of the prediction.
109 | * **callback**: Optional. Function. A function to be called when the model finished adding the seed. If no callback is provided, it will return a promise that will be resolved once the prediction has been generated.
110 |
111 | 📤 **Outputs**
112 |
113 | * **Object**: Returns an object `{sample, probabilities}`.
114 |
115 | ***
116 |
117 | ***
118 | #### .feed()
119 | > Feeds a string of characters into the model's state.
120 |
121 | ```js
122 | charrnn.feed(seed, ?callback)
123 | ```
124 |
125 | 📥 **Inputs**
126 | * **seed**: REQUIRED. A string to feed the charRNN model state.
127 | * **callback**: Optional. Function. A function to be called when the model has finished adding the seed. If no callback is provided, it will return a promise that will be resolved once the seed has been fed.
128 |
129 | 📤 **Outputs**
130 |
131 | * none (the model's state is updated in place)
132 |
133 | ***
134 |
135 | ***
136 | #### .reset()
137 | > Reset the model state
138 |
139 | ```js
140 | charrnn.reset()
141 | ```
142 |
143 | 📥 **Inputs**
144 | * none
145 |
146 | 📤 **Outputs**
147 |
148 | * none
149 |
150 | ***
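151 | 
152 | Together, `.feed()`, `.predict()`, and `.reset()` allow stateful generation, where each new character is sampled from the model's running state. A minimal sketch, assuming the `charrnn` instance from above has loaded:
153 | 
154 | ```js
155 | async function generateStateful(seed, length, temperature = 0.5) {
156 |   charrnn.reset();
157 |   await charrnn.feed(seed);
158 |   let output = seed;
159 |   for (let i = 0; i < length; i += 1) {
160 |     const next = await charrnn.predict(temperature);
161 |     output += next.sample;
162 |     await charrnn.feed(next.sample);
163 |   }
164 |   return output;
165 | }
166 | ```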
151 |
152 |
153 | ## Examples
154 |
155 | **p5.js**
156 | * [CharRNN_Interactive](https://github.com/ml5js/ml5-examples/tree/development/p5js/CharRNN/CharRNN_Interactive)
157 | * [CharRNN_Text](https://github.com/ml5js/ml5-examples/tree/development/p5js/CharRNN/CharRNN_Text)
158 | * [CharRNN_Text_Stateful](https://github.com/ml5js/ml5-examples/tree/development/p5js/CharRNN/CharRNN_Text_Stateful)
159 |
160 | **p5 web editor**
161 |
162 | * [CharRNN_Interactive](https://editor.p5js.org/ml5/sketches/CharRNN_Interactive)
163 | * [CharRNN_Text](https://editor.p5js.org/ml5/sketches/CharRNN_Text)
164 | * [CharRNN_Text_Stateful](https://editor.p5js.org/ml5/sketches/CharRNN_Text_Stateful)
165 |
166 | **plain javascript**
167 | * [CharRNN_Interactive](https://github.com/ml5js/ml5-examples/tree/development/javascript/CharRNN/CharRNN_Interactive)
168 | * [CharRNN_Text](https://github.com/ml5js/ml5-examples/tree/development/javascript/CharRNN/CharRNN_Text)
169 | * [CharRNN_Text_Stateful](https://github.com/ml5js/ml5-examples/tree/development/javascript/CharRNN/CharRNN_Text_Stateful)
170 |
171 | ## Demo
172 |
173 | No demos yet - contribute one today!
174 |
175 | ## Tutorials
176 |
177 | No tutorials yet - contribute one today!
178 |
179 | ## Acknowledgements
180 |
181 | **Contributors**:
182 | * Cristobal Valenzuela and Memo Akten
183 |
184 | **Credits**:
185 | * Paper Reference | Website URL | Github Repo | Book reference | etc
186 |
187 | ## Source Code
188 |
189 | * [/src/CharRNN](https://github.com/ml5js/ml5-library/tree/development/src/CharRNN)
190 |
--------------------------------------------------------------------------------
/src/Word2vec/index.js:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2018 ml5
2 | //
3 | // This software is released under the MIT License.
4 | // https://opensource.org/licenses/MIT
5 |
6 | /*
7 | Word2Vec
8 | */
9 |
10 | import * as tf from '@tensorflow/tfjs';
11 | import callCallback from '../utils/callcallback';
12 |
13 |
14 | class Word2Vec {
15 | /**
16 | * Create Word2Vec model
17 | * @param {String} modelPath - path to a pre-trained word vector model in JSON format, e.g. data/wordvecs1000.json
18 | * @param {function} callback - Optional. A callback function that is called once the model has loaded. If no callback is provided, it will return a promise
19 | * that will be resolved once the model has loaded.
20 | */
21 | constructor(modelPath, callback) {
22 | this.model = {};
23 | this.modelPath = modelPath;
24 | this.modelSize = 0;
25 | this.modelLoaded = false;
26 |
27 | this.ready = callCallback(this.loadModel(), callback);
28 | // TODO: Add support to Promise
29 | // this.then = this.ready.then.bind(this.ready);
30 | }
31 |
32 | async loadModel() {
33 | const json = await fetch(this.modelPath)
34 | .then(response => response.json());
35 | Object.keys(json.vectors).forEach((word) => {
36 | this.model[word] = tf.tensor1d(json.vectors[word]);
37 | });
38 | this.modelSize = Object.keys(this.model).length;
39 | this.modelLoaded = true;
40 | return this;
41 | }
42 |
43 | dispose(callback) {
44 | Object.values(this.model).forEach(x => x.dispose());
45 | if (callback) {
46 | callback();
47 | }
48 | }
49 |
50 | async add(inputs, maxOrCb, cb) {
51 | const { max, callback } = Word2Vec.parser(maxOrCb, cb, 10);
52 |
53 | await this.ready;
54 | return tf.tidy(() => {
55 | const sum = Word2Vec.addOrSubtract(this.model, inputs, 'ADD');
56 | const result = Word2Vec.nearest(this.model, sum, inputs.length, inputs.length + max);
57 | if (callback) {
58 | callback(undefined, result);
59 | }
60 | return result;
61 | });
62 | }
63 |
64 | async subtract(inputs, maxOrCb, cb) {
65 | const { max, callback } = Word2Vec.parser(maxOrCb, cb, 10);
66 |
67 | await this.ready;
68 | return tf.tidy(() => {
69 | const subtraction = Word2Vec.addOrSubtract(this.model, inputs, 'SUBTRACT');
70 | const result = Word2Vec.nearest(this.model, subtraction, inputs.length, inputs.length + max);
71 | if (callback) {
72 | callback(undefined, result);
73 | }
74 | return result;
75 | });
76 | }
77 |
78 | async average(inputs, maxOrCb, cb) {
79 | const { max, callback } = Word2Vec.parser(maxOrCb, cb, 10);
80 |
81 | await this.ready;
82 | return tf.tidy(() => {
83 | const sum = Word2Vec.addOrSubtract(this.model, inputs, 'ADD');
84 | const avg = tf.div(sum, tf.tensor(inputs.length));
85 | const result = Word2Vec.nearest(this.model, avg, inputs.length, inputs.length + max);
86 | if (callback) {
87 | callback(undefined, result);
88 | }
89 | return result;
90 | });
91 | }
92 |
93 | async nearest(input, maxOrCb, cb) {
94 | const { max, callback } = Word2Vec.parser(maxOrCb, cb, 10);
95 |
96 | await this.ready;
97 | const vector = this.model[input];
98 | let result;
99 | if (vector) {
100 | result = Word2Vec.nearest(this.model, vector, 1, max + 1);
101 | } else {
102 | result = null;
103 | }
104 |
105 | if (callback) {
106 | callback(undefined, result);
107 | }
108 | return result;
109 | }
110 |
111 | async getRandomWord(callback) {
112 | await this.ready;
113 | const words = Object.keys(this.model);
114 | const result = words[Math.floor(Math.random() * words.length)];
115 | if (callback) {
116 | callback(undefined, result);
117 | }
118 | return result;
119 | }
120 |
121 | static parser(maxOrCallback, cb, defaultMax) {
122 | let max = defaultMax;
123 | let callback = cb;
124 |
125 | if (typeof maxOrCallback === 'function') {
126 | callback = maxOrCallback;
127 | } else if (typeof maxOrCallback === 'number') {
128 | max = maxOrCallback;
129 | }
130 | return { max, callback };
131 | }
132 |
133 | static addOrSubtract(model, values, operation) {
134 | return tf.tidy(() => {
135 | const vectors = [];
136 | const notFound = [];
137 | if (values.length < 2) {
138 | throw new Error('Invalid input, must be passed more than 1 value');
139 | }
140 | values.forEach((value) => {
141 | const vector = model[value];
142 | if (!vector) {
143 | notFound.push(value);
144 | } else {
145 | vectors.push(vector);
146 | }
147 | });
148 |
149 | if (notFound.length > 0) {
150 | throw new Error(`Invalid input, vector not found for: ${notFound.toString()}`);
151 | }
152 | let result = vectors[0];
153 | if (operation === 'ADD') {
154 | for (let i = 1; i < vectors.length; i += 1) {
155 | result = tf.add(result, vectors[i]);
156 | }
157 | } else {
158 | for (let i = 1; i < vectors.length; i += 1) {
159 | result = tf.sub(result, vectors[i]);
160 | }
161 | }
162 | return result;
163 | });
164 | }
165 |
166 | static nearest(model, input, start, max) {
167 | const nearestVectors = [];
168 | Object.keys(model).forEach((word) => {
169 | const distance = tf.util.distSquared(input.dataSync(), model[word].dataSync());
170 | nearestVectors.push({ word, distance });
171 | });
172 | nearestVectors.sort((a, b) => a.distance - b.distance);
173 | return nearestVectors.slice(start, max);
174 | }
175 | }
176 |
177 | const word2vec = (model, cb) => new Word2Vec(model, cb);
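178 | 
179 | // Example usage (a sketch, not part of the module):
180 | //   const wv = word2vec('data/wordvecs1000.json', async () => {
181 | //     const nearest = await wv.nearest('rainbow');
182 | //     console.log(nearest); // [{ word, distance }, ...] sorted by distance
183 | //   });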
178 |
179 | export default word2vec;
180 |
--------------------------------------------------------------------------------
/src/ObjectDetector/YOLO/postprocess.js:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2018 ml5
2 | //
3 | // This software is released under the MIT License.
4 | // https://opensource.org/licenses/MIT
5 |
6 | // Heavily derived from YAD2K (https://github.com/allanzelener/YAD2K)
7 | /* eslint max-len: ["error", { "code": 180 }] */
8 |
9 | import * as tf from '@tensorflow/tfjs';
10 |
11 | // export const ANCHORS = tf.tensor2d([
12 | // [0.57273, 0.677385],
13 | // [1.87446, 2.06253],
14 | // [3.33843, 5.47434],
15 | // [7.88282, 3.52778],
16 | // [9.77052, 9.16828],
17 | // ]);
18 |
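19 | // Boxes throughout this file are arrays in [top, left, bottom, right] order;
20 | // the intersection/union/IOU helpers below all assume that layout.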
19 | export const boxIntersection = (a, b) => {
20 | const w = Math.min(a[3], b[3]) - Math.max(a[1], b[1]);
21 | const h = Math.min(a[2], b[2]) - Math.max(a[0], b[0]);
22 | if (w < 0 || h < 0) {
23 | return 0;
24 | }
25 | return w * h;
26 | };
27 |
28 | export const boxUnion = (a, b) => {
29 | const i = boxIntersection(a, b);
30 | return (((a[3] - a[1]) * (a[2] - a[0])) + ((b[3] - b[1]) * (b[2] - b[0]))) - i;
31 | };
32 |
33 | export const boxIOU = (a, b) => boxIntersection(a, b) / boxUnion(a, b);
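// Worked example with illustrative numbers: for a = [0, 0, 10, 10] and
// b = [5, 5, 15, 15], the intersection is 5 * 5 = 25 and the union is
// 100 + 100 - 25 = 175, so boxIOU(a, b) = 25 / 175, roughly 0.143.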
34 |
35 | export async function filterBoxes(
36 | boxes,
37 | boxConfidence,
38 | boxClassProbs,
39 | threshold,
40 | ) {
41 |
42 | return tf.tidy(() => {
43 |
44 | const boxScores = tf.mul(boxConfidence, boxClassProbs);
45 | const boxClasses = tf.argMax(boxScores, -1);
46 | const boxClassScores = tf.max(boxScores, -1);
47 |
48 | const predictionMask = tf.greaterEqual(boxClassScores, tf.scalar(threshold));
49 |
50 | const maskArr = predictionMask.dataSync();
51 |
52 | const indicesArr = [];
53 | for (let i = 0; i < maskArr.length; i += 1) {
54 | const v = maskArr[i];
55 | if (v) {
56 | indicesArr.push(i);
57 | }
58 | }
59 |
60 | if (indicesArr.length === 0) {
61 | return [null, null, null];
62 | }
63 |
64 | const indices = tf.tensor1d(indicesArr, 'int32');
65 |
66 | const result = [
67 | tf.gather(boxes.reshape([maskArr.length, 4]), indices),
68 | tf.gather(boxClassScores.flatten(), indices),
69 | tf.gather(boxClasses.flatten(), indices),
70 | ];
71 |
72 |     // No manual dispose is needed here: tf.tidy disposes every intermediate
73 |     // tensor created inside this closure, keeping only the tensors that are
74 |     // returned in `result`.
75 |
76 | return result;
77 |   });
78 | }
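// filterBoxes performs per-class score thresholding: box confidence is
// multiplied into the class probabilities, the best class and score per box
// are taken, and only boxes whose best score reaches `threshold` survive the
// gather.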
79 |
80 | export const boxesToCorners = (boxXY, boxWH) => {
81 | return tf.tidy(() => {
82 | const two = tf.tensor1d([2.0]);
83 | const boxMins = tf.sub(boxXY, tf.div(boxWH, two));
84 | const boxMaxes = tf.add(boxXY, tf.div(boxWH, two));
85 |
86 | const dim0 = boxMins.shape[0];
87 | const dim1 = boxMins.shape[1];
88 | const dim2 = boxMins.shape[2];
89 | const size = [dim0, dim1, dim2, 1];
90 |
91 | return tf.concat([
92 | boxMins.slice([0, 0, 0, 1], size),
93 | boxMins.slice([0, 0, 0, 0], size),
94 | boxMaxes.slice([0, 0, 0, 1], size),
95 | boxMaxes.slice([0, 0, 0, 0], size),
96 | ], 3);
97 |   });
98 | };
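// boxesToCorners converts center/size (x, y, w, h) predictions into the
// corner form used by the box utilities above; the slice order swaps
// channels 0 and 1, turning (x, y) into the (y, x) convention.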
99 |
100 | export const nonMaxSuppression = (boxes, scores, iouThreshold) => {
101 | return tf.tidy(() => {
102 | // Zip together scores, box corners, and index
103 | const zipped = [];
104 | for (let i = 0; i < scores.length; i += 1) {
105 | zipped.push([
106 | scores[i],
107 | [boxes[4 * i], boxes[(4 * i) + 1], boxes[(4 * i) + 2], boxes[(4 * i) + 3]], i,
108 | ]);
109 | }
110 | const sortedBoxes = zipped.sort((a, b) => b[0] - a[0]);
111 | const selectedBoxes = [];
112 |
113 | sortedBoxes.forEach((box) => {
114 | let add = true;
115 | for (let i = 0; i < selectedBoxes.length; i += 1) {
116 | const curIOU = boxIOU(box[1], selectedBoxes[i][1]);
117 | if (curIOU > iouThreshold) {
118 | add = false;
119 | break;
120 | }
121 | }
122 | if (add) {
123 | selectedBoxes.push(box);
124 | }
125 | });
126 |
127 | return [
128 | selectedBoxes.map(e => e[2]),
129 | selectedBoxes.map(e => e[1]),
130 | selectedBoxes.map(e => e[0]),
131 | ];
132 |   });
133 | };
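// This is greedy non-max suppression: candidates are visited in descending
// score order, and a box is kept only if its IOU with every box kept so far
// stays at or below `iouThreshold`. A hedged call sketch (flat arrays of
// corner boxes and scores, names illustrative):
//   const [keptIndices, keptBoxes, keptScores] =
//     nonMaxSuppression(flatBoxes, flatScores, 0.5);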
134 |
135 | // Convert the raw YOLO network output into bounding-box and class-probability tensors
136 | /* eslint no-param-reassign: 0 */
137 | export function head(feats, anchors, numClasses) {
138 | return tf.tidy(() => {
139 | const numAnchors = anchors.shape[0];
140 |
141 | const anchorsTensor = tf.reshape(anchors, [1, 1, numAnchors, 2]);
142 |
143 | let convDims = feats.shape.slice(1, 3);
144 |
145 |     // Grid height/width as plain numbers; convDims itself is reassigned to a tensor below
146 | const convDims0 = convDims[0];
147 | const convDims1 = convDims[1];
148 |
149 | let convHeightIndex = tf.range(0, convDims[0]);
150 | let convWidthIndex = tf.range(0, convDims[1]);
151 | convHeightIndex = tf.tile(convHeightIndex, [convDims[1]]);
152 |
153 | convWidthIndex = tf.tile(tf.expandDims(convWidthIndex, 0), [convDims[0], 1]);
154 | convWidthIndex = tf.transpose(convWidthIndex).flatten();
155 |
156 | let convIndex = tf.transpose(tf.stack([convHeightIndex, convWidthIndex]));
157 | convIndex = tf.reshape(convIndex, [convDims[0], convDims[1], 1, 2]);
158 | convIndex = tf.cast(convIndex, feats.dtype);
159 |
160 | feats = tf.reshape(feats, [convDims[0], convDims[1], numAnchors, numClasses + 5]);
161 | convDims = tf.cast(tf.reshape(tf.tensor1d(convDims), [1, 1, 1, 2]), feats.dtype);
162 |
163 | let boxXY = tf.sigmoid(feats.slice([0, 0, 0, 0], [convDims0, convDims1, numAnchors, 2]));
164 | let boxWH = tf.exp(feats.slice([0, 0, 0, 2], [convDims0, convDims1, numAnchors, 2]));
165 | const boxConfidence = tf.sigmoid(feats.slice([0, 0, 0, 4], [convDims0, convDims1, numAnchors, 1]));
166 | const boxClassProbs = tf.softmax(feats.slice([0, 0, 0, 5], [convDims0, convDims1, numAnchors, numClasses]));
167 |
168 | boxXY = tf.div(tf.add(boxXY, convIndex), convDims);
169 | boxWH = tf.div(tf.mul(boxWH, anchorsTensor), convDims);
170 |
171 | return [boxXY, boxWH, boxConfidence, boxClassProbs];
172 |   });
173 | }
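// The decode above follows the standard YOLOv2 head equations (written
// informally, with cell offset c, anchor prior p, and grid size S):
//   bXY = (sigmoid(tXY) + c) / S
//   bWH = (exp(tWH) * p) / S
//   confidence = sigmoid(tConf), classProbs = softmax(tClass)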
--------------------------------------------------------------------------------