├── polynominal-fitting-curve
├── .gitignore
├── .babelrc
├── README.MD
├── package.json
├── data-seed.js
├── index.html
├── plot.js
└── app.js
├── handwritten-digit-recognition-canvas-draw
├── .gitignore
├── ._fabric.min.js
├── .babelrc
├── package.json
├── inference.js
├── README.MD
├── app.js
├── index.html
├── train.js
└── GenerateData.js
├── translation
├── dist
│ ├── .DS_Store
│ ├── ._.DS_Store
│ ├── ._model.json
│ ├── ._metadata.json
│ ├── vietai-01.png
│ ├── Archivo-Regular.ttf
│ ├── group1-shard1of1.txt
│ ├── ._group1-shard1of1.txt
│ ├── metadata.json
│ ├── index.html
│ └── model.json
├── python
│ ├── .DS_Store
│ ├── ._vie.txt
│ ├── ._.DS_Store
│ ├── ._vie-eng.zip
│ ├── vie-eng.zip
│ ├── .ipynb_checkpoints
│ │ └── translation-checkpoint.ipynb
│ ├── __init__.py
│ ├── translation.py
│ └── translation.ipynb
├── README.md
├── package.json
├── ui.js
├── serve.sh
├── loader.js
├── build-resources.sh
├── index.html
└── index.js
└── README.md
/polynominal-fitting-curve/.gitignore:
--------------------------------------------------------------------------------
1 | /node_modules
2 | /dist
--------------------------------------------------------------------------------
/handwritten-digit-recognition-canvas-draw/.gitignore:
--------------------------------------------------------------------------------
1 | /node_modules
2 | /dist
--------------------------------------------------------------------------------
/translation/dist/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bangoc123/tensorflow-js/HEAD/translation/dist/.DS_Store
--------------------------------------------------------------------------------
/translation/dist/._.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bangoc123/tensorflow-js/HEAD/translation/dist/._.DS_Store
--------------------------------------------------------------------------------
/translation/dist/._model.json:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bangoc123/tensorflow-js/HEAD/translation/dist/._model.json
--------------------------------------------------------------------------------
/translation/python/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bangoc123/tensorflow-js/HEAD/translation/python/.DS_Store
--------------------------------------------------------------------------------
/translation/python/._vie.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bangoc123/tensorflow-js/HEAD/translation/python/._vie.txt
--------------------------------------------------------------------------------
/translation/dist/._metadata.json:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bangoc123/tensorflow-js/HEAD/translation/dist/._metadata.json
--------------------------------------------------------------------------------
/translation/dist/vietai-01.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bangoc123/tensorflow-js/HEAD/translation/dist/vietai-01.png
--------------------------------------------------------------------------------
/translation/python/._.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bangoc123/tensorflow-js/HEAD/translation/python/._.DS_Store
--------------------------------------------------------------------------------
/translation/python/._vie-eng.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bangoc123/tensorflow-js/HEAD/translation/python/._vie-eng.zip
--------------------------------------------------------------------------------
/translation/python/vie-eng.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bangoc123/tensorflow-js/HEAD/translation/python/vie-eng.zip
--------------------------------------------------------------------------------
/translation/dist/Archivo-Regular.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bangoc123/tensorflow-js/HEAD/translation/dist/Archivo-Regular.ttf
--------------------------------------------------------------------------------
/translation/dist/group1-shard1of1.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bangoc123/tensorflow-js/HEAD/translation/dist/group1-shard1of1.txt
--------------------------------------------------------------------------------
/translation/dist/._group1-shard1of1.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bangoc123/tensorflow-js/HEAD/translation/dist/._group1-shard1of1.txt
--------------------------------------------------------------------------------
/translation/python/.ipynb_checkpoints/translation-checkpoint.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [],
3 | "metadata": {},
4 | "nbformat": 4,
5 | "nbformat_minor": 2
6 | }
7 |
--------------------------------------------------------------------------------
/handwritten-digit-recognition-canvas-draw/._fabric.min.js:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bangoc123/tensorflow-js/HEAD/handwritten-digit-recognition-canvas-draw/._fabric.min.js
--------------------------------------------------------------------------------
/translation/README.md:
--------------------------------------------------------------------------------
1 | # TensorFlow.js Example: Sequence-to-Sequence English-Vietnamese Translation
2 |
3 | To launch the demo, do
4 |
5 | ```
6 | yarn
7 | yarn watch
8 | ```
9 |
10 |
--------------------------------------------------------------------------------
/polynominal-fitting-curve/.babelrc:
--------------------------------------------------------------------------------
1 | {
2 | "presets": [
3 | ["env", {
4 | "targets": {
5 | "browsers": ["last 2 Chrome versions"]
6 | }
7 | }]
8 | ]
9 | }
--------------------------------------------------------------------------------
/handwritten-digit-recognition-canvas-draw/.babelrc:
--------------------------------------------------------------------------------
1 | {
2 | "presets": [
3 | ["env", {
4 | "targets": {
5 | "browsers": ["last 2 Chrome versions"]
6 | }
7 | }]
8 | ]
9 | }
--------------------------------------------------------------------------------
/polynominal-fitting-curve/README.MD:
--------------------------------------------------------------------------------
1 | ### I. Training, Inference Regression Model in the Browser
2 |
3 | 
4 |
5 | 
6 | ### II. Installation and Run
7 |
8 | ```sh
9 | $ cd ./
10 | $ yarn
11 | $ yarn dev
12 | ```
13 |
14 | ```sh
15 | Open 127.0.0.1:1234
16 | ```
17 |
--------------------------------------------------------------------------------
/translation/python/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2018 Google LLC. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # =============================================================================
15 |
16 | from __future__ import absolute_import
17 | from __future__ import division
18 | from __future__ import print_function
19 |
--------------------------------------------------------------------------------
/translation/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "tfjs-examples-translation",
3 | "version": "0.1.0",
4 | "description": "",
5 | "main": "index.js",
6 | "license": "Apache-2.0",
7 | "private": true,
8 | "engines": {
9 | "node": ">=8.9.0"
10 | },
11 | "dependencies": {
12 | "@tensorflow/tfjs": "^0.14.1",
13 | "vega-embed": "^3.0.0"
14 | },
15 | "scripts": {
16 | "watch": "./serve.sh",
17 | "build": "cross-env NODE_ENV=production parcel build index.html --no-minify --public-url ./",
18 | "link-local": "yalc link",
19 | "postinstall": "yarn upgrade --pattern @tensorflow"
20 | },
21 | "devDependencies": {
22 | "babel-core": "^6.26.3",
23 | "babel-plugin-transform-runtime": "~6.23.0",
24 | "babel-polyfill": "~6.26.0",
25 | "babel-preset-env": "~1.6.1",
26 | "clang-format": "~1.2.2",
27 | "cross-env": "^5.1.6",
28 | "http-server": "~0.10.0",
29 | "parcel-bundler": "~1.10.3",
30 | "yalc": "~1.0.0-pre.22"
31 | }
32 | }
33 |
--------------------------------------------------------------------------------
/polynominal-fitting-curve/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "polynominal-fitting-curve",
3 | "version": "0.1.0",
4 | "description": "",
5 | "main": "app.js",
6 | "license": "Apache-2.0",
7 | "private": true,
8 | "engines": {
9 | "node": ">=8.9.0"
10 | },
11 | "dependencies": {
12 | "@tensorflow/tfjs": "^0.12.0",
13 | "vega-embed": "^3.2.0",
14 | "vega-lite": "^2.3.1"
15 | },
16 | "scripts": {
17 | "watch": "cross-env NODE_ENV=development parcel index.html --no-hmr --open",
18 | "dev": "cross-env NODE_ENV=development parcel index.html",
19 | "build": "cross-env NODE_ENV=production parcel build index.html --no-minify --public-url ./",
20 | "postinstall": "yarn upgrade --pattern @tensorflow"
21 | },
22 | "devDependencies": {
23 | "babel-plugin-transform-async-to-generator": "^6.24.1",
24 | "babel-plugin-transform-runtime": "~6.23.0",
25 | "babel-polyfill": "~6.26.0",
26 | "babel-preset-env": "~1.6.1",
27 | "clang-format": "~1.2.2",
28 | "cross-env": "^5.1.6",
29 | "parcel-bundler": "~1.8.1"
30 | }
31 | }
32 |
--------------------------------------------------------------------------------
/handwritten-digit-recognition-canvas-draw/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "handwritten-digit-recognition-canvas-draw",
3 | "version": "0.1.0",
4 | "description": "",
5 | "main": "app.js",
6 | "license": "Apache-2.0",
7 | "private": true,
8 | "engines": {
9 | "node": ">=8.9.0"
10 | },
11 | "dependencies": {
12 | "@tensorflow/tfjs": "^0.12.0",
13 | "jsdom": "^11.11.0",
14 | "vega-embed": "^3.2.0",
15 | "vega-lite": "^2.3.1",
16 | "xmldom": "^0.1.27"
17 | },
18 | "scripts": {
19 | "watch": "cross-env NODE_ENV=development parcel index.html --no-hmr --open",
20 | "dev": "cross-env NODE_ENV=development parcel index.html",
21 | "build": "cross-env NODE_ENV=production parcel build index.html --no-minify --public-url ./",
22 | "postinstall": "yarn upgrade --pattern @tensorflow"
23 | },
24 | "devDependencies": {
25 | "babel-plugin-transform-async-to-generator": "^6.24.1",
26 | "babel-plugin-transform-runtime": "~6.23.0",
27 | "babel-polyfill": "~6.26.0",
28 | "babel-preset-env": "^1.7.0",
29 | "clang-format": "~1.2.2",
30 | "cross-env": "^5.1.6",
31 | "parcel-bundler": "~1.8.1"
32 | }
33 | }
34 |
--------------------------------------------------------------------------------
/handwritten-digit-recognition-canvas-draw/inference.js:
--------------------------------------------------------------------------------
1 | import * as tf from "@tensorflow/tfjs";
2 |
3 | async function inference(imgArrayData, imgData) {
4 | const img2d = tf.tensor2d(imgArrayData, [28, 28]);
5 | const imgToInference = img2d.reshape([1, 28, 28, 1])
6 | let img = tf.fromPixels(imgData, 1);
7 | img = img.reshape([1, 28, 28, 1]);
8 | img = tf.cast(img, 'float32');
9 |
10 | // Load model
11 | const loadedModel = await tf.loadModel('localstorage://saved-model');
12 |
13 | if (loadedModel){
14 | const output = loadedModel.predict(imgToInference);
15 | const axis = 1;
16 | const predictions = Array.from(output.argMax(axis).dataSync());
17 | const labels = document.getElementsByClassName("number");
18 | for(let i=0; i< labels.length; i +=1 ) {
19 | labels[i].style.backgroundColor = "#fff";
20 | }
21 | const label = document.getElementById(`${predictions[0]}`);
22 | label.style.backgroundColor = "#ffa700";
23 | } else {
24 | alert('Can not find any models from storage, please train model before prediction')
25 | }
26 | }
27 |
28 | export default inference;
--------------------------------------------------------------------------------
/polynominal-fitting-curve/data-seed.js:
--------------------------------------------------------------------------------
1 | import * as tf from '@tensorflow/tfjs';
2 |
3 | /**
4 | *
5 | * @param {*} numOfPoints
6 | * @param {*} coeff
7 | * @param {*} sigma
8 | * @returns { x, y: nomalized_y }
9 | */
10 | function mockData(numOfPoints, coeff, sigma = 0.03) {
11 | return tf.tidy(() => {
12 | const [a, b, c, d, e] = [
13 | tf.scalar(coeff.a),
14 | tf.scalar(coeff.b),
15 | tf.scalar(coeff.c),
16 | tf.scalar(coeff.d),
17 | tf.scalar(coeff.e)
18 | ];
19 | const x = tf.randomUniform([numOfPoints], -1, 1)
20 | const [four, three] = [
21 | tf.scalar(4, 'int32'),
22 | tf.scalar(3, 'int32')
23 | ]
24 | // Polynominal Function: ax^4 + bx^3 + cx^2 + dx + e
25 | const y = a.mul(x.pow(four))
26 | .add(b.mul(x.pow(three)))
27 | .add(c.mul(x.square()))
28 | .add(d.mul(x))
29 | .add(e)
30 | .add(tf.randomNormal([numOfPoints], 0, sigma))
31 | const y_min = y.min()
32 | const y_max = y.max()
33 | const nomalized_y = y.sub(y_min).div(y_max.sub(y_min))
34 | return {
35 | x,
36 | y: nomalized_y,
37 | };
38 | });
39 | }
40 |
41 | export default mockData;
42 |
43 |
--------------------------------------------------------------------------------
/translation/dist/metadata.json:
--------------------------------------------------------------------------------
1 | {"input_token_index": {" ": 0, "!": 1, "'": 2, ",": 3, ".": 4, "?": 5, "A": 6, "C": 7, "D": 8, "F": 9, "H": 10, "I": 11, "J": 12, "L": 13, "M": 14, "N": 15, "O": 16, "R": 17, "S": 18, "T": 19, "U": 20, "V": 21, "W": 22, "Y": 23, "a": 24, "b": 25, "c": 26, "d": 27, "e": 28, "f": 29, "g": 30, "h": 31, "i": 32, "j": 33, "k": 34, "l": 35, "m": 36, "n": 37, "o": 38, "p": 39, "r": 40, "s": 41, "t": 42, "u": 43, "v": 44, "w": 45, "y": 46, "z": 47}, "target_token_index": {"\t": 0, "\n": 1, " ": 2, "!": 3, ",": 4, ".": 5, "?": 6, "A": 7, "B": 8, "C": 9, "D": 10, "E": 11, "F": 12, "G": 13, "H": 14, "I": 15, "K": 16, "L": 17, "M": 18, "N": 19, "O": 20, "Q": 21, "R": 22, "S": 23, "T": 24, "U": 25, "V": 26, "a": 27, "b": 28, "c": 29, "d": 30, "e": 31, "g": 32, "h": 33, "i": 34, "k": 35, "l": 36, "m": 37, "n": 38, "o": 39, "p": 40, "q": 41, "r": 42, "s": 43, "t": 44, "u": 45, "v": 46, "x": 47, "y": 48, "à": 49, "á": 50, "â": 51, "ã": 52, "é": 53, "ê": 54, "ì": 55, "í": 56, "ò": 57, "ó": 58, "ô": 59, "ù": 60, "ú": 61, "ă": 62, "Đ": 63, "đ": 64, "ĩ": 65, "ơ": 66, "ư": 67, "̀": 68, "̣": 69, "ạ": 70, "ả": 71, "ấ": 72, "ầ": 73, "ậ": 74, "ắ": 75, "ẹ": 76, "ẽ": 77, "ế": 78, "ề": 79, "ể": 80, "ễ": 81, "ệ": 82, "ỉ": 83, "ị": 84, "ọ": 85, "ố": 86, "ồ": 87, "ộ": 88, "ớ": 89, "ờ": 90, "Ở": 91, "ở": 92, "ỡ": 93, "ợ": 94, "ụ": 95, "ủ": 96, "ứ": 97, "ừ": 98, "ử": 99, "ự": 100}, "max_encoder_seq_length": 14, "max_decoder_seq_length": 29}
--------------------------------------------------------------------------------
/handwritten-digit-recognition-canvas-draw/README.MD:
--------------------------------------------------------------------------------
1 | ### Training, Inference CNN in the Browser
2 |
3 | | Resource | Link |
4 | | ------ | ------ |
5 | | Slide at Google I/O Extended | [Machine learning on Browser and TensorFlow for JavaScript](https://docs.google.com/presentation/d/1ZVO6Ripu0JL6d-aFTsUuDdlfJVKbBZC3xVupGrl_P3k/edit?usp=sharing) |
6 | | Features | [MNIST Data Features](https://storage.googleapis.com/learnjs-data/model-builder/mnist_images.png) |
7 | | Labels | [MNIST Data Labels](https://storage.googleapis.com/learnjs-data/model-builder/mnist_labels_uint8) |
8 |
9 |
10 |
11 |
12 | We will train a CNN in the browser with 2 hidden layers
13 |
14 | - **Layer Convolution**
15 | - input **[28, 28, 1]**
16 | - kernel size: **5**
17 | - Activation function: **ReLU**
18 | - number of Filters: **8**
19 | - strides: **1**
20 | - **Layer MaxPooling**
21 | - pool Size: **2**
22 | - strides: **2**
23 | - **Last Layer**
24 | - Softmax Layer with **10 classes**
25 |
26 | ### Installation and Run
27 |
28 | ```sh
29 | $ cd ./
30 | $ yarn
31 | $ yarn dev
32 | ```
33 |
34 | ```sh
35 | Open 127.0.0.1:1234
36 | ```
37 |
38 | ### Training
39 |
40 | 
41 |
--------------------------------------------------------------------------------
/translation/ui.js:
--------------------------------------------------------------------------------
1 | /**
2 | * @license
3 | * Copyright 2018 Google LLC. All Rights Reserved.
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | * =============================================================================
16 | */
17 |
18 | export function status(statusText) {
19 | console.log(statusText);
20 | document.getElementById('status').textContent = statusText;
21 | }
22 |
23 | export function setEnglish(text, translate) {
24 | document.getElementById('englishSentence').value = text;
25 | document.getElementById('frenchSentence').textContent = translate(text);
26 | }
27 |
28 | export function setTranslationFunction(translate) {
29 | const englishElement = document.getElementById('englishSentence');
30 | englishElement.addEventListener('input', (e) => {
31 | const inputSentence = englishElement.value;
32 | document.getElementById('frenchSentence').textContent =
33 | translate(inputSentence);
34 | });
35 | }
36 |
37 | export function disableLoadModelButtons() {
38 | document.getElementById('load-pretrained-remote').disabled = true;
39 | document.getElementById('load-pretrained-local').disabled = true;
40 | }
41 |
--------------------------------------------------------------------------------
/handwritten-digit-recognition-canvas-draw/app.js:
--------------------------------------------------------------------------------
1 | import inference from './inference';
2 |
3 | // Set up canvas
4 | const cv = new fabric.Canvas('cv')
5 | cv.isDrawingMode = true;
6 | cv.freeDrawingBrush.width = 20;
7 | cv.freeDrawingBrush.color = "#47494d";
8 | cv.backgroundColor = "#ffffff";
9 | cv.renderAll();
10 |
11 |
12 | // Add listener to button
13 | const clearBtn = document.getElementById("clear-btn");
14 | clearBtn.addEventListener('click', () => {
15 | cv.clear();
16 | })
17 |
18 | cv.on("mouse:up", () => {
19 | // Get canvas contents as url
20 | const scale = (1.) / 10.;
21 | const url = cv.toDataURL({
22 | format: 'png',
23 | multiplier: scale,
24 | });
25 | console.log('url', url);
26 | // FIXME
27 | const cv28 = document.createElement('canvas');
28 | cv28.width = 28;
29 | cv28.height = 28;
30 | const ctx = cv28.getContext('2d');
31 | const img = new Image;
32 | img.src = url;
33 | img.onload = () => {
34 | ctx.drawImage(img, 0, 0)
35 | const imgData = ctx.getImageData(0, 0, 28, 28);
36 | console.log('data', imgData);
37 | const imgDateLength = imgData.data.length
38 | // Loop through each pixel in chunk
39 |
40 | const imgArrayData = new Float32Array(28 * 28);
41 |
42 | for(let j = 0; j < imgDateLength / 4; j +=1) {
43 | // All channel has same value -> only need to read red channel
44 | const red_index = j * 4;
45 | // Nomarlize pixel value to [0, 1]
46 | imgArrayData[j] = imgData.data[red_index] / 255;
47 | }
48 |
49 | inference(imgArrayData, imgData);
50 | }
51 | })
52 |
53 |
--------------------------------------------------------------------------------
/polynominal-fitting-curve/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
16 |
17 |
18 | Polynominal Fitting Curve
19 | Test async
20 | Train
21 |
22 |
23 |
Original Data
24 |
25 | True coefficients:
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
Training
35 |
36 | Learned coefficients:
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
52 |
53 |
--------------------------------------------------------------------------------
/translation/serve.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # Copyright 2018 Google LLC. All Rights Reserved.
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | # =============================================================================
17 |
18 | # This script starts two HTTP servers on different ports:
19 | # * Port 1234 (using parcel) serves HTML and JavaScript.
20 | # * Port 1235 (using http-server) serves pretrained model resources.
21 | #
22 | # The reason for this arrangement is that Parcel currently has a limitation that
23 | # prevents it from serving the pretrained models; see
24 | # https://github.com/parcel-bundler/parcel/issues/1098. Once that issue is
25 | # resolved, a single Parcel server will be sufficient.
26 |
27 | NODE_ENV=development
28 | RESOURCE_PORT=1235
29 |
30 | # Ensure that http-server is available
31 | yarn
32 |
33 | echo Starting the pretrained model server...
34 | node_modules/http-server/bin/http-server dist --cors -p "${RESOURCE_PORT}" > /dev/null & HTTP_SERVER_PID=$!
35 |
36 | echo Starting the example html/js server...
37 | # This uses port 1234 by default.
38 | node_modules/parcel-bundler/bin/cli.js serve -d dist index.html --open --no-hmr --public-url /
39 |
40 | # When the Parcel server exits, kill the http-server too.
41 | kill $HTTP_SERVER_PID
42 |
43 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ### I. TensorFlow JS Experiments
2 |
3 | This year, I am a speaker at Google I/O Extended Hanoi 2018 and my topic is Machine learning on Browser and TensorFlow for JavaScript. I will experiment with TensorFlow.js more and update this repo frequently.
4 |
5 |
6 |
7 | | Resource | Link |
8 | | ------ | ------ |
9 | | Slide at Google I/O Extended | [Machine learning on Browser and TensorFlow for JavaScript](https://docs.google.com/presentation/d/1ZVO6Ripu0JL6d-aFTsUuDdlfJVKbBZC3xVupGrl_P3k/edit?usp=sharing) |
10 | | LiveStream at Google I/O Extended | [LiveStream at Google I/O Extended](https://www.youtube.com/watch?v=JWn2vESsRcY) |
11 |
12 | #### 1. Polynomial Fitting Curve
13 | ----
14 | 
15 |
16 | #### 2. Training, Inference CNN
17 | -----
18 |
19 |
20 |
21 | ### II. Installation and Run
22 |
23 | ```sh
24 | $ cd ./folder-name
25 | $ yarn
26 | $ yarn dev
27 | ```
28 |
29 | ```sh
30 | Open 127.0.0.1:1234
31 | ```
32 |
33 |
--------------------------------------------------------------------------------
/polynominal-fitting-curve/plot.js:
--------------------------------------------------------------------------------
1 | import renderChart from 'vega-embed';
2 |
3 | export async function plotData(element, x, y) {
4 | const xvals = await x.data();
5 | const yvals = await y.data();
6 |
7 | const values = Array.from(yvals).map((y, i) => {
8 | return {'x': xvals[i], 'y': yvals[i]};
9 | });
10 |
11 | const spec = {
12 | '$schema': 'https://vega.github.io/schema/vega-lite/v2.json',
13 | 'width': 300,
14 | 'height': 300,
15 | 'data': {'values': values},
16 | 'mark': 'point',
17 | 'encoding': {
18 | 'x': {'field': 'x', 'type': 'quantitative'},
19 | 'y': {'field': 'y', 'type': 'quantitative'}
20 | }
21 | };
22 | return renderChart(element, spec, {actions: false});
23 | }
24 |
25 | export async function plotPred(element, x, y, pred) {
26 | const xvals = await x.data();
27 | const yvals = await y.data();
28 | const predVals = await pred.data();
29 | const values = Array.from(yvals).map((y, i) => {
30 | return {'x': xvals[i], 'y': yvals[i], pred: predVals[i]};
31 | });
32 |
33 | const spec = {
34 | '$schema': 'https://vega.github.io/schema/vega-lite/v2.json',
35 | 'width': 300,
36 | 'height': 300,
37 | 'data': {'values': values},
38 | 'layer': [
39 | {
40 | 'mark': 'point',
41 | 'encoding': {
42 | 'x': {'field': 'x', 'type': 'quantitative'},
43 | 'y': {'field': 'y', 'type': 'quantitative'}
44 | }
45 | },
46 | {
47 | 'mark': 'line',
48 | 'encoding': {
49 | 'x': {'field': 'x', 'type': 'quantitative'},
50 | 'y': {'field': 'pred', 'type': 'quantitative'},
51 | 'color': {'value': 'tomato'}
52 | },
53 | }
54 | ]
55 | };
56 |
57 | return renderChart(element, spec, {actions: false});
58 | }
59 |
60 | export function renderCoeff(element, coeff) {
61 | document.querySelector(element).innerHTML =
62 | `a=${coeff.a.toFixed(3)}, b=${coeff.b.toFixed(3)}, c=${
63 | coeff.c.toFixed(3)}, d=${coeff.d.toFixed(3)}, e=${coeff.e.toFixed(3)} `;
64 | }
--------------------------------------------------------------------------------
/translation/loader.js:
--------------------------------------------------------------------------------
1 | /**
2 | * @license
3 | * Copyright 2018 Google LLC. All Rights Reserved.
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | * =============================================================================
16 | */
17 |
18 | import * as tf from '@tensorflow/tfjs';
19 | import * as ui from './ui';
20 |
21 | /**
22 | * Test whether a given URL is retrievable.
23 | */
24 | export async function urlExists(url) {
25 | ui.status('Testing url ' + url);
26 | try {
27 | const response = await fetch(url, {method: 'HEAD'});
28 | return response.ok;
29 | } catch (err) {
30 | return false;
31 | }
32 | }
33 |
34 | /**
35 | * Load pretrained model stored at a remote URL.
36 | *
37 | * @return An instance of `tf.Model` with model topology and weights loaded.
38 | */
39 | export async function loadHostedPretrainedModel(url) {
40 | ui.status('Loading pretrained model from ' + url);
41 | try {
42 | const model = await tf.loadModel(url);
43 | ui.status('Done loading pretrained model.');
44 | // We can't load a model twice due to
45 | // https://github.com/tensorflow/tfjs/issues/34
46 | // Therefore we remove the load buttons to avoid user confusion.
47 | ui.disableLoadModelButtons();
48 | return model;
49 | } catch (err) {
50 | console.error(err);
51 | ui.status('Loading pretrained model failed.');
52 | }
53 | }
54 |
55 | /**
56 | * Load metadata file stored at a remote URL.
57 | *
58 | * @return An object containing metadata as key-value pairs.
59 | */
60 | export async function loadHostedMetadata(url) {
61 | ui.status('Loading metadata from ' + url);
62 | try {
63 | const metadataJson = await fetch(url);
64 | const metadata = await metadataJson.json();
65 | ui.status('Done loading metadata.');
66 | return metadata;
67 | } catch (err) {
68 | console.error(err);
69 | ui.status('Loading metadata failed.');
70 | }
71 | }
72 |
--------------------------------------------------------------------------------
/polynominal-fitting-curve/app.js:
--------------------------------------------------------------------------------
1 | import * as tf from '@tensorflow/tfjs';
2 | import mockData from './data-seed';
3 | import { renderCoeff, plotData, plotPred } from './plot';
4 |
5 | // Step 1. Initialize Random Variables
6 | const a = tf.variable(tf.scalar(Math.random()))
7 | const b = tf.variable(tf.scalar(Math.random()))
8 | const c = tf.variable(tf.scalar(Math.random()))
9 | const d = tf.variable(tf.scalar(Math.random()))
10 | const e = tf.variable(tf.scalar(Math.random()))
11 |
12 | // Step 2. Create An Optimizer
13 | const numOfInterations = 2000;
14 | const learningRate = 0.1;
15 | const optimizer = tf.train.sgd(learningRate)
16 |
17 | // Step 3. Write Predict Function
18 | function predict(x) {
19 | return tf.tidy(() => {
20 | const four = tf.scalar(4, 'int32');
21 | const three = tf.scalar(3, 'int32');
22 | return a.mul(x.pow(four))
23 | .add(b.mul(x.pow(three)))
24 | .add(c.mul(x.square()))
25 | .add(d.mul(x))
26 | .add(e)
27 | })
28 | }
29 |
30 | // Step 4. Define Loss Function: Mean Square Error
31 | function calculateLoss(pred, labels) {
32 | return pred.sub(labels).square().mean();
33 | }
34 |
35 | // Step 5. Implement Training
36 | async function train(x, y, numOfInterations) {
37 | for (let i =0; i < numOfInterations; i += 1) {
38 | optimizer.minimize(() => {
39 | const pred = predict(x);
40 | const loss = calculateLoss(pred, y);
41 | console.log(`Epoch ${i} with loss: ${loss}`);
42 | return loss;
43 |
44 | });
45 | if ( i%5 == 0) {
46 | await updateChart();
47 | }
48 | // Important: Do not block main thread.
49 | await tf.nextFrame();
50 | }
51 | }
52 |
53 | const trueCoeff = { a: -0.7, b: -0.8, c: -0.2, d: 0.8, e: 0.4 }
54 | const trainData = mockData(100, trueCoeff);
55 |
56 | async function updateChart() {
57 | // Update realtime coeff
58 | renderCoeff('#trained .coeff', {
59 | a: a.dataSync()[0],
60 | b: b.dataSync()[0],
61 | c: c.dataSync()[0],
62 | d: d.dataSync()[0],
63 | e: e.dataSync()[0],
64 | });
65 |
66 | const pred = predict(trainData.x);
67 | await plotPred("#trained .plot", trainData.x, trainData.y, pred);
68 | // pred.dispose();
69 | }
70 |
71 |
72 | async function run() {
73 | // Display data
74 | console.log('trainData', trainData);
75 | renderCoeff("#data .coeff", trueCoeff);
76 | await plotData("#data .plot", trainData.x, trainData.y);
77 | // Training
78 | const trainButton = document.getElementsByClassName("train")[0];
79 | trainButton.addEventListener('click', async () => {
80 | await train(trainData.x, trainData.y, numOfInterations);
81 | })
82 | }
83 |
84 | run();
85 |
86 |
--------------------------------------------------------------------------------
/handwritten-digit-recognition-canvas-draw/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
59 |
60 | Clear
61 | Train
62 |
63 |
64 |
65 |
66 |
0
67 |
1
68 |
2
69 |
3
70 |
4
71 |
5
72 |
6
73 |
7
74 |
8
75 |
9
76 |
77 |
78 |
79 |
80 |
81 |
83 |
84 |
--------------------------------------------------------------------------------
/translation/build-resources.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================

# Builds resources for the sequence-to-sequence English-French translation demo.
# Note this is not necessary to run the demo, because we already provide hosted
# pre-built resources.
# Usage example: do this in the 'translation' directory:
#   ./build-resources.sh ~/ml-data/fra-eng/fra.txt
#
# You can specify the number of training epochs by using the --epochs flag.
# For example:
#   ./build-resources.sh ~/ml-data/fra-eng/fra.txt --epochs 10

set -e

DEMO_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

TRAIN_DATA_PATH="$1"
if [[ -z "${TRAIN_DATA_PATH}" ]]; then
  echo "ERROR: TRAIN_DATA_PATH is not specified."
  echo "You can download the training data with a command such as:"
  echo "  wget http://www.manythings.org/anki/fra-eng.zip"
  exit 1
fi
shift 1

if [[ ! -f "${TRAIN_DATA_PATH}" ]]; then
  echo "ERROR: Cannot find training data at path '${TRAIN_DATA_PATH}'"
  exit 1
fi

TRAIN_EPOCHS=100
while true; do
  if [[ "$1" == "--epochs" ]]; then
    TRAIN_EPOCHS=$2
    shift 2
  elif [[ -z "$1" ]]; then
    break
  else
    echo "ERROR: Unrecognized argument: $1"
    exit 1
  fi
done

RESOURCES_ROOT="${DEMO_DIR}/dist/resources"
rm -rf "${RESOURCES_ROOT}"
mkdir -p "${RESOURCES_ROOT}"

# Run Python script to generate the pretrained model and weights files.
# Make sure you install the tensorflowjs pip package first.

python "${DEMO_DIR}/python/translation.py" \
    "${TRAIN_DATA_PATH}" \
    --recurrent_initializer glorot_uniform \
    --artifacts_dir "${RESOURCES_ROOT}" \
    --epochs "${TRAIN_EPOCHS}"
# TODO(cais): This --recurrent_initializer is a workaround for the limitation
# in TensorFlow.js Layers that the default recurrent initializer "Orthogonal" is
# currently not supported. Remove this once "Orthogonal" becomes available.

# Quote the path so the build still works when the checkout lives in a
# directory containing spaces.
cd "${DEMO_DIR}"
yarn
yarn build

echo
echo "-----------------------------------------------------------"
echo "Resources written to ${RESOURCES_ROOT}."
echo "You can now run the demo with 'yarn watch'."
echo "-----------------------------------------------------------"
echo
--------------------------------------------------------------------------------
/translation/dist/index.html:
--------------------------------------------------------------------------------
1 |
17 |
18 |
19 |
20 |
21 | VietAI Seq2Seq Demo
22 |
23 |
24 |
25 |
26 |
27 |
31 |
32 |
88 |
89 |
90 |
91 |
92 |
93 |
94 |
95 |
96 |
97 |
98 |
99 |
100 |
101 |
102 | TensorFlow.js: Seq2Seq
103 |
104 |
105 |
106 |
107 |
108 |
109 | Status
110 | Standing by.
111 |
112 |
113 |
114 | Load Model
115 |
116 |
117 | Load local pretrained model
118 | Load host pretrained model
119 |
120 |
121 |
122 |
123 |
124 | Test Model
125 |
126 |
127 | EN:
128 |
129 |
130 |
131 | VN:
132 |
133 |
134 |
135 |
136 |
137 |
138 |
139 |
140 |
144 |
145 |
146 |
147 |
148 |
--------------------------------------------------------------------------------
/translation/index.html:
--------------------------------------------------------------------------------
1 |
17 |
18 |
19 |
20 |
21 | VietAI Seq2Seq Demo
22 |
23 |
24 |
25 |
26 |
27 |
33 |
34 |
94 |
95 |
96 |
97 |
98 |
99 |
100 |
101 |
102 |
103 |
104 |
105 |
106 |
107 |
108 | TensorFlow.js: Seq2Seq
109 |
110 |
111 |
112 |
113 |
114 |
115 | Status
116 | Standing by.
117 |
118 |
119 |
120 | Load Model
121 |
122 |
123 | Load local pretrained model
124 | Load host pretrained model
125 |
126 |
127 |
128 |
129 |
130 | Test Model
131 |
132 |
133 | EN:
134 |
135 |
136 |
137 | VN:
138 |
139 |
140 |
141 |
142 |
143 |
144 |
145 |
146 |
150 |
151 |
152 |
153 |
154 |
--------------------------------------------------------------------------------
/handwritten-digit-recognition-canvas-draw/train.js:
--------------------------------------------------------------------------------
1 | import GenerateData from './GenerateData';
2 | import * as tf from "@tensorflow/tfjs";
3 |
4 |
5 |
// Shared dataset instance; populated once by loadData() before training runs.
let data;

// Downloads and prepares the MNIST images/labels (see GenerateData.load()).
async function loadData() {
  data = new GenerateData();
  await data.load();
}
11 |
// Builds a small CNN, trains it on MNIST batches drawn from `data`, and saves
// the resulting model to browser local storage.
async function run() {
  // Define hyperparameters
  const NUM_EPOCHES = 400;
  const LEARNING_RATE = 0.15;
  const BATCH_SIZE = 100;
  const LINK_SAVE_MODEL = 'localstorage://saved-model';

  // Implement Sequential Neural Network
  const model = tf.sequential()

  // Output ((n + 2p -f) / s + 1) * ((n + 2p -f) / s + 1) * numsFilters
  //      = ((28 + 2 * 0 - 5) / 1 + 1) * ((28 + 2 * 0 - 5) / 1 + 1) * 8
  //      = (24 * 24 * 8)
  const conv2d_1 = tf.layers.conv2d({
    inputShape: [28, 28, 1],
    kernelSize: 5,
    filters: 8,
    strides: 1,
    activation: 'relu',
    kernelInitializer: 'varianceScaling'
  })

  // Add convolution layer 1
  model.add(conv2d_1)

  // Output ((n + 2p -f) / s + 1) * ((n + 2p -f) / s + 1) * numsFilters
  //      = ((24 + 2 * 0 - 2) / 2 + 1) * ((24 + 2 * 0 - 2) / 2 + 1) * 8
  //      = (12 * 12 * 8)
  const maxPooling2d_1 = tf.layers.maxPooling2d({
    poolSize: [2, 2],
    strides: [2, 2]
  })

  // Add max pooling layer 1
  model.add(maxPooling2d_1)

  // Output ((n + 2p -f) / s + 1) * ((n + 2p -f) / s + 1) * numsFilters
  //      = ((12 + 2 * 0 - 5) / 1 + 1) * ((12 + 2 * 0 - 5) / 1 + 1) * 16
  //      = (8 * 8 * 16)
  const conv2d_2 = tf.layers.conv2d({
    kernelSize: 5,
    filters: 16,
    strides: 1,
    activation: 'relu',
    kernelInitializer: 'varianceScaling'
  })

  // Add convolution layer 2
  model.add(conv2d_2)

  // Output ((n + 2p -f) / s + 1) * ((n + 2p -f) / s + 1) * numsFilters
  //      = ((8 + 2 * 0 - 2) / 2 + 1) * ((8 + 2 * 0 - 2) / 2 + 1) * 16
  //      = (4 * 4 * 16)
  const maxPooling2d_2 = tf.layers.maxPooling2d({
    poolSize: [2, 2],
    strides: [2, 2],
  })

  // Add max pooling layer 2
  model.add(maxPooling2d_2)

  // Flatten output to a 1D vector to pass through the fully-connected network
  model.add(tf.layers.flatten())

  // Dense layer with softmax over the 10 classes {0, 1, ..., 9}
  const dense = tf.layers.dense({
    units: 10,
    kernelInitializer: 'varianceScaling',
    activation: 'softmax'
  })

  // Add dense layer
  model.add(dense)

  // Define Optimizer
  const optimizer = tf.train.sgd(LEARNING_RATE);

  // Generate Model
  model.compile({
    optimizer,
    loss: 'categoricalCrossentropy',
    metrics: ['accuracy'],
  })

  for (let i = 0; i < NUM_EPOCHES; i++) {
    const [ trainBatch, validationData ] = tf.tidy(() => {
      const trainBatch = data.nextBatch("train", BATCH_SIZE)
      // Reshape from [BATCH_SIZE, 784] to [BATCH_SIZE, 28, 28, 1]
      trainBatch.images = trainBatch.images.reshape([BATCH_SIZE, 28, 28, 1])
      let validationData;
      // Run validation every 5 epochs
      if (i % 5 === 0) {
        validationData = data.nextBatch("test", BATCH_SIZE);
        validationData.images = validationData.images.reshape([BATCH_SIZE, 28, 28, 1]);
      }

      return [trainBatch, validationData]
    });

    const history = await model.fit(
      trainBatch.images, trainBatch.labels,
      {
        batchSize: BATCH_SIZE,
        // model.fit expects validationData as an [xs, ys] pair, not an
        // object; pass undefined on epochs where no validation batch exists.
        validationData: validationData
          ? [validationData.images, validationData.labels]
          : undefined,
        epochs: 1,
      }
    )
    const loss = history.history.loss[0]
    // NOTE(review): the 'acc' key matches tfjs 1.x history naming — confirm
    // against the installed @tensorflow/tfjs version.
    const acc = history.history.acc[0]

    if ((i + 1) % 50 === 0) {
      console.log(`Epoch ${i} with Loss is ${loss} with accuracy: ${acc}`);

      // Update UI progress bar.
      const progress = document.getElementById("progress");
      progress.style.width = `${(i + 1) / NUM_EPOCHES * 100}%`;
    }

    // Dispose to free GPU memory
    tf.dispose([ trainBatch, validationData ])

    // tf.nextFrame() returns a promise that resolves at the next call to
    // requestAnimationFrame(). By awaiting this promise we keep our model
    // training from blocking the main UI thread and freezing the browser.
    await tf.nextFrame();
  }

  console.log('Finish training...');
  const saveResult = await model.save(LINK_SAVE_MODEL);
  console.log(`Saved model to ${LINK_SAVE_MODEL}`)
}
144 |
// Loads the MNIST dataset, then starts the training loop.
async function boom() {
  await loadData();
  await run();
}

// Training is triggered from the UI, not on page load.
const trainBtn = document.getElementById("train-btn");
trainBtn.addEventListener('click', () => {
  boom();
});
154 |
155 |
156 |
157 |
158 |
// Memory-management demo: computes y = 2^2 + 1 inside tf.tidy().
const y = tf.tidy(() => {
  // Intermediates created here are cleaned up when the tidy ends.
  const one = tf.scalar(1);
  const squared = tf.scalar(2).square();

  console.log('numTensors (in tidy): ' + tf.memory().numTensors);

  // Only the tensor returned from the tidy callback survives the scope —
  // it becomes the value of `y`.
  return squared.add(one);
});

console.log('numTensors (outside tidy): ' + tf.memory().numTensors);
y.print();

// Expected output:
// numTensors (in tidy): 3
// numTensors (outside tidy): 1
// Tensor
//     5
--------------------------------------------------------------------------------
/handwritten-digit-recognition-canvas-draw/GenerateData.js:
--------------------------------------------------------------------------------
1 | import * as tf from '@tensorflow/tfjs';
2 |
// Downloads the MNIST sprite image + label blob hosted by Google and serves
// shuffled train/test mini-batches as tf tensors.
class GenerateData {
  constructor(props) {
    this.FEATURES_LINK = "https://storage.googleapis.com/learnjs-data/model-builder/mnist_images.png";
    this.LABELS_LINK = "https://storage.googleapis.com/learnjs-data/model-builder/mnist_labels_uint8";
    this.CLASSES_NUM = 10;
    this.TOTAL_DATASET_NUM = 65000;
    this.TRAIN_NUM = 55000;
    this.TEST_NUM = 10000;
    // 784 = 28 * 28 flattened pixels per image.
    this.IMG_SIZE = 784;
    // Number of sprite rows decoded per drawImage pass.
    this.CHUNK_SIZE = 5000;
    this.currentTrainIndex = 0;
    this.currentTestIndex = 0;
  }

  // Decodes the 784x65000 sprite image chunk-by-chunk into a Float32Array of
  // normalized pixel values, stored on this.datasetImgs.
  extractFeatures() {
    return new Promise((resolve, reject) => {
      const img = new Image();
      img.crossOrigin = '';
      img.src = this.FEATURES_LINK;

      // Create canvas to draw img
      const cv = document.createElement('canvas');
      const ctx = cv.getContext('2d');
      img.onload = () => {
        img.height = img.naturalHeight; // 65000
        img.width = img.naturalWidth;   // 784
        // Canvas only needs to hold one chunk at a time: 784 * 5000
        cv.width = img.width;
        cv.height = this.CHUNK_SIZE;

        // 4 bytes per float32 element.
        const datasetBuff = new ArrayBuffer(this.TOTAL_DATASET_NUM * this.IMG_SIZE * 4) // 65000 * 784 * 4

        for (let i = 0; i < this.TOTAL_DATASET_NUM / this.CHUNK_SIZE; i += 1) { // 13 chunks
          // Float32Array view over this chunk's slice of the shared buffer.
          const datasetBytesView = new Float32Array(datasetBuff,
            i * this.CHUNK_SIZE * this.IMG_SIZE * 4,
            this.IMG_SIZE * this.CHUNK_SIZE);

          // The dataset is one big image sized 784 * 65000 (width * height).
          // Slide a 784 * 5000 window from top to bottom -> 13 chunks.
          ctx.drawImage(img, 0, i * this.CHUNK_SIZE, img.width, this.CHUNK_SIZE, 0, 0, img.width, this.CHUNK_SIZE);
          const imgData = ctx.getImageData(0, 0, img.width, this.CHUNK_SIZE);
          const imgDataLength = imgData.data.length

          // Loop through each RGBA pixel in the chunk.
          for (let j = 0; j < imgDataLength / 4; j += 1) {
            // All channels hold the same value -> only read the red channel.
            const red_index = j * 4;
            // Normalize pixel value to [0, 1]
            datasetBytesView[j] = imgData.data[red_index] / 255;
          }
          console.log('Done Extracting Labels for chunk: ', i);
        }
        // 784 * 65000 -> each row is one flattened 28*28 image; each element
        // holds one normalized pixel value.
        this.datasetImgs = new Float32Array(datasetBuff);
        resolve();
      }
    })
  }

  // Fetches the one-hot label blob (65000 * 10 uint8) into this.labels.
  extractLabels() {
    return new Promise((resolve, reject) => {
      fetch(this.LABELS_LINK).then(res => {
        res.arrayBuffer().then(buff => {
          const labels = new Uint8Array(buff);
          this.labels = labels;
          console.log(labels);
          resolve();
        }).catch(err => reject(err))
      }).catch(err => reject(err))
    })
  }

  // Downloads both artifacts, then splits them into train/test arrays and
  // prepares shuffled index orders for batching.
  load() {
    return new Promise((resolve, reject) => {
      const promises = [this.extractFeatures(), this.extractLabels()]
      Promise.all(promises).then(() => {
        console.log("Finish extract datas and labels");
        // Generate shuffled train and test indices
        // Uint32Array(55000) / Uint32Array(10000) with shuffled indices
        this.trainIndicies = tf.util.createShuffledIndices(this.TRAIN_NUM);
        this.testIndicies = tf.util.createShuffledIndices(this.TEST_NUM);

        // Generate train and test images
        this.trainImgs = this.datasetImgs.slice(0, this.TRAIN_NUM * this.IMG_SIZE);
        this.testImgs = this.datasetImgs.slice(this.TRAIN_NUM * this.IMG_SIZE);

        // Generate train and test labels
        this.trainLabels = this.labels.slice(0, this.TRAIN_NUM * this.CLASSES_NUM);
        this.testLabels = this.labels.slice(this.TRAIN_NUM * this.CLASSES_NUM);
        resolve();
      }).catch(err => {
        reject(err);
      });
    });
  }

  // Returns the next { images, labels } tensor batch.
  // `type` is "train" or "test"; `batchSize` is the number of examples.
  nextBatch(type, batchSize) {
    let images;
    let labels;
    const batchImgs = new Float32Array(this.IMG_SIZE * batchSize);
    const batchLabels = new Uint8Array(this.CLASSES_NUM * batchSize);
    let idx;
    if (type === "train") {
      [ images, labels ] = [ this.trainImgs, this.trainLabels ];
      const newTrainIndex = this.currentTrainIndex + batchSize;
      idx = this.trainIndicies.slice(this.currentTrainIndex, newTrainIndex);
      this.currentTrainIndex = newTrainIndex;
    } else if (type === "test") {
      [ images, labels ] = [ this.testImgs, this.testLabels ];
      const newTestIndex = this.currentTestIndex + batchSize;
      // BUGFIX: test batches must use the test index permutation; the
      // previous code sliced trainIndicies, whose values (up to 54999)
      // overflow the 10000-example test arrays.
      idx = this.testIndicies.slice(this.currentTestIndex, newTestIndex);
      this.currentTestIndex = newTestIndex;
    }

    for (let i = 0; i < batchSize; i += 1) {
      const index = idx[i];
      const image = images.slice(index * this.IMG_SIZE, (index + 1) * this.IMG_SIZE)
      const label = labels.slice(index * this.CLASSES_NUM, (index + 1) * this.CLASSES_NUM)
      batchImgs.set(image, i * this.IMG_SIZE);
      batchLabels.set(label, i * this.CLASSES_NUM);
    }

    return {
      images: tf.tensor2d(batchImgs, [ batchSize, this.IMG_SIZE ]),
      labels: tf.tensor2d(batchLabels, [ batchSize, this.CLASSES_NUM ])
    }
  }
}
145 |
146 | export default GenerateData;
--------------------------------------------------------------------------------
/translation/index.js:
--------------------------------------------------------------------------------
1 | /**
2 | * @license
3 | * Copyright 2018 Google LLC. All Rights Reserved.
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | * =============================================================================
16 | */
17 |
18 | import * as tf from '@tensorflow/tfjs';
19 | import * as loader from './loader';
20 | import * as ui from './ui';
21 |
22 |
// NOTE(review): HOSTED_URLS and LOCAL_URLS currently point at the same
// localhost dev-server endpoints, so both UI buttons load the same model.
// Presumably HOSTED_URLS was meant to reference a remote host — confirm
// before deploying.
const HOSTED_URLS = {
  model:
      'http://localhost:1234/model.json',
  metadata:
      'http://localhost:1234/metadata.json'
};

const LOCAL_URLS = {
  model: 'http://localhost:1234/model.json',
  metadata: 'http://localhost:1234/metadata.json'
};
34 |
// Character-level seq2seq translator. Loads a trained Keras-style
// encoder-decoder LSTM model and runs greedy character-by-character decoding.
// NOTE(review): prepareEncoderModel/prepareDecoderModel index model.layers by
// fixed positions (2, 3, 4), so this class assumes the exact two-LSTM + dense
// topology produced by python/translation.py — confirm if the model changes.
class Translator {
  /**
   * Initializes the Translation demo.
   *
   * @param {{model: string, metadata: string}} urls URLs of the hosted
   *     model.json and metadata.json artifacts.
   * @returns {Promise<Translator>} this, once the model and metadata are
   *     loaded and the encoder/decoder sub-models are built.
   */
  async init(urls) {
    this.urls = urls;
    const model = await loader.loadHostedPretrainedModel(urls.model);
    await this.loadMetadata();
    this.prepareEncoderModel(model);
    this.prepareDecoderModel(model);
    return this;
  }

  /**
   * Fetches metadata.json and caches sequence lengths, the char->index
   * tables, and a reversed index->char table for decoding.
   */
  async loadMetadata() {
    const translationMetadata =
        await loader.loadHostedMetadata(this.urls.metadata);
    this.maxDecoderSeqLength = translationMetadata['max_decoder_seq_length'];
    this.maxEncoderSeqLength = translationMetadata['max_encoder_seq_length'];
    console.log('maxDecoderSeqLength = ' + this.maxDecoderSeqLength);
    console.log('maxEncoderSeqLength = ' + this.maxEncoderSeqLength);
    this.inputTokenIndex = translationMetadata['input_token_index'];
    this.targetTokenIndex = translationMetadata['target_token_index'];
    // Invert target_token_index: map output token indices back to characters.
    this.reverseTargetCharIndex =
        Object.keys(this.targetTokenIndex)
            .reduce(
                (obj, key) => (obj[this.targetTokenIndex[key]] = key, obj), {});
  }

  /**
   * Builds the inference-time encoder: maps the encoder input sequence to the
   * final [h, c] LSTM states taken from the trained model's layer 2 outputs.
   */
  prepareEncoderModel(model) {
    this.numEncoderTokens = model.input[0].shape[2];
    console.log('numEncoderTokens = ' + this.numEncoderTokens);

    const encoderInputs = model.input[0];
    const stateH = model.layers[2].output[1];
    const stateC = model.layers[2].output[2];
    const encoderStates = [stateH, stateC];

    this.encoderModel =
        tf.model({inputs: encoderInputs, outputs: encoderStates});
  }

  /**
   * Builds the inference-time decoder: one LSTM step that takes the previous
   * token plus [h, c] state inputs and returns the softmax over tokens plus
   * the updated [h, c] state, by re-applying the trained LSTM and dense
   * layers with fresh state-input placeholders.
   */
  prepareDecoderModel(model) {
    this.numDecoderTokens = model.input[1].shape[2];
    console.log('numDecoderTokens = ' + this.numDecoderTokens);

    const stateH = model.layers[2].output[1];
    const latentDim = stateH.shape[stateH.shape.length - 1];
    console.log('latentDim = ' + latentDim);
    const decoderStateInputH =
        tf.input({shape: [latentDim], name: 'decoder_state_input_h'});
    const decoderStateInputC =
        tf.input({shape: [latentDim], name: 'decoder_state_input_c'});
    const decoderStateInputs = [decoderStateInputH, decoderStateInputC];

    const decoderLSTM = model.layers[3];
    const decoderInputs = decoderLSTM.input[0];
    const applyOutputs =
        decoderLSTM.apply(decoderInputs, {initialState: decoderStateInputs});
    let decoderOutputs = applyOutputs[0];
    const decoderStateH = applyOutputs[1];
    const decoderStateC = applyOutputs[2];
    const decoderStates = [decoderStateH, decoderStateC];

    const decoderDense = model.layers[4];
    decoderOutputs = decoderDense.apply(decoderOutputs);
    this.decoderModel = tf.model({
      inputs: [decoderInputs].concat(decoderStateInputs),
      outputs: [decoderOutputs].concat(decoderStates)
    });
  }

  /**
   * Encode a string (e.g., a sentence) as a Tensor3D that can be fed directly
   * into the TensorFlow.js model.
   *
   * One-hot encodes each character into shape
   * [1, maxEncoderSeqLength, numEncoderTokens].
   * NOTE(review): when a character is missing from inputTokenIndex, the error
   * is only logged and the undefined index is still passed to encoded.set —
   * consider skipping or throwing instead.
   */
  encodeString(str) {
    const strLen = str.length;
    const encoded =
        tf.buffer([1, this.maxEncoderSeqLength, this.numEncoderTokens]);
    for (let i = 0; i < strLen; ++i) {
      if (i >= this.maxEncoderSeqLength) {
        console.error(
            'Input sentence exceeds maximum encoder sequence length: ' +
            this.maxEncoderSeqLength);
      }

      const tokenIndex = this.inputTokenIndex[str[i]];
      if (tokenIndex == null) {
        console.error(
            'Character not found in input token index: "' + tokenIndex + '"');
      }
      encoded.set(1, 0, i, tokenIndex);
    }
    return encoded.toTensor();
  }

  /**
   * Greedily decodes an encoded input sequence one character at a time,
   * starting from the '\t' start token, until '\n' or the maximum decoder
   * length is reached. Assumes batch size 1.
   * NOTE(review): intermediate tensors (outputTokens, logits, stale states)
   * are not disposed, so repeated translations leak memory.
   */
  decodeSequence(inputSeq) {
    // Encode the inputs state vectors.
    let statesValue = this.encoderModel.predict(inputSeq);

    // Generate empty target sequence of length 1.
    let targetSeq = tf.buffer([1, 1, this.numDecoderTokens]);
    // Populate the first character of the target sequence with the start
    // character.
    targetSeq.set(1, 0, 0, this.targetTokenIndex['\t']);

    // Sample loop for a batch of sequences.
    // (to simplify, here we assume that a batch of size 1).
    let stopCondition = false;
    let decodedSentence = '';
    while (!stopCondition) {
      const predictOutputs =
          this.decoderModel.predict([targetSeq.toTensor()].concat(statesValue));
      const outputTokens = predictOutputs[0];
      const h = predictOutputs[1];
      const c = predictOutputs[2];

      // Sample a token.
      // We know that outputTokens.shape is [1, 1, n], so no need for slicing.
      const logits = outputTokens.reshape([outputTokens.shape[2]]);
      const sampledTokenIndex = logits.argMax().dataSync()[0];
      const sampledChar = this.reverseTargetCharIndex[sampledTokenIndex];
      decodedSentence += sampledChar;

      // Exit condition: either hit max length or find stop character.
      if (sampledChar === '\n' ||
          decodedSentence.length > this.maxDecoderSeqLength) {
        stopCondition = true;
      }

      // Update the target sequence (of length 1).
      targetSeq = tf.buffer([1, 1, this.numDecoderTokens]);
      targetSeq.set(1, 0, 0, sampledTokenIndex);

      // Update states.
      statesValue = [h, c];
    }

    return decodedSentence;
  }

  /** Translate the given English sentence into the target language. */
  translate(inputSentence) {
    const inputSeq = this.encodeString(inputSentence);
    const decodedSentence = this.decodeSequence(inputSeq);
    return decodedSentence;
  }
}
183 |
/**
 * Loads the pretrained model and metadata, and registers the translation
 * function with the UI.
 *
 * Both the "remote" and "local" buttons perform identical setup against
 * different URL sets, so the wiring is shared via one helper.
 */
async function setupTranslator() {
  // Checks model availability at `urls`, and if reachable wires the button
  // with id `buttonId` to build a Translator and register it with the UI.
  const enableLoadButton = async (urls, buttonId) => {
    if (!(await loader.urlExists(urls.model))) {
      return;
    }
    ui.status('Model available: ' + urls.model);
    const button = document.getElementById(buttonId);
    button.addEventListener('click', async () => {
      const translator = await new Translator().init(urls);
      ui.setTranslationFunction(x => translator.translate(x));
      ui.setEnglish('Hello', x => translator.translate(x));
    });
    button.style.display = 'inline-block';
  };

  await enableLoadButton(HOSTED_URLS, 'load-pretrained-remote');
  await enableLoadButton(LOCAL_URLS, 'load-pretrained-local');

  ui.status('Standing by.');
}

setupTranslator();
215 |
--------------------------------------------------------------------------------
/translation/dist/model.json:
--------------------------------------------------------------------------------
1 | {
2 | "modelTopology": {
3 | "keras_version": "2.2.2",
4 | "backend": "tensorflow",
5 | "model_config": {
6 | "class_name": "Model",
7 | "config": {
8 | "name": "model_1",
9 | "layers": [{
10 | "name": "input_1",
11 | "class_name": "InputLayer",
12 | "config": {
13 | "batch_input_shape": [null, null, 48],
14 | "dtype": "float32",
15 | "sparse": false,
16 | "name": "input_1"
17 | },
18 | "inbound_nodes": []
19 | }, {
20 | "name": "input_2",
21 | "class_name": "InputLayer",
22 | "config": {
23 | "batch_input_shape": [null, null, 101],
24 | "dtype": "float32",
25 | "sparse": false,
26 | "name": "input_2"
27 | },
28 | "inbound_nodes": []
29 | }, {
30 | "name": "lstm_1",
31 | "class_name": "LSTM",
32 | "config": {
33 | "name": "lstm_1",
34 | "trainable": true,
35 | "return_sequences": false,
36 | "return_state": true,
37 | "go_backwards": false,
38 | "stateful": false,
39 | "unroll": false,
40 | "units": 256,
41 | "activation": "tanh",
42 | "recurrent_activation": "hard_sigmoid",
43 | "use_bias": true,
44 | "kernel_initializer": {
45 | "class_name": "VarianceScaling",
46 | "config": {
47 | "scale": 1.0,
48 | "mode": "fan_avg",
49 | "distribution": "uniform",
50 | "seed": null
51 | }
52 | },
53 | "recurrent_initializer": {
54 | "class_name": "VarianceScaling",
55 | "config": {
56 | "scale": 1.0,
57 | "mode": "fan_avg",
58 | "distribution": "uniform",
59 | "seed": null
60 | }
61 | },
62 | "bias_initializer": {
63 | "class_name": "Zeros",
64 | "config": {}
65 | },
66 | "unit_forget_bias": true,
67 | "kernel_regularizer": null,
68 | "recurrent_regularizer": null,
69 | "bias_regularizer": null,
70 | "activity_regularizer": null,
71 | "kernel_constraint": null,
72 | "recurrent_constraint": null,
73 | "bias_constraint": null,
74 | "dropout": 0.0,
75 | "recurrent_dropout": 0.0,
76 | "implementation": 1
77 | },
78 | "inbound_nodes": [
79 | [
80 | ["input_1", 0, 0, {}]
81 | ]
82 | ]
83 | }, {
84 | "name": "lstm_2",
85 | "class_name": "LSTM",
86 | "config": {
87 | "name": "lstm_2",
88 | "trainable": true,
89 | "return_sequences": true,
90 | "return_state": true,
91 | "go_backwards": false,
92 | "stateful": false,
93 | "unroll": false,
94 | "units": 256,
95 | "activation": "tanh",
96 | "recurrent_activation": "hard_sigmoid",
97 | "use_bias": true,
98 | "kernel_initializer": {
99 | "class_name": "VarianceScaling",
100 | "config": {
101 | "scale": 1.0,
102 | "mode": "fan_avg",
103 | "distribution": "uniform",
104 | "seed": null
105 | }
106 | },
107 | "recurrent_initializer": {
108 | "class_name": "VarianceScaling",
109 | "config": {
110 | "scale": 1.0,
111 | "mode": "fan_avg",
112 | "distribution": "uniform",
113 | "seed": null
114 | }
115 | },
116 | "bias_initializer": {
117 | "class_name": "Zeros",
118 | "config": {}
119 | },
120 | "unit_forget_bias": true,
121 | "kernel_regularizer": null,
122 | "recurrent_regularizer": null,
123 | "bias_regularizer": null,
124 | "activity_regularizer": null,
125 | "kernel_constraint": null,
126 | "recurrent_constraint": null,
127 | "bias_constraint": null,
128 | "dropout": 0.0,
129 | "recurrent_dropout": 0.0,
130 | "implementation": 1
131 | },
132 | "inbound_nodes": [
133 | [
134 | ["input_2", 0, 0, {}],
135 | ["lstm_1", 0, 1, {}],
136 | ["lstm_1", 0, 2, {}]
137 | ]
138 | ]
139 | }, {
140 | "name": "dense_1",
141 | "class_name": "Dense",
142 | "config": {
143 | "name": "dense_1",
144 | "trainable": true,
145 | "units": 101,
146 | "activation": "softmax",
147 | "use_bias": true,
148 | "kernel_initializer": {
149 | "class_name": "VarianceScaling",
150 | "config": {
151 | "scale": 1.0,
152 | "mode": "fan_avg",
153 | "distribution": "uniform",
154 | "seed": null
155 | }
156 | },
157 | "bias_initializer": {
158 | "class_name": "Zeros",
159 | "config": {}
160 | },
161 | "kernel_regularizer": null,
162 | "bias_regularizer": null,
163 | "activity_regularizer": null,
164 | "kernel_constraint": null,
165 | "bias_constraint": null
166 | },
167 | "inbound_nodes": [
168 | [
169 | ["lstm_2", 0, 0, {}]
170 | ]
171 | ]
172 | }],
173 | "input_layers": [
174 | ["input_1", 0, 0],
175 | ["input_2", 0, 0]
176 | ],
177 | "output_layers": [
178 | ["dense_1", 0, 0]
179 | ]
180 | }
181 | },
182 | "training_config": {
183 | "optimizer_config": {
184 | "class_name": "RMSprop",
185 | "config": {
186 | "lr": 0.0010000000474974513,
187 | "rho": 0.8999999761581421,
188 | "decay": 0.0,
189 | "epsilon": 1e-07
190 | }
191 | },
192 | "loss": "categorical_crossentropy",
193 | "metrics": [],
194 | "sample_weight_mode": null,
195 | "loss_weights": null
196 | }
197 | },
198 | "weightsManifest": [{
199 | "paths": ["group1-shard1of1.txt"],
200 | "weights": [{
201 | "name": "dense_1/kernel",
202 | "shape": [256, 101],
203 | "dtype": "float32"
204 | }, {
205 | "name": "dense_1/bias",
206 | "shape": [101],
207 | "dtype": "float32"
208 | }, {
209 | "name": "lstm_1/kernel",
210 | "shape": [48, 1024],
211 | "dtype": "float32"
212 | }, {
213 | "name": "lstm_1/recurrent_kernel",
214 | "shape": [256, 1024],
215 | "dtype": "float32"
216 | }, {
217 | "name": "lstm_1/bias",
218 | "shape": [1024],
219 | "dtype": "float32"
220 | }, {
221 | "name": "lstm_2/kernel",
222 | "shape": [101, 1024],
223 | "dtype": "float32"
224 | }, {
225 | "name": "lstm_2/recurrent_kernel",
226 | "shape": [256, 1024],
227 | "dtype": "float32"
228 | }, {
229 | "name": "lstm_2/bias",
230 | "shape": [1024],
231 | "dtype": "float32"
232 | }]
233 | }]
234 | }
--------------------------------------------------------------------------------
/translation/python/translation.py:
--------------------------------------------------------------------------------
1 | # Copyright 2018 Google LLC. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # =============================================================================
15 |
16 | """Train a simple LSTM model for character-level language translation.
17 |
18 | This is based on the Keras example at:
19 | https://github.com/keras-team/keras/blob/master/examples/lstm_seq2seq.py
20 |
21 | The training data can be downloaded with a command like the following example:
22 | wget http://www.manythings.org/anki/fra-eng.zip
23 | """
24 |
25 | from __future__ import absolute_import
26 | from __future__ import division
27 | from __future__ import print_function
28 |
29 | import argparse
30 | import io
31 | import json
32 | import os
33 |
34 | from keras.models import Model
35 | from keras.layers import Input, LSTM, Dense
36 | import numpy as np
37 | import tensorflowjs as tfjs
38 |
# Path to the tab-separated English/Vietnamese sentence-pair file
# (un-Pythonic trailing semicolon removed).
data_path = './vie.txt'
40 |
41 |
def read_data():
  """Read and vectorize the character-level translation corpus.

  Loads up to FLAGS.num_samples tab-separated ``input\\ttarget`` lines,
  builds sorted character vocabularies and token indices for the encoder
  and decoder, saves the token indices and maximum sequence lengths to
  ``metadata.json`` under FLAGS.artifacts_dir, and one-hot encodes the
  corpus into float32 tensors.

  Returns:
    A tuple of (input_texts, max_encoder_seq_length, max_decoder_seq_length,
    num_encoder_tokens, num_decoder_tokens, input_token_index,
    target_token_index, encoder_input_data, decoder_input_data,
    decoder_target_data).
  """
  # Vectorize the data.
  input_texts = []
  target_texts = []
  input_characters = set()
  target_characters = set()
  # Honor the `data_path` CLI positional when FLAGS provides it (previously it
  # was parsed but ignored); fall back to the module-level default so existing
  # notebook usage keeps working.
  corpus_path = getattr(FLAGS, 'data_path', None) or data_path
  # Use a context manager so the file handle is closed deterministically
  # (the original left it open).
  with io.open(corpus_path, 'r', encoding='utf-8') as f:
    lines = f.read().split('\n')
  for line in lines[: min(FLAGS.num_samples, len(lines) - 1)]:
    # NOTE(review): assumes exactly one tab per line; extra columns (as in
    # some Anki exports) would raise ValueError here — confirm data format.
    input_text, target_text = line.split('\t')
    # We use "tab" as the "start sequence" character for the targets, and "\n"
    # as "end sequence" character.
    target_text = '\t' + target_text + '\n'
    input_texts.append(input_text)
    target_texts.append(target_text)
    for char in input_text:
      if char not in input_characters:
        input_characters.add(char)
    for char in target_text:
      if char not in target_characters:
        target_characters.add(char)

  input_characters = sorted(list(input_characters))
  target_characters = sorted(list(target_characters))
  num_encoder_tokens = len(input_characters)
  num_decoder_tokens = len(target_characters)
  max_encoder_seq_length = max([len(txt) for txt in input_texts])
  max_decoder_seq_length = max([len(txt) for txt in target_texts])

  print('Number of samples:', len(input_texts))
  print('Number of unique input tokens:', num_encoder_tokens)
  print('Number of unique output tokens:', num_decoder_tokens)
  print('Max sequence length for inputs:', max_encoder_seq_length)
  print('Max sequence length for outputs:', max_decoder_seq_length)

  input_token_index = dict(
      [(char, i) for i, char in enumerate(input_characters)])
  target_token_index = dict(
      [(char, i) for i, char in enumerate(target_characters)])

  # Save the token indices to file so the JS side can encode/decode
  # consistently with this training run.
  metadata_json_path = os.path.join(
      FLAGS.artifacts_dir, 'metadata.json')
  if not os.path.isdir(os.path.dirname(metadata_json_path)):
    os.makedirs(os.path.dirname(metadata_json_path))
  with io.open(metadata_json_path, 'w', encoding='utf-8') as f:
    metadata = {
        'input_token_index': input_token_index,
        'target_token_index': target_token_index,
        'max_encoder_seq_length': max_encoder_seq_length,
        'max_decoder_seq_length': max_decoder_seq_length
    }
    f.write(json.dumps(metadata, ensure_ascii=False))
  print('Saved metadata at: %s' % metadata_json_path)

  # One-hot encode inputs and targets: (sample, timestep, token).
  encoder_input_data = np.zeros(
      (len(input_texts), max_encoder_seq_length, num_encoder_tokens),
      dtype='float32')
  decoder_input_data = np.zeros(
      (len(input_texts), max_decoder_seq_length, num_decoder_tokens),
      dtype='float32')
  decoder_target_data = np.zeros(
      (len(input_texts), max_decoder_seq_length, num_decoder_tokens),
      dtype='float32')

  for i, (input_text, target_text) in enumerate(zip(input_texts, target_texts)):
    for t, char in enumerate(input_text):
      encoder_input_data[i, t, input_token_index[char]] = 1.
    for t, char in enumerate(target_text):
      # decoder_target_data is ahead of decoder_input_data by one timestep
      decoder_input_data[i, t, target_token_index[char]] = 1.
      if t > 0:
        # decoder_target_data will be ahead by one timestep
        # and will not include the start character.
        decoder_target_data[i, t - 1, target_token_index[char]] = 1.

  return (input_texts, max_encoder_seq_length, max_decoder_seq_length,
          num_encoder_tokens, num_decoder_tokens,
          input_token_index, target_token_index,
          encoder_input_data, decoder_input_data, decoder_target_data)
123 |
124 |
def seq2seq_model(num_encoder_tokens, num_decoder_tokens, latent_dim):
  """Create a Keras model for the seq2seq translation.

  Args:
    num_encoder_tokens: Total number of distinct tokens in the inputs
      to the encoder.
    num_decoder_tokens: Total number of distinct tokens in the outputs
      to/from the decoder.
    latent_dim: Number of latent dimensions in the LSTMs (used by both the
      encoder and the decoder).

  Returns:
    encoder_inputs: Instance of `keras.Input`, symbolic tensor as input to
      the encoder LSTM.
    encoder_states: Instance of `keras.Input`, symbolic tensor for output
      states (h and c) from the encoder LSTM.
    decoder_inputs: Instance of `keras.Input`, symbolic tensor as input to
      the decoder LSTM.
    decoder_lstm: `keras.Layer` instance, the decoder LSTM.
    decoder_dense: `keras.Layer` instance, the Dense layer in the decoder.
    model: `keras.Model` instance, the entire translation model that can be
      used in training.
  """
  # Define an input sequence and process it.
  encoder_inputs = Input(shape=(None, num_encoder_tokens))
  encoder = LSTM(latent_dim,
                 return_state=True,
                 recurrent_initializer=FLAGS.recurrent_initializer)
  _, state_h, state_c = encoder(encoder_inputs)
  # We discard `encoder_outputs` and only keep the states.
  encoder_states = [state_h, state_c]

  # Set up the decoder, using `encoder_states` as initial state.
  decoder_inputs = Input(shape=(None, num_decoder_tokens))
  # We set up our decoder to return full output sequences,
  # and to return internal states as well. We don't use the
  # return states in the training model, but we will use them in inference.
  # Bug fix: use the `latent_dim` parameter here (the original read
  # FLAGS.latent_dim, silently ignoring the function argument).
  decoder_lstm = LSTM(latent_dim,
                      return_sequences=True,
                      return_state=True,
                      recurrent_initializer=FLAGS.recurrent_initializer)
  decoder_outputs, _, _ = decoder_lstm(decoder_inputs,
                                       initial_state=encoder_states)
  decoder_dense = Dense(num_decoder_tokens, activation='softmax')
  decoder_outputs = decoder_dense(decoder_outputs)

  # Define the model that will turn
  # `encoder_input_data` & `decoder_input_data` into `decoder_target_data`
  model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
  return (encoder_inputs, encoder_states, decoder_inputs, decoder_lstm,
          decoder_dense, model)
175 |
176 |
def decode_sequence(input_seq,
                    encoder_model,
                    decoder_model,
                    num_decoder_tokens,
                    target_begin_index,
                    reverse_target_char_index,
                    max_decoder_seq_length):
  """Greedily decode (translate) one encoded sentence.

  Args:
    input_seq: A `numpy.ndarray` of shape
      `(1, max_encoder_seq_length, num_encoder_tokens)`.
    encoder_model: A `keras.Model` instance for the encoder.
    decoder_model: A `keras.Model` instance for the decoder.
    num_decoder_tokens: Number of unique tokens for the decoder.
    target_begin_index: An `int`: the index of the decoder's begin token.
    reverse_target_char_index: Lookup table mapping `int` token index back
      to the target character.
    max_decoder_seq_length: Maximum sequence length the decoder may emit.

  Returns:
    The decoded (translated) sentence as a string.
  """
  # Run the encoder once to obtain the initial decoder states (h, c).
  states = encoder_model.predict(input_seq)

  # One-hot seed of length 1 holding the start-of-sequence token.
  target_seq = np.zeros((1, 1, num_decoder_tokens))
  target_seq[0, 0, target_begin_index] = 1.

  decoded_sentence = ''
  # Greedy sampling loop, batch size fixed at 1: feed the previously
  # sampled token back in until the stop character or the length cap.
  while True:
    output_tokens, h, c = decoder_model.predict([target_seq] + states)

    # Pick the highest-probability token at the last timestep.
    sampled_token_index = np.argmax(output_tokens[0, -1, :])
    sampled_char = reverse_target_char_index[sampled_token_index]
    decoded_sentence += sampled_char

    # Stop on the end-of-sequence character or when the output grows past
    # the maximum decoder length.
    if sampled_char == '\n' or len(decoded_sentence) > max_decoder_seq_length:
      break

    # Re-seed the length-1 target sequence with the sampled token and carry
    # the recurrent states forward.
    target_seq = np.zeros((1, 1, num_decoder_tokens))
    target_seq[0, 0, sampled_token_index] = 1.
    states = [h, c]

  return decoded_sentence
238 |
239 |
def main():
  """Train the seq2seq model, export TF.js artifacts, and sample translations.

  Reads and vectorizes the corpus, trains the encoder-decoder model on it,
  saves the model in TensorFlow.js format under FLAGS.artifacts_dir, then
  rebuilds separate encoder/decoder inference models and greedily decodes
  FLAGS.num_test_sentences training sentences as a qualitative check.
  """
  (input_texts, _, max_decoder_seq_length,
   num_encoder_tokens, num_decoder_tokens,
   __, target_token_index,
   encoder_input_data, decoder_input_data, decoder_target_data) = read_data()

  (encoder_inputs, encoder_states, decoder_inputs, decoder_lstm,
   decoder_dense, model) = seq2seq_model(
       num_encoder_tokens, num_decoder_tokens, FLAGS.latent_dim)

  # Run training.
  model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
  model.fit([encoder_input_data, decoder_input_data], decoder_target_data,
            batch_size=FLAGS.batch_size,
            epochs=FLAGS.epochs,
            validation_split=0.2)

  # Convert the trained Keras model to TensorFlow.js format.
  tfjs.converters.save_keras_model(model, FLAGS.artifacts_dir)

  # Next: inference mode (sampling).
  # Here's the drill:
  # 1) encode input and retrieve initial decoder state
  # 2) run one step of decoder with this initial state
  #    and a "start of sequence" token as target.
  #    Output will be the next target token
  # 3) Repeat with the current target token and current states

  # Encoder inference model: input sequence -> (h, c) state vectors.
  encoder_model = Model(encoder_inputs, encoder_states)

  # Decoder inference model: one step, given previous token and states.
  decoder_state_input_h = Input(shape=(FLAGS.latent_dim,))
  decoder_state_input_c = Input(shape=(FLAGS.latent_dim,))
  decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
  decoder_outputs, state_h, state_c = decoder_lstm(
      decoder_inputs, initial_state=decoder_states_inputs)
  decoder_states = [state_h, state_c]
  decoder_outputs = decoder_dense(decoder_outputs)
  decoder_model = Model(
      [decoder_inputs] + decoder_states_inputs,
      [decoder_outputs] + decoder_states)

  # Reverse-lookup token index to decode sequences back to
  # something readable.
  reverse_target_char_index = dict(
      (i, char) for char, i in target_token_index.items())

  target_begin_index = target_token_index['\t']

  for seq_index in range(FLAGS.num_test_sentences):
    # Take one sequence (part of the training set)
    # for trying out decoding.
    input_seq = encoder_input_data[seq_index: seq_index + 1]
    decoded_sentence = decode_sequence(
        input_seq, encoder_model, decoder_model, num_decoder_tokens,
        target_begin_index, reverse_target_char_index, max_decoder_seq_length)
    print('-')
    print('Input sentence:', input_texts[seq_index])
    print('Decoded sentence:', decoded_sentence)
300 |
301 |
if __name__ == '__main__':
  # Command-line entry point: define and parse flags, then run training,
  # TF.js export, and sample decoding via main().
  parser = argparse.ArgumentParser(
      'Keras seq2seq translation model training and serialization')
  # Required positional argument: location of the tab-separated corpus.
  parser.add_argument(
      'data_path',
      type=str,
      help='Path to the training data, e.g., ~/ml-data/fra-eng/fra.txt')
  parser.add_argument(
      '--batch_size',
      type=int,
      default=64,
      help='Training batch size.')
  parser.add_argument(
      '--epochs',
      type=int,
      default=100,
      help='Number of training epochs.')
  parser.add_argument(
      '--latent_dim',
      type=int,
      default=256,
      help='Latent dimensionality of the encoding space.')
  parser.add_argument(
      '--num_samples',
      type=int,
      default=10000,
      help='Number of samples to train on.')
  parser.add_argument(
      '--num_test_sentences',
      type=int,
      default=100,
      help='Number of example sentences to test at the end of the training.')
  # TODO(cais): This is a workaround for the limitation in TF.js Layers that the
  # default recurrent initializer "Orthogonal" is currently not supported.
  # Remove this once "Orthogonal" becomes available.
  parser.add_argument(
      '--recurrent_initializer',
      type=str,
      default='orthogonal',
      help='Custom initializer for recurrent kernels of LSTMs (e.g., '
      'glorot_uniform)')
  parser.add_argument(
      '--artifacts_dir',
      type=str,
      default='/tmp/translation.keras',
      help='Local path for saving the TensorFlow.js artifacts.')

  # parse_known_args ignores unrecognized flags (e.g. notebook/kernel args)
  # instead of erroring out; FLAGS is read as a module-level global elsewhere.
  FLAGS, _ = parser.parse_known_args()
  main()
351 |
--------------------------------------------------------------------------------
/translation/python/translation.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 6,
6 | "metadata": {},
7 | "outputs": [
8 | {
9 | "name": "stderr",
10 | "output_type": "stream",
11 | "text": [
12 | "Using TensorFlow backend.\n"
13 | ]
14 | },
15 | {
16 | "ename": "TypeError",
17 | "evalue": "__new__() got an unexpected keyword argument 'serialized_options'",
18 | "output_type": "error",
19 | "traceback": [
20 | "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
21 | "\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)",
22 | "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 8\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mos\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 9\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 10\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mkeras\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmodels\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mModel\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 11\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mkeras\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlayers\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mInput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mLSTM\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mDense\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 12\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mnumpy\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
23 | "\u001b[0;32m/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/keras/__init__.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0m__future__\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mabsolute_import\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 3\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0;34m.\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mutils\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 4\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0;34m.\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mactivations\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0;34m.\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mapplications\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
24 | "\u001b[0;32m/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/keras/utils/__init__.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0;34m.\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mdata_utils\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0;34m.\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mio_utils\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 6\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0;34m.\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mconv_utils\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 7\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 8\u001b[0m \u001b[0;31m# Globally-importable utils.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
25 | "\u001b[0;32m/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/keras/utils/conv_utils.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 7\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0msix\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmoves\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 8\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mnumpy\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 9\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0;34m.\u001b[0m\u001b[0;34m.\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mbackend\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mK\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 10\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 11\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
26 | "\u001b[0;32m/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/keras/backend/__init__.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 87\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0m_BACKEND\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m'tensorflow'\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 88\u001b[0m \u001b[0msys\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstderr\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mwrite\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'Using TensorFlow backend.\\n'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 89\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0;34m.\u001b[0m\u001b[0mtensorflow_backend\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 90\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 91\u001b[0m \u001b[0;31m# Try and load external backend.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
27 | "\u001b[0;32m/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0m__future__\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mprint_function\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 5\u001b[0;31m \u001b[0;32mimport\u001b[0m \u001b[0mtensorflow\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mtf\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 6\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mframework\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mops\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mtf_ops\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 7\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtraining\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mmoving_averages\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
28 | "\u001b[0;32m/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/__init__.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 22\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 23\u001b[0m \u001b[0;31m# pylint: disable=g-bad-import-order\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 24\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mpywrap_tensorflow\u001b[0m \u001b[0;31m# pylint: disable=unused-import\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 25\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 26\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
29 | "\u001b[0;32m/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/__init__.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 57\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 58\u001b[0m \u001b[0;31m# Protocol buffers\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 59\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mframework\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgraph_pb2\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 60\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mframework\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnode_def_pb2\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 61\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mframework\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msummary_pb2\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
30 | "\u001b[0;32m/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/core/framework/graph_pb2.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 14\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 15\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mframework\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mnode_def_pb2\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mtensorflow_dot_core_dot_framework_dot_node__def__pb2\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 16\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mframework\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mfunction_pb2\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mtensorflow_dot_core_dot_framework_dot_function__pb2\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 17\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mframework\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mversions_pb2\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mtensorflow_dot_core_dot_framework_dot_versions__pb2\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
31 | "\u001b[0;32m/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/core/framework/node_def_pb2.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 14\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 15\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mframework\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mattr_value_pb2\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mtensorflow_dot_core_dot_framework_dot_attr__value__pb2\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 16\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 17\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
32 | "\u001b[0;32m/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/core/framework/attr_value_pb2.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 14\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 15\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mframework\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mtensor_pb2\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mtensorflow_dot_core_dot_framework_dot_tensor__pb2\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 16\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mframework\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mtensor_shape_pb2\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mtensorflow_dot_core_dot_framework_dot_tensor__shape__pb2\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 17\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mframework\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mtypes_pb2\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mtensorflow_dot_core_dot_framework_dot_types__pb2\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
33 | "\u001b[0;32m/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/core/framework/tensor_pb2.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 14\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 15\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mframework\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mresource_handle_pb2\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mtensorflow_dot_core_dot_framework_dot_resource__handle__pb2\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 16\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mframework\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mtensor_shape_pb2\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mtensorflow_dot_core_dot_framework_dot_tensor__shape__pb2\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 17\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mframework\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mtypes_pb2\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mtensorflow_dot_core_dot_framework_dot_types__pb2\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
34 | "\u001b[0;32m/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/core/framework/resource_handle_pb2.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 20\u001b[0m \u001b[0msyntax\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'proto3'\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 21\u001b[0m \u001b[0mserialized_options\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0m_b\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'\\n\\030org.tensorflow.frameworkB\\016ResourceHandleP\\001Z=github.com/tensorflow/tensorflow/tensorflow/go/core/framework\\370\\001\\001'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 22\u001b[0;31m \u001b[0mserialized_pb\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0m_b\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'\\n/tensorflow/core/framework/resource_handle.proto\\x12\\ntensorflow\\\"r\\n\\x13ResourceHandleProto\\x12\\x0e\\n\\x06\\x64\\x65vice\\x18\\x01 \\x01(\\t\\x12\\x11\\n\\tcontainer\\x18\\x02 \\x01(\\t\\x12\\x0c\\n\\x04name\\x18\\x03 \\x01(\\t\\x12\\x11\\n\\thash_code\\x18\\x04 \\x01(\\x04\\x12\\x17\\n\\x0fmaybe_type_name\\x18\\x05 \\x01(\\tBn\\n\\x18org.tensorflow.frameworkB\\x0eResourceHandleP\\x01Z=github.com/tensorflow/tensorflow/tensorflow/go/core/framework\\xf8\\x01\\x01\\x62\\x06proto3'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 23\u001b[0m )\n\u001b[1;32m 24\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
35 | "\u001b[0;31mTypeError\u001b[0m: __new__() got an unexpected keyword argument 'serialized_options'"
36 | ]
37 | }
38 | ],
39 | "source": [
40 | "from __future__ import absolute_import\n",
41 | "from __future__ import division\n",
42 | "from __future__ import print_function\n",
43 | "\n",
44 | "import argparse\n",
45 | "import io\n",
46 | "import json\n",
47 | "import os\n",
48 | "\n",
49 | "from keras.models import Model\n",
50 | "from keras.layers import Input, LSTM, Dense\n",
51 | "import numpy as np\n",
52 | "import tensorflowjs as tfjs\n",
53 | "\n",
54 | "data_path = './vie.txt';"
55 | ]
56 | },
57 | {
58 | "cell_type": "code",
59 | "execution_count": 5,
60 | "metadata": {},
61 | "outputs": [
62 | {
63 | "name": "stderr",
64 | "output_type": "stream",
65 | "text": [
66 | "Using TensorFlow backend.\n"
67 | ]
68 | },
69 | {
70 | "ename": "TypeError",
71 | "evalue": "__new__() got an unexpected keyword argument 'serialized_options'",
72 | "output_type": "error",
73 | "traceback": [
74 | "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
75 | "\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)",
76 | "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mkeras\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmodels\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mModel\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
77 | "\u001b[0;32m/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/keras/__init__.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0m__future__\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mabsolute_import\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 3\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0;34m.\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mutils\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 4\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0;34m.\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mactivations\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0;34m.\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mapplications\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
78 | "\u001b[0;32m/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/keras/utils/__init__.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0;34m.\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mdata_utils\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0;34m.\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mio_utils\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 6\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0;34m.\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mconv_utils\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 7\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 8\u001b[0m \u001b[0;31m# Globally-importable utils.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
79 | "\u001b[0;32m/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/keras/utils/conv_utils.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 7\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0msix\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmoves\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 8\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mnumpy\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 9\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0;34m.\u001b[0m\u001b[0;34m.\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mbackend\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mK\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 10\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 11\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
80 | "\u001b[0;32m/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/keras/backend/__init__.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 87\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0m_BACKEND\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m'tensorflow'\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 88\u001b[0m \u001b[0msys\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstderr\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mwrite\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'Using TensorFlow backend.\\n'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 89\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0;34m.\u001b[0m\u001b[0mtensorflow_backend\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 90\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 91\u001b[0m \u001b[0;31m# Try and load external backend.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
81 | "\u001b[0;32m/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0m__future__\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mprint_function\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 5\u001b[0;31m \u001b[0;32mimport\u001b[0m \u001b[0mtensorflow\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mtf\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 6\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mframework\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mops\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mtf_ops\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 7\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtraining\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mmoving_averages\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
82 | "\u001b[0;32m/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/__init__.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 22\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 23\u001b[0m \u001b[0;31m# pylint: disable=g-bad-import-order\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 24\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mpywrap_tensorflow\u001b[0m \u001b[0;31m# pylint: disable=unused-import\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 25\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 26\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
83 | "\u001b[0;32m/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/__init__.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 57\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 58\u001b[0m \u001b[0;31m# Protocol buffers\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 59\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mframework\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgraph_pb2\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 60\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mframework\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnode_def_pb2\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 61\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mframework\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msummary_pb2\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
84 | "\u001b[0;32m/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/core/framework/graph_pb2.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 14\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 15\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mframework\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mnode_def_pb2\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mtensorflow_dot_core_dot_framework_dot_node__def__pb2\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 16\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mframework\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mfunction_pb2\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mtensorflow_dot_core_dot_framework_dot_function__pb2\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 17\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mframework\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mversions_pb2\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mtensorflow_dot_core_dot_framework_dot_versions__pb2\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
85 | "\u001b[0;32m/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/core/framework/node_def_pb2.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 14\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 15\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mframework\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mattr_value_pb2\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mtensorflow_dot_core_dot_framework_dot_attr__value__pb2\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 16\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 17\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
86 | "\u001b[0;32m/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/core/framework/attr_value_pb2.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 14\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 15\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mframework\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mtensor_pb2\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mtensorflow_dot_core_dot_framework_dot_tensor__pb2\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 16\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mframework\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mtensor_shape_pb2\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mtensorflow_dot_core_dot_framework_dot_tensor__shape__pb2\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 17\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mframework\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mtypes_pb2\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mtensorflow_dot_core_dot_framework_dot_types__pb2\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
87 | "\u001b[0;32m/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/core/framework/tensor_pb2.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 14\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 15\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mframework\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mresource_handle_pb2\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mtensorflow_dot_core_dot_framework_dot_resource__handle__pb2\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 16\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mframework\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mtensor_shape_pb2\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mtensorflow_dot_core_dot_framework_dot_tensor__shape__pb2\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 17\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mframework\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mtypes_pb2\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mtensorflow_dot_core_dot_framework_dot_types__pb2\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
88 | "\u001b[0;32m/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/core/framework/resource_handle_pb2.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 20\u001b[0m \u001b[0msyntax\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'proto3'\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 21\u001b[0m \u001b[0mserialized_options\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0m_b\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'\\n\\030org.tensorflow.frameworkB\\016ResourceHandleP\\001Z=github.com/tensorflow/tensorflow/tensorflow/go/core/framework\\370\\001\\001'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 22\u001b[0;31m \u001b[0mserialized_pb\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0m_b\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'\\n/tensorflow/core/framework/resource_handle.proto\\x12\\ntensorflow\\\"r\\n\\x13ResourceHandleProto\\x12\\x0e\\n\\x06\\x64\\x65vice\\x18\\x01 \\x01(\\t\\x12\\x11\\n\\tcontainer\\x18\\x02 \\x01(\\t\\x12\\x0c\\n\\x04name\\x18\\x03 \\x01(\\t\\x12\\x11\\n\\thash_code\\x18\\x04 \\x01(\\x04\\x12\\x17\\n\\x0fmaybe_type_name\\x18\\x05 \\x01(\\tBn\\n\\x18org.tensorflow.frameworkB\\x0eResourceHandleP\\x01Z=github.com/tensorflow/tensorflow/tensorflow/go/core/framework\\xf8\\x01\\x01\\x62\\x06proto3'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 23\u001b[0m )\n\u001b[1;32m 24\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
89 | "\u001b[0;31mTypeError\u001b[0m: __new__() got an unexpected keyword argument 'serialized_options'"
90 | ]
91 | }
92 | ],
93 | "source": [
94 | "from keras.models import Model"
95 | ]
96 | },
97 | {
98 | "cell_type": "code",
99 | "execution_count": null,
100 | "metadata": {},
101 | "outputs": [],
102 | "source": []
103 | }
104 | ],
105 | "metadata": {
106 | "kernelspec": {
107 | "display_name": "Python 3",
108 | "language": "python",
109 | "name": "python3"
110 | },
111 | "language_info": {
112 | "codemirror_mode": {
113 | "name": "ipython",
114 | "version": 3
115 | },
116 | "file_extension": ".py",
117 | "mimetype": "text/x-python",
118 | "name": "python",
119 | "nbconvert_exporter": "python",
120 | "pygments_lexer": "ipython3",
121 | "version": "3.6.5"
122 | }
123 | },
124 | "nbformat": 4,
125 | "nbformat_minor": 2
126 | }
127 |
--------------------------------------------------------------------------------