├── .gitignore ├── LICENSE ├── README.md ├── demo ├── figures │ ├── figure-animated-ring.gif │ ├── figure-data-selected.png │ ├── figure-discriminator-heatmap.png │ ├── figure-generator-transformation.png │ ├── figure-gradients-interplay.png │ ├── figure-icon-draw.png │ ├── figure-icon-edit.png │ ├── figure-icon-play.png │ ├── figure-icon-slow.png │ ├── figure-icon-step.png │ └── figure-paper-screenshot.png ├── ganlab.html ├── ganlab.ts ├── ganlab_drawing.ts ├── ganlab_evaluators.ts ├── ganlab_input_providers.ts ├── ganlab_models.ts ├── images │ ├── distribution-disjoint.png │ ├── distribution-gaussians.png │ ├── distribution-line.png │ ├── distribution-ring.png │ ├── github-mark.png │ └── share.png ├── index.html └── pretrained_models │ ├── pretrained_disjoint.json │ ├── pretrained_disjoint.weights.bin │ ├── pretrained_gaussians.json │ ├── pretrained_gaussians.weights.bin │ ├── pretrained_line.json │ ├── pretrained_line.weights.bin │ ├── pretrained_ring.json │ └── pretrained_ring.weights.bin ├── ganlab-teaser.png ├── lib ├── chartjs.d.ts ├── polymer-spec.ts └── support.js ├── output └── README.md ├── package.json ├── scripts ├── build-demo ├── deploy-demo └── watch-demo ├── tsconfig-dev.json ├── tsconfig.json └── yarn.lock /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules/ 2 | demo/bundle.js 3 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 
39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # GAN Lab: An Interactive, Visual Experimentation Tool for Generative Adversarial Networks 2 | 3 | By 4 | [Minsuk Kahng](http://minsuk.com), 5 | [Nikhil Thorat](https://twitter.com/nsthorat), 6 | [Polo Chau](https://www.cc.gatech.edu/~dchau/), 7 | [Fernanda Viégas](http://fernandaviegas.com/), and 8 | [Martin Wattenberg](http://www.bewitched.com/) 9 | 10 | ## Overview 11 | 12 | GAN Lab is a novel interactive visualization tool for anyone to learn and experiment with Generative Adversarial Networks (GANs), a popular class of complex deep learning models. With GAN Lab, you can interactively train GAN models for 2D data distributions and visualize their inner workings, similar to [TensorFlow Playground](http://playground.tensorflow.org/). 13 | 14 | GAN Lab uses [TensorFlow.js](https://js.tensorflow.org/), an in-browser GPU-accelerated deep learning library. Everything, from model training to visualization, is implemented with JavaScript. Users only need a web browser like Chrome to run GAN Lab. Our implementation approach significantly broadens people's access to interactive tools for deep learning. (A minimal TensorFlow.js training-step sketch is included after the Credit section below.) 15 | 16 | ![Screenshot of GAN Lab](ganlab-teaser.png) 17 | 18 | 19 | ## Working Demo 20 | 21 | Click the following link: 22 | 23 | [https://poloclub.github.io/ganlab/](https://poloclub.github.io/ganlab/) 24 | 25 | It runs on most modern web browsers. We suggest you use Google Chrome. 26 | 27 | 28 | ## Development 29 | 30 | This section describes how you can develop GAN Lab. 31 | 32 | ### Install Dependencies 33 | 34 | Run the following commands: 35 | 36 | ```bash 37 | $ git clone https://github.com/poloclub/ganlab.git 38 | $ cd ganlab 39 | $ yarn prep 40 | ``` 41 | 42 | It's unlikely, but you may first need to install some basic JavaScript tooling (e.g., yarn). 43 | 44 | 45 | ### Running Your Demo 46 | 47 | Run the following command: 48 | 49 | ```bash 50 | $ ./scripts/watch-demo 51 | 52 | >> Waiting for initial compile... 53 | >> 3462522 bytes written to demo/bundle.js (2.17 seconds) at 00:00:00 54 | >> Starting up http-server, serving ./ 55 | >> Available on: 56 | >> http://127.0.0.1:8080 57 | >> Hit CTRL-C to stop the server 58 | ``` 59 | 60 | Then visit `http://localhost:8080/demo/`. 61 | 62 | The `watch-demo` script watches for changes to the TypeScript code (e.g., `demo/ganlab.ts`) 63 | and recompiles it for you. 64 | 65 | 66 | ## Credit 67 | 68 | GAN Lab was created by 69 | [Minsuk Kahng](http://minsuk.com), 70 | [Nikhil Thorat](https://twitter.com/nsthorat), 71 | [Polo Chau](https://www.cc.gatech.edu/~dchau/), 72 | [Fernanda Viégas](http://www.fernandaviegas.com/), and 73 | [Martin Wattenberg](http://www.bewitched.com/), 74 | as the result of a research collaboration between Georgia Tech and Google Brain/[PAIR](https://ai.google/research/teams/brain/pair). 75 | We also thank Shan Carter and Daniel Smilkov, 76 | the [Google Big Picture team](https://research.google.com/bigpicture/) and 77 | [Google People + AI Research (PAIR)](https://ai.google/research/teams/brain/pair), and 78 | the [Georgia Tech Visualization Lab](http://vis.gatech.edu/) 79 | for their feedback.
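Below is a minimal sketch of the in-browser training approach mentioned in the Overview: a single GAN update step written against `@tensorflow/tfjs-core`, the same library the demo imports. This is an illustration only; the layer sizes, variable names, and plain log loss are assumptions chosen for readability and do not reproduce the actual model defined in `ganlab_models.ts`.

```ts
import * as tf from '@tensorflow/tfjs-core';

// Toy 2-D generator and discriminator, one hidden layer each (sizes are illustrative).
const gW1 = tf.variable(tf.randomNormal([2, 16]));
const gW2 = tf.variable(tf.randomNormal([16, 2]));
const dW1 = tf.variable(tf.randomNormal([2, 16]));
const dW2 = tf.variable(tf.randomNormal([16, 1]));

const generator = (z: tf.Tensor2D) =>
  tf.matMul(tf.relu(tf.matMul(z, gW1)), gW2) as tf.Tensor2D;
const discriminator = (x: tf.Tensor2D) =>
  tf.sigmoid(tf.matMul(tf.relu(tf.matMul(x, dW1)), dW2)) as tf.Tensor2D;

const dOptimizer = tf.train.sgd(0.1);
const gOptimizer = tf.train.sgd(0.1);

// One training iteration: a discriminator step followed by a generator step,
// mirroring the kDSteps / kGSteps loops in demo/ganlab.ts.
function trainStep(realBatch: tf.Tensor2D, noiseBatch: tf.Tensor2D) {
  // Discriminator step: minimize -[log D(x) + log(1 - D(G(z)))].
  dOptimizer.minimize(() => {
    const predReal = discriminator(realBatch);
    const predFake = discriminator(generator(noiseBatch));
    return tf.neg(tf.mean(tf.add(
      tf.log(predReal), tf.log(tf.sub(tf.scalar(1), predFake))))) as tf.Scalar;
  }, false, [dW1, dW2]);

  // Generator step: minimize -log D(G(z)), i.e., make fakes look real to D.
  gOptimizer.minimize(() => {
    const predFake = discriminator(generator(noiseBatch));
    return tf.neg(tf.mean(tf.log(predFake))) as tf.Scalar;
  }, false, [gW1, gW2]);
}
```

Wrapping each real training step in `tf.tidy()` (as `demo/ganlab.ts` does) keeps intermediate tensors from accumulating in GPU memory over long runs.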
80 | 81 | For more information, check out 82 | [our research paper](http://minsuk.com/research/papers/kahng-ganlab-vast2018.pdf): 83 | 84 | [Minsuk Kahng](http://minsuk.com), 85 | [Nikhil Thorat](https://twitter.com/nsthorat), 86 | [Polo Chau](https://www.cc.gatech.edu/~dchau/), 87 | [Fernanda Viégas](http://www.fernandaviegas.com/), and 88 | [Martin Wattenberg](http://www.bewitched.com/). 89 | "GAN Lab: Understanding Complex Deep Generative Models using Interactive Visual Experimentation." 90 | *IEEE Transactions on Visualization and Computer Graphics, 25(1) ([VAST 2018](http://ieeevis.org/year/2018/welcome))*, Jan. 2019. 91 | -------------------------------------------------------------------------------- /demo/figures/figure-animated-ring.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/poloclub/ganlab/b6deea8ec40c38ec2879f33b0dbe1645b59f4ef0/demo/figures/figure-animated-ring.gif -------------------------------------------------------------------------------- /demo/figures/figure-data-selected.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/poloclub/ganlab/b6deea8ec40c38ec2879f33b0dbe1645b59f4ef0/demo/figures/figure-data-selected.png -------------------------------------------------------------------------------- /demo/figures/figure-discriminator-heatmap.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/poloclub/ganlab/b6deea8ec40c38ec2879f33b0dbe1645b59f4ef0/demo/figures/figure-discriminator-heatmap.png -------------------------------------------------------------------------------- /demo/figures/figure-generator-transformation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/poloclub/ganlab/b6deea8ec40c38ec2879f33b0dbe1645b59f4ef0/demo/figures/figure-generator-transformation.png -------------------------------------------------------------------------------- /demo/figures/figure-gradients-interplay.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/poloclub/ganlab/b6deea8ec40c38ec2879f33b0dbe1645b59f4ef0/demo/figures/figure-gradients-interplay.png -------------------------------------------------------------------------------- /demo/figures/figure-icon-draw.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/poloclub/ganlab/b6deea8ec40c38ec2879f33b0dbe1645b59f4ef0/demo/figures/figure-icon-draw.png -------------------------------------------------------------------------------- /demo/figures/figure-icon-edit.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/poloclub/ganlab/b6deea8ec40c38ec2879f33b0dbe1645b59f4ef0/demo/figures/figure-icon-edit.png -------------------------------------------------------------------------------- /demo/figures/figure-icon-play.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/poloclub/ganlab/b6deea8ec40c38ec2879f33b0dbe1645b59f4ef0/demo/figures/figure-icon-play.png -------------------------------------------------------------------------------- /demo/figures/figure-icon-slow.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/poloclub/ganlab/b6deea8ec40c38ec2879f33b0dbe1645b59f4ef0/demo/figures/figure-icon-slow.png -------------------------------------------------------------------------------- /demo/figures/figure-icon-step.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/poloclub/ganlab/b6deea8ec40c38ec2879f33b0dbe1645b59f4ef0/demo/figures/figure-icon-step.png -------------------------------------------------------------------------------- /demo/figures/figure-paper-screenshot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/poloclub/ganlab/b6deea8ec40c38ec2879f33b0dbe1645b59f4ef0/demo/figures/figure-paper-screenshot.png -------------------------------------------------------------------------------- /demo/ganlab.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 624 | 1320 | 1321 | -------------------------------------------------------------------------------- /demo/ganlab.ts: -------------------------------------------------------------------------------- 1 | import * as d3 from 'd3-selection'; 2 | import { scaleSequential } from 'd3-scale'; 3 | import { interpolateGreens, interpolatePRGn } from 'd3-scale-chromatic'; 4 | import { line } from 'd3-shape'; 5 | import * as d3Transition from 'd3-transition'; 6 | 7 | import { PolymerElement, PolymerHTMLElement } from '../lib/polymer-spec'; 8 | import * as tf from '@tensorflow/tfjs-core'; 9 | 10 | import * as ganlab_input_providers from './ganlab_input_providers'; 11 | import * as ganlab_drawing from './ganlab_drawing'; 12 | import * as ganlab_evaluators from './ganlab_evaluators'; 13 | import * as ganlab_models from './ganlab_models'; 14 | 15 | const BATCH_SIZE = 150; 16 | const ATLAS_SIZE = 12000; 17 | 18 | const NUM_GRID_CELLS = 30; 19 | const NUM_MANIFOLD_CELLS = 20; 20 | const GRAD_ARROW_UNIT_LEN = 0.15; 21 | const NUM_TRUE_SAMPLES_VISUALIZED = 450; 22 | 23 | const VIS_INTERVAL = 50; 24 | const EPOCH_INTERVAL = 2; 25 | const SLOW_INTERVAL_MS = 1250; 26 | 27 | interface ManifoldCell { 28 | points: Float32Array[]; 29 | area?: number; 30 | } 31 | 32 | // tslint:disable-next-line:variable-name 33 | const GANLabPolymer: new () => PolymerHTMLElement = PolymerElement({ 34 | is: 'gan-lab', 35 | properties: { 36 | dLearningRate: Number, 37 | gLearningRate: Number, 38 | learningRateOptions: Array, 39 | dOptimizerType: String, 40 | gOptimizerType: String, 41 | optimizerTypeOptions: Array, 42 | lossType: String, 43 | lossTypeOptions: Array, 44 | selectedShapeName: String, 45 | shapeNames: Array, 46 | selectedNoiseType: String, 47 | noiseTypes: Array 48 | } 49 | }); 50 | 51 | class GANLab extends GANLabPolymer { 52 | private iterationCount: number; 53 | 54 | private noiseProvider: ganlab_input_providers.InputProvider; 55 | private trueSampleProvider: ganlab_input_providers.InputProvider; 56 | private uniformNoiseProvider: ganlab_input_providers.InputProvider; 57 | private uniformInputProvider: ganlab_input_providers.InputProvider; 58 | 59 | private usePretrained: boolean; 60 | 61 | private model: ganlab_models.GANLabModel; 62 | private noiseSize: number; 63 | private numGeneratorLayers: number; 64 | private numDiscriminatorLayers: number; 65 | private numGeneratorNeurons: number; 66 | private numDiscriminatorNeurons: number; 67 | private kDSteps: number; 68 | private kGSteps: number; 69 | 70 | private 
plotSizePx: number; 71 | 72 | private gDotsElementList: string[]; 73 | private highlightedComponents: HTMLDivElement[]; 74 | private highlightedTooltip: HTMLDivElement; 75 | 76 | private evaluator: ganlab_evaluators.GANLabEvaluatorGridDensities; 77 | 78 | private canvas: HTMLCanvasElement; 79 | private drawing: ganlab_drawing.GANLabDrawing; 80 | 81 | ready() { 82 | // HTML elements. 83 | const numGeneratorLayersElement = 84 | document.getElementById('num-g-layers') as HTMLElement; 85 | this.numGeneratorLayers = +numGeneratorLayersElement.innerText; 86 | document.getElementById('g-layers-add-button')!.addEventListener( 87 | 'click', () => { 88 | if (this.numGeneratorLayers < 5) { 89 | this.numGeneratorLayers += 1; 90 | numGeneratorLayersElement.innerText = 91 | this.numGeneratorLayers.toString(); 92 | this.disabledPretrainedMode(); 93 | this.createExperiment(); 94 | } 95 | }); 96 | document.getElementById('g-layers-remove-button')!.addEventListener( 97 | 'click', () => { 98 | if (this.numGeneratorLayers > 0) { 99 | this.numGeneratorLayers -= 1; 100 | numGeneratorLayersElement.innerText = 101 | this.numGeneratorLayers.toString(); 102 | this.disabledPretrainedMode(); 103 | this.createExperiment(); 104 | } 105 | }); 106 | 107 | const numDiscriminatorLayersElement = 108 | document.getElementById('num-d-layers') as HTMLElement; 109 | this.numDiscriminatorLayers = +numDiscriminatorLayersElement.innerText; 110 | document.getElementById('d-layers-add-button')!.addEventListener( 111 | 'click', () => { 112 | if (this.numDiscriminatorLayers < 5) { 113 | this.numDiscriminatorLayers += 1; 114 | numDiscriminatorLayersElement.innerText = 115 | this.numDiscriminatorLayers.toString(); 116 | this.disabledPretrainedMode(); 117 | this.createExperiment(); 118 | } 119 | }); 120 | document.getElementById('d-layers-remove-button')!.addEventListener( 121 | 'click', () => { 122 | if (this.numDiscriminatorLayers > 0) { 123 | this.numDiscriminatorLayers -= 1; 124 | numDiscriminatorLayersElement.innerText = 125 | this.numDiscriminatorLayers.toString(); 126 | this.disabledPretrainedMode(); 127 | this.createExperiment(); 128 | } 129 | }); 130 | 131 | const numGeneratorNeuronsElement = 132 | document.getElementById('num-g-neurons') as HTMLElement; 133 | this.numGeneratorNeurons = +numGeneratorNeuronsElement.innerText; 134 | document.getElementById('g-neurons-add-button').addEventListener( 135 | 'click', () => { 136 | if (this.numGeneratorNeurons < 100) { 137 | this.numGeneratorNeurons += 1; 138 | numGeneratorNeuronsElement.innerText = 139 | this.numGeneratorNeurons.toString(); 140 | this.disabledPretrainedMode(); 141 | this.createExperiment(); 142 | } 143 | }); 144 | document.getElementById('g-neurons-remove-button').addEventListener( 145 | 'click', () => { 146 | if (this.numGeneratorNeurons > 0) { 147 | this.numGeneratorNeurons -= 1; 148 | numGeneratorNeuronsElement.innerText = 149 | this.numGeneratorNeurons.toString(); 150 | this.disabledPretrainedMode(); 151 | this.createExperiment(); 152 | } 153 | }); 154 | 155 | const numDiscriminatorNeuronsElement = 156 | document.getElementById('num-d-neurons') as HTMLElement; 157 | this.numDiscriminatorNeurons = +numDiscriminatorNeuronsElement.innerText; 158 | document.getElementById('d-neurons-add-button').addEventListener( 159 | 'click', () => { 160 | if (this.numDiscriminatorNeurons < 100) { 161 | this.numDiscriminatorNeurons += 1; 162 | numDiscriminatorNeuronsElement.innerText = 163 | this.numDiscriminatorNeurons.toString(); 164 | this.disabledPretrainedMode(); 165 | 
this.createExperiment(); 166 | } 167 | }); 168 | document.getElementById('d-neurons-remove-button').addEventListener( 169 | 'click', () => { 170 | if (this.numDiscriminatorNeurons > 0) { 171 | this.numDiscriminatorNeurons -= 1; 172 | numDiscriminatorNeuronsElement.innerText = 173 | this.numDiscriminatorNeurons.toString(); 174 | this.disabledPretrainedMode(); 175 | this.createExperiment(); 176 | } 177 | }); 178 | 179 | const numKDStepsElement = 180 | document.getElementById('k-d-steps') as HTMLElement; 181 | this.kDSteps = +numKDStepsElement.innerText; 182 | document.getElementById('k-d-steps-add-button')!.addEventListener( 183 | 'click', () => { 184 | if (this.kDSteps < 10) { 185 | this.kDSteps += 1; 186 | numKDStepsElement.innerText = this.kDSteps.toString(); 187 | } 188 | }); 189 | document.getElementById('k-d-steps-remove-button')!.addEventListener( 190 | 'click', () => { 191 | if (this.kDSteps > 0) { 192 | this.kDSteps -= 1; 193 | numKDStepsElement.innerText = this.kDSteps.toString(); 194 | } 195 | }); 196 | 197 | const numKGStepsElement = 198 | document.getElementById('k-g-steps') as HTMLElement; 199 | this.kGSteps = +numKGStepsElement.innerText; 200 | document.getElementById('k-g-steps-add-button')!.addEventListener( 201 | 'click', () => { 202 | if (this.kGSteps < 10) { 203 | this.kGSteps += 1; 204 | numKGStepsElement.innerText = this.kGSteps.toString(); 205 | } 206 | }); 207 | document.getElementById('k-g-steps-remove-button')!.addEventListener( 208 | 'click', () => { 209 | if (this.kGSteps > 0) { 210 | this.kGSteps -= 1; 211 | numKGStepsElement.innerText = this.kGSteps.toString(); 212 | } 213 | }); 214 | 215 | this.lossTypeOptions = ['Log loss', 'LeastSq loss']; 216 | this.lossType = 'Log loss'; 217 | this.querySelector('#loss-type-dropdown')!.addEventListener( 218 | // tslint:disable-next-line:no-any event has no type 219 | 'iron-activate', (event: any) => { 220 | this.lossType = event.detail.selected; 221 | this.model.lossType = this.lossType; 222 | }); 223 | 224 | this.learningRateOptions = [0.001, 0.003, 0.01, 0.03, 0.1, 0.3, 1.0]; 225 | this.dLearningRate = 0.1; 226 | this.querySelector('#d-learning-rate-dropdown')!.addEventListener( 227 | // tslint:disable-next-line:no-any event has no type 228 | 'iron-activate', (event: any) => { 229 | this.dLearningRate = +event.detail.selected; 230 | this.model.updateOptimizer( 231 | 'D', this.dOptimizerType, this.dLearningRate); 232 | }); 233 | this.gLearningRate = 0.1; 234 | this.querySelector('#g-learning-rate-dropdown')!.addEventListener( 235 | // tslint:disable-next-line:no-any event has no type 236 | 'iron-activate', (event: any) => { 237 | this.gLearningRate = +event.detail.selected; 238 | this.model.updateOptimizer( 239 | 'G', this.gOptimizerType, this.gLearningRate); 240 | }); 241 | 242 | this.optimizerTypeOptions = ['SGD', 'Adam']; 243 | this.dOptimizerType = 'SGD'; 244 | this.querySelector('#d-optimizer-type-dropdown')!.addEventListener( 245 | // tslint:disable-next-line:no-any event has no type 246 | 'iron-activate', (event: any) => { 247 | this.dOptimizerType = event.detail.selected; 248 | this.model.updateOptimizer( 249 | 'D', this.dOptimizerType, this.dLearningRate); 250 | }); 251 | this.gOptimizerType = 'SGD'; 252 | this.querySelector('#g-optimizer-type-dropdown')!.addEventListener( 253 | // tslint:disable-next-line:no-any event has no type 254 | 'iron-activate', (event: any) => { 255 | this.gOptimizerType = event.detail.selected; 256 | this.model.updateOptimizer( 257 | 'G', this.gOptimizerType, this.gLearningRate); 258 | 
}); 259 | 260 | this.shapeNames = ['line', 'gaussians', 'ring', 'disjoint', 'drawing']; 261 | this.selectedShapeName = 'gaussians'; 262 | 263 | const distributionElementList = 264 | document.querySelectorAll('.distribution-item'); 265 | 266 | for (let i = 0; i < distributionElementList.length; ++i) { 267 | // tslint:disable-next-line:no-any event has no type 268 | distributionElementList[i].addEventListener('click', (event: any) => 269 | this.changeDataset(event.target), false); 270 | } 271 | 272 | this.noiseTypes = 273 | ['1D Uniform', '1D Gaussian', '2D Uniform', '2D Gaussian']; 274 | this.selectedNoiseType = '2D Uniform'; 275 | this.noiseSize = 2; 276 | this.querySelector('#noise-dropdown')!.addEventListener( 277 | // tslint:disable-next-line:no-any event has no type 278 | 'iron-activate', (event: any) => { 279 | this.selectedNoiseType = event.detail.selected; 280 | this.noiseSize = +this.selectedNoiseType.substring(0, 1); 281 | this.disabledPretrainedMode(); 282 | this.createExperiment(); 283 | }); 284 | 285 | // Checkbox toggles. 286 | const checkboxList = [ 287 | { 288 | graph: '#overlap-plots', 289 | description: '#toggle-right-discriminator', 290 | layer: '#vis-discriminator-output' 291 | }, 292 | { 293 | graph: '#enable-manifold', 294 | description: '#toggle-right-generator', 295 | layer: '#vis-manifold' 296 | }, 297 | { 298 | graph: '#show-t-samples', 299 | description: '#toggle-right-real-samples', 300 | layer: '#vis-true-samples' 301 | }, 302 | { 303 | graph: '#show-g-samples', 304 | description: '#toggle-right-fake-samples', 305 | layer: '#vis-generated-samples' 306 | }, 307 | { 308 | graph: '#show-g-gradients', 309 | description: '#toggle-right-gradients', 310 | layer: '#vis-generator-gradients' 311 | } 312 | ]; 313 | checkboxList.forEach(layer => { 314 | this.querySelector(layer.graph)!.addEventListener( 315 | 'change', (event: Event) => { 316 | const container = 317 | this.querySelector(layer.layer) as SVGGElement; 318 | // tslint:disable-next-line:no-any 319 | container.style.visibility = 320 | (event.target as any).checked ? 'visible' : 'hidden'; 321 | 322 | const element = 323 | this.querySelector(layer.description) as HTMLElement; 324 | // tslint:disable-next-line:no-any 325 | if ((event.target as any).checked) { 326 | element.classList.add('checked'); 327 | } else { 328 | element.classList.remove('checked'); 329 | } 330 | }); 331 | this.querySelector(layer.description)!.addEventListener( 332 | 'click', (event: Event) => { 333 | const spanElement = 334 | this.querySelector(layer.description) as HTMLElement; 335 | const container = 336 | this.querySelector(layer.layer) as HTMLElement; 337 | const element = 338 | this.querySelector(layer.graph) as HTMLInputElement; 339 | 340 | // tslint:disable-next-line:no-any 341 | if ((event.target as any).classList.contains('checked')) { 342 | spanElement.classList.remove('checked'); 343 | container.style.visibility = 'hidden'; 344 | element.checked = false; 345 | } else { 346 | spanElement.classList.add('checked'); 347 | container.style.visibility = 'visible' 348 | element.checked = true; 349 | } 350 | }); 351 | }); 352 | 353 | // Pre-trained checkbox. 354 | this.usePretrained = true; 355 | this.querySelector('#toggle-pretrained')!.addEventListener( 356 | 'change', (event: Event) => { 357 | // tslint:disable-next-line:no-any 358 | this.usePretrained = (event.target as any).checked; 359 | this.loadModelAndCreateExperiment(); 360 | }); 361 | 362 | // Timeline controls. 
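// Note on the handlers registered below: they drive the training loop itself
// rather than the model configuration. Play/pause toggles continuous training,
// reset rebuilds the experiment, the "next step" buttons run a single update
// (discriminator only, generator only, or both), and the step / slow / edit
// buttons toggle UI modes; each maps to an onClick* method defined later in
// this class.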
363 | document.getElementById('play-pause-button').addEventListener( 364 | 'click', () => this.onClickPlayPauseButton()); 365 | document.getElementById('reset-button').addEventListener( 366 | 'click', () => this.onClickResetButton()); 367 | 368 | document.getElementById('next-step-d-button').addEventListener( 369 | 'click', () => this.onClickNextStepButton('D')); 370 | document.getElementById('next-step-g-button').addEventListener( 371 | 'click', () => this.onClickNextStepButton('G')); 372 | document.getElementById('next-step-all-button').addEventListener( 373 | 'click', () => this.onClickNextStepButton()); 374 | 375 | this.stepMode = false; 376 | document.getElementById('next-step-button').addEventListener( 377 | 'click', () => this.onClickStepModeButton()); 378 | 379 | this.slowMode = false; 380 | document.getElementById('slow-step')!.addEventListener( 381 | 'click', () => this.onClickSlowModeButton()); 382 | 383 | this.editMode = true; 384 | document.getElementById('edit-model-button')!.addEventListener( 385 | 'click', () => this.onClickEditModeButton()); 386 | this.onClickEditModeButton(); 387 | 388 | this.iterCountElement = 389 | document.getElementById('iteration-count') as HTMLElement; 390 | 391 | document.getElementById('save-model')!.addEventListener( 392 | 'click', () => this.onClickSaveModelButton()); 393 | 394 | // Visualization. 395 | this.plotSizePx = 400; 396 | this.mediumPlotSizePx = 140; 397 | this.smallPlotSizePx = 50; 398 | 399 | this.colorScale = interpolatePRGn; 400 | 401 | this.gDotsElementList = [ 402 | '#vis-generated-samples', 403 | '#svg-generated-samples', 404 | '#svg-generated-prediction' 405 | ]; 406 | this.dFlowElements = 407 | this.querySelectorAll('.d-update-flow') as NodeListOf; 408 | this.gFlowElements = 409 | this.querySelectorAll('.g-update-flow') as NodeListOf; 410 | 411 | // Generator animation. 412 | document.getElementById('svg-generator-manifold')!.addEventListener( 413 | 'mouseenter', () => { 414 | this.playGeneratorAnimation(); 415 | }); 416 | 417 | // Drawing-related. 418 | this.canvas = 419 | document.getElementById('input-drawing-canvas') as HTMLCanvasElement; 420 | this.drawing = new ganlab_drawing.GANLabDrawing( 421 | this.canvas, this.plotSizePx); 422 | 423 | this.finishDrawingButton = 424 | document.getElementById('finish-drawing') as HTMLInputElement; 425 | this.finishDrawingButton.addEventListener( 426 | 'click', () => this.onClickFinishDrawingButton()); 427 | 428 | // Create a new experiment. 429 | this.loadModelAndCreateExperiment(); 430 | } 431 | 432 | private createExperiment() { 433 | // Reset. 
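// createExperiment() rebuilds everything from the current settings: it pauses
// training, resets the iteration counter, loss readouts, and charts, clears
// all existing D3 selections, regenerates the noise / true-sample /
// uniform-grid input providers, redraws the real and noise distributions,
// recreates the grid-density evaluator, and finally constructs a fresh
// GANLabModel with the selected optimizers and loss type.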
434 | this.pause(); 435 | this.iterationCount = 0; 436 | this.iterCountElement.innerText = this.zeroPad(this.iterationCount); 437 | 438 | this.isPausedOngoingIteration = false; 439 | 440 | document.getElementById('d-loss-value').innerText = '-'; 441 | document.getElementById('g-loss-value').innerText = '-'; 442 | document.getElementById('d-loss-bar').style.width = '0'; 443 | document.getElementById('g-loss-bar').style.width = '0'; 444 | this.recreateCharts(); 445 | 446 | const dataElements = [ 447 | d3.select('#vis-true-samples').selectAll('.true-dot'), 448 | d3.select('#svg-true-samples').selectAll('.true-dot'), 449 | d3.select('#svg-true-prediction').selectAll('.true-dot'), 450 | d3.select('#svg-noise').selectAll('.noise-dot'), 451 | d3.select('#vis-generated-samples').selectAll('.generated-dot'), 452 | d3.select('#svg-generated-samples').selectAll('.generated-dot'), 453 | d3.select('#svg-generated-prediction').selectAll('.generated-dot'), 454 | d3.select('#vis-discriminator-output').selectAll('.uniform-dot'), 455 | d3.select('#svg-discriminator-output').selectAll('.uniform-dot'), 456 | d3.select('#vis-manifold').selectAll('.uniform-generated-dot'), 457 | d3.select('#vis-manifold').selectAll('.manifold-cells'), 458 | d3.select('#vis-manifold').selectAll('.grids'), 459 | d3.select('#svg-generator-manifold').selectAll('.uniform-generated-dot'), 460 | d3.select('#svg-generator-manifold').selectAll('.manifold-cells'), 461 | d3.select('#svg-generator-manifold').selectAll('.grids'), 462 | d3.select('#vis-generator-gradients').selectAll('.gradient-generated'), 463 | d3.select('#svg-generator-gradients').selectAll('.gradient-generated') 464 | ]; 465 | dataElements.forEach((element) => { 466 | element.data([]).exit().remove(); 467 | }); 468 | 469 | // Input providers. 470 | const noiseProviderBuilder = 471 | new ganlab_input_providers.GANLabNoiseProviderBuilder( 472 | this.noiseSize, this.selectedNoiseType, 473 | ATLAS_SIZE, BATCH_SIZE); 474 | noiseProviderBuilder.generateAtlas(); 475 | this.noiseProvider = noiseProviderBuilder.getInputProvider(); 476 | this.noiseProviderFixed = noiseProviderBuilder.getInputProvider(true); 477 | 478 | const drawingPositions = this.drawing.drawingPositions; 479 | const trueSampleProviderBuilder = 480 | new ganlab_input_providers.GANLabTrueSampleProviderBuilder( 481 | ATLAS_SIZE, this.selectedShapeName, 482 | drawingPositions, BATCH_SIZE); 483 | trueSampleProviderBuilder.generateAtlas(); 484 | this.trueSampleProvider = trueSampleProviderBuilder.getInputProvider(); 485 | this.trueSampleProviderFixed = 486 | trueSampleProviderBuilder.getInputProvider(true); 487 | 488 | if (this.noiseSize <= 2) { 489 | const uniformNoiseProviderBuilder = 490 | new ganlab_input_providers.GANLabUniformNoiseProviderBuilder( 491 | this.noiseSize, NUM_MANIFOLD_CELLS, BATCH_SIZE); 492 | uniformNoiseProviderBuilder.generateAtlas(); 493 | if (this.selectedNoiseType === '2D Gaussian') { 494 | this.densitiesForGaussian = 495 | uniformNoiseProviderBuilder.calculateDensitiesForGaussian(); 496 | } 497 | this.uniformNoiseProvider = 498 | uniformNoiseProviderBuilder.getInputProvider(); 499 | } 500 | 501 | const uniformSampleProviderBuilder = 502 | new ganlab_input_providers.GANLabUniformSampleProviderBuilder( 503 | NUM_GRID_CELLS, BATCH_SIZE); 504 | uniformSampleProviderBuilder.generateAtlas(); 505 | this.uniformInputProvider = uniformSampleProviderBuilder.getInputProvider(); 506 | 507 | // Visualize true samples. 
508 | this.visualizeTrueDistribution(trueSampleProviderBuilder.getInputAtlas()); 509 | 510 | // Visualize noise samples. 511 | this.visualizeNoiseDistribution(noiseProviderBuilder.getNoiseSample()); 512 | 513 | // Initialize evaluator. 514 | this.evaluator = 515 | new ganlab_evaluators.GANLabEvaluatorGridDensities(NUM_GRID_CELLS); 516 | this.evaluator.createGridsForTrue( 517 | trueSampleProviderBuilder.getInputAtlas(), NUM_TRUE_SAMPLES_VISUALIZED); 518 | 519 | // Prepare for model. 520 | this.model = new ganlab_models.GANLabModel( 521 | this.noiseSize, this.numGeneratorLayers, this.numDiscriminatorLayers, 522 | this.numGeneratorNeurons, this.numDiscriminatorNeurons, 523 | BATCH_SIZE, this.lossType); 524 | this.model.initializeModelVariables(); 525 | this.model.updateOptimizer('D', this.dOptimizerType, this.dLearningRate); 526 | this.model.updateOptimizer('G', this.gOptimizerType, this.gLearningRate); 527 | } 528 | 529 | private changeDataset(element: HTMLElement) { 530 | this.selectedShapeName = element.getAttribute('data-distribution-name'); 531 | 532 | const distributionElementList = 533 | document.querySelectorAll('.distribution-item'); 534 | for (let i = 0; i < distributionElementList.length; ++i) { 535 | if (distributionElementList[i].classList.contains('selected')) { 536 | distributionElementList[i].classList.remove('selected'); 537 | } 538 | } 539 | if (!element.classList.contains('selected')) { 540 | element.classList.add('selected'); 541 | } 542 | 543 | this.disabledPretrainedMode(); 544 | this.loadModelAndCreateExperiment(); 545 | } 546 | 547 | private loadModelAndCreateExperiment() { 548 | if (this.selectedShapeName === 'drawing') { 549 | this.pause(); 550 | this.drawing.prepareDrawing(); 551 | this.disabledPretrainedMode(); 552 | } else if (this.usePretrained === true) { 553 | const filename = `pretrained_${this.selectedShapeName}`; 554 | this.loadPretrainedWeightFile(filename).then((loadedModel) => { 555 | const loadedIterCount = this.iterationCount; 556 | 557 | this.createExperiment(); 558 | this.model.loadPretrainedWeights(loadedModel); 559 | 560 | // Run one iteration for visualization. 561 | this.isPlaying = true; 562 | this.iterateTraining(false); 563 | this.isPlaying = false; 564 | 565 | this.iterationCount = loadedIterCount; 566 | this.iterCountElement.innerText = this.zeroPad(this.iterationCount); 567 | }); 568 | } else { 569 | const filename = `pretrained_${this.selectedShapeName}`; 570 | this.loadPretrainedWeightFile(filename).then((loadedModel) => { 571 | this.createExperiment(); 572 | }); 573 | } 574 | } 575 | 576 | private visualizeTrueDistribution(inputAtlasList: number[]) { 577 | const color = scaleSequential(interpolateGreens) 578 | .domain([0, 0.05]); 579 | 580 | const trueDistribution: Array<[number, number]> = []; 581 | while (trueDistribution.length < NUM_TRUE_SAMPLES_VISUALIZED) { 582 | const values = inputAtlasList.splice(0, 2); 583 | trueDistribution.push([values[0], values[1]]); 584 | } 585 | 586 | const trueDotsElementList = [ 587 | '#vis-true-samples', 588 | '#svg-true-samples', 589 | ]; 590 | trueDotsElementList.forEach((dotsElement, k) => { 591 | const plotSizePx = k === 0 ? this.plotSizePx : this.smallPlotSizePx; 592 | const radius = k === 0 ? 
2 : 1; 593 | d3.select(dotsElement) 594 | .selectAll('.true-dot') 595 | .data(trueDistribution) 596 | .enter() 597 | .append('circle') 598 | .attr('class', 'true-dot gan-lab') 599 | .attr('r', radius) 600 | .attr('cx', (d: number[]) => d[0] * plotSizePx) 601 | .attr('cy', (d: number[]) => (1.0 - d[1]) * plotSizePx) 602 | .append('title') 603 | .text((d: number[]) => `${d[0].toFixed(2)}, ${d[1].toFixed(2)}`); 604 | }); 605 | } 606 | 607 | private visualizeNoiseDistribution(inputList: Float32Array) { 608 | const noiseSamples: number[][] = []; 609 | for (let i = 0; i < inputList.length / this.noiseSize; ++i) { 610 | const values = []; 611 | for (let j = 0; j < this.noiseSize; ++j) { 612 | values.push(inputList[i * this.noiseSize + j]); 613 | } 614 | noiseSamples.push(values); 615 | } 616 | 617 | d3.select('#svg-noise') 618 | .selectAll('.noise-dot') 619 | .data(noiseSamples) 620 | .enter() 621 | .append('circle') 622 | .attr('class', 'noise-dot gan-lab') 623 | .attr('r', 1) 624 | .attr('cx', (d: number[]) => d[0] * this.smallPlotSizePx) 625 | .attr('cy', (d: number[]) => this.noiseSize === 1 626 | ? this.smallPlotSizePx / 2 627 | : (1.0 - d[1]) * this.smallPlotSizePx) 628 | .append('title') 629 | .text((d: number[], i: number) => this.noiseSize === 1 630 | ? `${Number(d[0]).toFixed(2)} (${i})` 631 | : `${Number(d[0]).toFixed(2)},${Number(d[1]).toFixed(2)} (${i})`); 632 | } 633 | 634 | private onClickFinishDrawingButton() { 635 | if (this.drawing.drawingPositions.length === 0) { 636 | alert('Draw something on canvas'); 637 | } else { 638 | const drawingElement = 639 | this.querySelector('#drawing-container') as HTMLElement; 640 | drawingElement.style.display = 'none'; 641 | const drawingBackgroundElement = 642 | this.querySelector('#drawing-disable-background') as HTMLDivElement; 643 | drawingBackgroundElement.style.display = 'none'; 644 | this.createExperiment(); 645 | } 646 | } 647 | 648 | private disabledPretrainedMode() { 649 | this.usePretrained = false; 650 | const element = 651 | document.getElementById('toggle-pretrained') as HTMLInputElement; 652 | element.checked = false; 653 | } 654 | 655 | private play() { 656 | if (this.stepMode) { 657 | this.onClickStepModeButton(); 658 | } 659 | 660 | this.isPlaying = true; 661 | document.getElementById('play-pause-button')!.classList.add('playing'); 662 | if (!this.isPausedOngoingIteration) { 663 | this.iterateTraining(true); 664 | } 665 | document.getElementById('model-vis-svg').classList.add('playing'); 666 | } 667 | 668 | private pause() { 669 | // Extra iteration for visualization. 
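// If training was running, isPlaying is still true at this point, so the call
// below performs one final non-looping pass that refreshes the plots and loss
// readouts with the latest weights; otherwise iterateTraining() returns
// immediately.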
670 | this.iterateTraining(false); 671 | this.isPlaying = false; 672 | const button = document.getElementById('play-pause-button'); 673 | if (button.classList.contains('playing')) { 674 | button.classList.remove('playing'); 675 | } 676 | document.getElementById('model-vis-svg').classList.remove('playing'); 677 | } 678 | 679 | private onClickPlayPauseButton() { 680 | if (this.isPlaying) { 681 | this.pause(); 682 | } else { 683 | this.play(); 684 | } 685 | } 686 | 687 | private onClickNextStepButton(type?: string) { 688 | if (this.isPlaying) { 689 | this.pause(); 690 | } 691 | this.isPlaying = true; 692 | this.iterateTraining(false, type); 693 | this.isPlaying = false; 694 | } 695 | 696 | private onClickResetButton() { 697 | if (this.isPlaying) { 698 | this.pause(); 699 | } 700 | this.loadModelAndCreateExperiment(); 701 | } 702 | 703 | private onClickStepModeButton() { 704 | if (!this.stepMode) { 705 | if (this.isPlaying) { 706 | this.pause(); 707 | } 708 | if (this.slowMode) { 709 | this.onClickSlowModeButton(); 710 | } 711 | 712 | this.stepMode = true; 713 | document.getElementById('next-step-button') 714 | .classList.add('mdl-button--colored'); 715 | document.getElementById('step-buttons').style.display = 'block'; 716 | } else { 717 | this.stepMode = false; 718 | document.getElementById('next-step-button') 719 | .classList.remove('mdl-button--colored'); 720 | document.getElementById('step-buttons').style.display = 'none'; 721 | } 722 | } 723 | 724 | private onClickSlowModeButton() { 725 | if (this.editMode) { 726 | this.onClickEditModeButton(); 727 | } 728 | this.slowMode = !this.slowMode; 729 | 730 | if (this.slowMode === true) { 731 | if (this.stepMode) { 732 | this.onClickStepModeButton(); 733 | } 734 | document.getElementById('slow-step') 735 | .classList.add('mdl-button--colored'); 736 | document.getElementById('tooltips').classList.add('shown'); 737 | } else { 738 | document.getElementById('slow-step') 739 | .classList.remove('mdl-button--colored'); 740 | this.dehighlightStep(); 741 | const container = 742 | document.getElementById('model-visualization-container'); 743 | if (container.classList.contains('any-highlighted')) { 744 | container.classList.remove('any-highlighted'); 745 | } 746 | document.getElementById( 747 | 'component-generator').classList.remove('deactivated'); 748 | document.getElementById( 749 | 'component-discriminator').classList.remove('deactivated'); 750 | document.getElementById( 751 | 'component-d-loss').classList.remove('activated'); 752 | document.getElementById( 753 | 'component-g-loss').classList.remove('activated'); 754 | for (let i = 0; i < this.dFlowElements.length; ++i) { 755 | this.dFlowElements[i].classList.remove('d-activated'); 756 | } 757 | for (let i = 0; i < this.gFlowElements.length; ++i) { 758 | this.gFlowElements[i].classList.remove('g-activated'); 759 | } 760 | document.getElementById('tooltips')!.classList.remove('shown'); 761 | } 762 | } 763 | 764 | private onClickEditModeButton() { 765 | const elements: NodeListOf = 766 | this.querySelectorAll('.config-item'); 767 | for (let i = 0; i < elements.length; ++i) { 768 | elements[i].style.visibility = 769 | this.editMode ? 
'hidden' : 'visible'; 770 | } 771 | this.editMode = !this.editMode; 772 | if (this.editMode === true) { 773 | document.getElementById('edit-model-button') 774 | .classList.add('mdl-button--colored'); 775 | } else { 776 | document.getElementById('edit-model-button') 777 | .classList.remove('mdl-button--colored'); 778 | } 779 | } 780 | 781 | private zeroPad(n: number): string { 782 | const pad = '000000'; 783 | return (pad + n).slice(-pad.length).replace(/\B(?=(\d{3})+(?!\d))/g, ','); 784 | } 785 | 786 | private async iterateTraining(keepIterating: boolean, type?: string) { 787 | if (!this.isPlaying) { 788 | return; 789 | } 790 | 791 | this.iterationCount++; 792 | 793 | if (!keepIterating || this.iterationCount === 1 || this.slowMode || 794 | this.iterationCount % EPOCH_INTERVAL === 0) { 795 | this.iterCountElement.innerText = this.zeroPad(this.iterationCount); 796 | 797 | d3.select('#model-vis-svg') 798 | .selectAll('path') 799 | .style('stroke-dashoffset', () => this.iterationCount * (-1)); 800 | } 801 | 802 | // Visualize generated samples before training. 803 | if (this.slowMode) { 804 | const container = 805 | document.getElementById('model-visualization-container'); 806 | if (!container.classList.contains('any-highlighted')) { 807 | container.classList.add('any-highlighted'); 808 | } 809 | document.getElementById( 810 | 'component-generator').classList.add('deactivated'); 811 | document.getElementById( 812 | 'component-d-loss').classList.add('activated'); 813 | for (let i = 0; i < this.dFlowElements.length; ++i) { 814 | this.dFlowElements[i].classList.add('d-activated'); 815 | } 816 | await this.sleep(SLOW_INTERVAL_MS); 817 | 818 | await this.highlightStep(true, 819 | ['component-noise', 'component-generator', 820 | 'component-generated-samples'], 821 | 'tooltip-d-generated-samples'); 822 | } 823 | 824 | tf.tidy(() => { 825 | let gResultData: Float32Array; 826 | if (!keepIterating || this.iterationCount === 1 || this.slowMode || 827 | this.iterationCount % VIS_INTERVAL === 0) { 828 | const gDataBefore: Array<[number, number]> = []; 829 | const noiseFixedBatch = 830 | this.noiseProviderFixed.getNextCopy() as tf.Tensor2D; 831 | const gResult = this.model.generator(noiseFixedBatch); 832 | gResultData = gResult.dataSync() as Float32Array; 833 | for (let j = 0; j < gResultData.length / 2; ++j) { 834 | gDataBefore.push([gResultData[j * 2], gResultData[j * 2 + 1]]); 835 | } 836 | 837 | if (this.iterationCount === 1) { 838 | this.gDotsElementList.forEach((dotsElement, k) => { 839 | const plotSizePx = k === 0 ? this.plotSizePx : this.smallPlotSizePx; 840 | const radius = k === 0 ? 2 : 1; 841 | d3.select(dotsElement).selectAll('.generated-dot') 842 | .data(gDataBefore) 843 | .enter() 844 | .append('circle') 845 | .attr('class', 'generated-dot gan-lab') 846 | .attr('r', radius) 847 | .attr('cx', (d: number[]) => d[0] * plotSizePx) 848 | .attr('cy', (d: number[]) => (1.0 - d[1]) * plotSizePx) 849 | .append('title') 850 | .text((d: number[]) => 851 | `${Number(d[0]).toFixed(2)},${Number(d[1]).toFixed(2)}`); 852 | }); 853 | } else { 854 | this.gDotsElementList.forEach((dotsElement, k) => { 855 | const plotSizePx = k === 0 ? 
this.plotSizePx : this.smallPlotSizePx; 856 | d3Transition.transition() 857 | .select(dotsElement) 858 | .selectAll('.generated-dot') 859 | .selection().data(gDataBefore) 860 | .transition().duration(SLOW_INTERVAL_MS / 600) 861 | .attr('cx', (d: number[]) => d[0] * plotSizePx) 862 | .attr('cy', (d: number[]) => (1.0 - d[1]) * plotSizePx); 863 | }); 864 | } 865 | } 866 | }); 867 | 868 | if (this.slowMode) { 869 | await this.highlightStep(true, 870 | ['component-true-samples', 'component-generated-samples', 871 | 'component-discriminator', 872 | 'component-true-prediction', 'component-generated-prediction'], 873 | 'tooltip-d-prediction'); 874 | } 875 | 876 | if (!keepIterating || this.iterationCount === 1 || this.slowMode || 877 | this.iterationCount % VIS_INTERVAL === 0) { 878 | tf.tidy(() => { 879 | const noiseBatch = 880 | this.noiseProviderFixed.getNextCopy() as tf.Tensor2D; 881 | const trueSampleBatch = 882 | this.trueSampleProviderFixed.getNextCopy() as tf.Tensor2D; 883 | const truePred = this.model.discriminator(trueSampleBatch); 884 | const generatedPred = 885 | this.model.discriminator(this.model.generator(noiseBatch)); 886 | 887 | const inputData1 = trueSampleBatch.dataSync(); 888 | const resultData1 = truePred.dataSync(); 889 | const resultData2 = generatedPred.dataSync(); 890 | const pInputData1: number[][] = []; 891 | const pData1: number[] = []; 892 | const pData2: number[] = []; 893 | for (let i = 0; i < inputData1.length / 2; ++i) { 894 | pInputData1.push([inputData1[i * 2], inputData1[i * 2 + 1]]); 895 | } 896 | for (let i = 0; i < resultData1.length; ++i) { 897 | pData1.push(resultData1[i]); 898 | } 899 | for (let i = 0; i < resultData2.length; ++i) { 900 | pData2.push(resultData2[i]); 901 | } 902 | 903 | if (this.iterationCount === 1) { 904 | d3.select('#svg-true-prediction') 905 | .selectAll('.true-dot') 906 | .data(pInputData1) 907 | .enter() 908 | .append('circle') 909 | .attr('class', 'true-dot gan-lab') 910 | .attr('r', 1) 911 | .attr('cx', (d: number[]) => d[0] * this.smallPlotSizePx) 912 | .attr('cy', (d: number[]) => (1.0 - d[1]) * this.smallPlotSizePx); 913 | } 914 | const sqrtAbs = (d: number) => { 915 | if (d > 0.5) { 916 | return Math.pow(d * 2.0 - 1.0, 0.5) * 0.5 + 0.5; 917 | } else if (d < 0.5) { 918 | return Math.pow((d * 2.0 - 1.0) * (-1), 0.5) * (-0.5) + 0.5; 919 | } else { 920 | return 0.5; 921 | } 922 | }; 923 | d3.select('#svg-true-prediction') 924 | .selectAll('.true-dot') 925 | .data(pData1) 926 | .style('fill', (d: number) => this.colorScale(sqrtAbs(d))); 927 | if (this.iterationCount > 1 || this.usePretrained) { 928 | d3.select('#svg-generated-prediction') 929 | .selectAll('.generated-dot') 930 | .data(pData2) 931 | .style('fill', (d: number) => this.colorScale(sqrtAbs(d))); 932 | } 933 | }); 934 | } 935 | 936 | // Train Discriminator. 937 | let dCostVal: number = null; 938 | tf.tidy(() => { 939 | const kDSteps = type === 'D' ? 1 : (type === 'G' ? 
0 : this.kDSteps); 940 | for (let j = 0; j < kDSteps; j++) { 941 | const dCost = this.model.dOptimizer.minimize(() => { 942 | const noiseBatch = this.noiseProvider.getNextCopy() as tf.Tensor2D; 943 | const trueSampleBatch = 944 | this.trueSampleProvider.getNextCopy() as tf.Tensor2D; 945 | const truePred = this.model.discriminator(trueSampleBatch); 946 | const generatedPred = 947 | this.model.discriminator(this.model.generator(noiseBatch)); 948 | return this.model.dLoss(truePred, generatedPred); 949 | }, true, this.model.dVariables); 950 | if ((!keepIterating || this.iterationCount === 1 || this.slowMode || 951 | this.iterationCount % VIS_INTERVAL === 0) 952 | && j + 1 === kDSteps) { 953 | dCostVal = dCost.get(); 954 | } 955 | } 956 | }); 957 | 958 | if (!keepIterating || this.iterationCount === 1 || this.slowMode || 959 | this.iterationCount % VIS_INTERVAL === 0) { 960 | 961 | if (this.slowMode) { 962 | await this.highlightStep(true, ['component-d-loss'], 'tooltip-d-loss'); 963 | } 964 | 965 | // Update discriminator loss. 966 | if (dCostVal) { 967 | document.getElementById('d-loss-value').innerText = 968 | (dCostVal / 2).toFixed(3); 969 | document.getElementById('d-loss-bar').title = (dCostVal / 2).toFixed(3); 970 | document.getElementById('d-loss-bar').style.width = 971 | this.model.lossType === 'LeastSq loss' 972 | ? `${dCostVal * 50.0}px` 973 | : `${Math.pow(dCostVal * 0.5, 2) * 50.0}px`; 974 | } 975 | 976 | if (this.slowMode) { 977 | await this.highlightStep(true, 978 | ['component-discriminator-gradients'], 'tooltip-d-gradients'); 979 | } 980 | 981 | if (this.slowMode) { 982 | await this.highlightStep(true, 983 | ['component-discriminator'], 'tooltip-update-discriminator'); 984 | } 985 | 986 | // Visualize discriminator's output. 987 | const dData: number[] = []; 988 | tf.tidy(() => { 989 | for (let i = 0; i < NUM_GRID_CELLS * NUM_GRID_CELLS / BATCH_SIZE; ++i) { 990 | const inputBatch = 991 | this.uniformInputProvider.getNextCopy() as tf.Tensor2D; 992 | const result = this.model.discriminator(inputBatch); 993 | const resultData = result.dataSync(); 994 | for (let j = 0; j < resultData.length; ++j) { 995 | dData.push(resultData[j]); 996 | } 997 | } 998 | 999 | const gridDotsElementList = [ 1000 | '#vis-discriminator-output', 1001 | '#svg-discriminator-output' 1002 | ]; 1003 | if (this.iterationCount === 1) { 1004 | gridDotsElementList.forEach((dotsElement, k) => { 1005 | const plotSizePx = k === 0 ? this.plotSizePx : 1006 | (k === 1 ? 
this.mediumPlotSizePx : this.smallPlotSizePx); 1007 | d3.select(dotsElement) 1008 | .selectAll('.uniform-dot') 1009 | .data(dData) 1010 | .enter() 1011 | .append('rect') 1012 | .attr('class', 'uniform-dot gan-lab') 1013 | .attr('width', plotSizePx / NUM_GRID_CELLS) 1014 | .attr('height', plotSizePx / NUM_GRID_CELLS) 1015 | .attr( 1016 | 'x', 1017 | (d: number, i: number) => 1018 | (i % NUM_GRID_CELLS) * (plotSizePx / NUM_GRID_CELLS)) 1019 | .attr( 1020 | 'y', 1021 | (d: number, i: number) => plotSizePx - 1022 | (Math.floor(i / NUM_GRID_CELLS) + 1) * 1023 | (plotSizePx / NUM_GRID_CELLS)) 1024 | .style('fill', (d: number) => this.colorScale(d)); 1025 | }); 1026 | } 1027 | gridDotsElementList.forEach((dotsElement) => { 1028 | d3.select(dotsElement) 1029 | .selectAll('.uniform-dot') 1030 | .data(dData) 1031 | .style('fill', (d: number) => this.colorScale(d)); 1032 | }); 1033 | }); 1034 | } 1035 | 1036 | if (this.slowMode) { 1037 | await this.sleep(SLOW_INTERVAL_MS); 1038 | this.dehighlightStep(); 1039 | 1040 | document.getElementById( 1041 | 'component-generator').classList.remove('deactivated'); 1042 | document.getElementById( 1043 | 'component-d-loss').classList.remove('activated'); 1044 | for (let i = 0; i < this.dFlowElements.length; ++i) { 1045 | this.dFlowElements[i].classList.remove('d-activated'); 1046 | } 1047 | 1048 | document.getElementById( 1049 | 'component-discriminator').classList.add('deactivated'); 1050 | document.getElementById( 1051 | 'component-g-loss').classList.add('activated'); 1052 | for (let i = 0; i < this.gFlowElements.length; ++i) { 1053 | this.gFlowElements[i].classList.add('g-activated'); 1054 | } 1055 | await this.sleep(SLOW_INTERVAL_MS); 1056 | 1057 | await this.highlightStep(false, 1058 | ['component-noise', 'component-generator', 1059 | 'component-generated-samples'], 1060 | 'tooltip-g-generated-samples'); 1061 | } 1062 | 1063 | if (this.slowMode) { 1064 | await this.highlightStep(false, 1065 | ['component-generated-samples', 'component-discriminator', 1066 | 'component-generated-prediction'], 1067 | 'tooltip-g-prediction'); 1068 | } 1069 | 1070 | if (this.slowMode) { 1071 | await this.highlightStep(false, ['component-g-loss'], 'tooltip-g-loss'); 1072 | } 1073 | 1074 | // Visualize generated samples before training. 1075 | const gradData: Array<[number, number, number, number]> = []; 1076 | tf.tidy(() => { 1077 | let gResultData: Float32Array; 1078 | if (!keepIterating || this.iterationCount === 1 || this.slowMode || 1079 | this.iterationCount % VIS_INTERVAL === 0) { 1080 | const gDataBefore: Array<[number, number]> = []; 1081 | const noiseFixedBatch = 1082 | this.noiseProviderFixed.getNextCopy() as tf.Tensor2D; 1083 | const gResult = this.model.generator(noiseFixedBatch); 1084 | gResultData = gResult.dataSync() as Float32Array; 1085 | for (let j = 0; j < gResultData.length / 2; ++j) { 1086 | gDataBefore.push([gResultData[j * 2], gResultData[j * 2 + 1]]); 1087 | } 1088 | 1089 | this.gDotsElementList.forEach((dotsElement, k) => { 1090 | const plotSizePx = k === 0 ? this.plotSizePx : this.smallPlotSizePx; 1091 | d3Transition.transition() 1092 | .select(dotsElement) 1093 | .selectAll('.generated-dot') 1094 | .selection().data(gDataBefore) 1095 | .transition().duration(SLOW_INTERVAL_MS / 600) 1096 | .attr('cx', (d: number[]) => d[0] * plotSizePx) 1097 | .attr('cy', (d: number[]) => (1.0 - d[1]) * plotSizePx); 1098 | }); 1099 | } 1100 | 1101 | // Compute and store gradients before training. 
1102 | if (!keepIterating || this.iterationCount === 1 || this.slowMode || 1103 | this.iterationCount % VIS_INTERVAL === 0) { 1104 | const gradFunction = tf.grad(this.model.discriminator); 1105 | const noiseFixedBatchForGrad = 1106 | this.noiseProviderFixed.getNextCopy() as tf.Tensor2D; 1107 | const gSamples = this.model.generator(noiseFixedBatchForGrad); 1108 | const grad = gradFunction(gSamples); 1109 | const gGradient = grad.dataSync(); 1110 | 1111 | for (let i = 0; i < gResultData.length / 2; ++i) { 1112 | gradData.push([ 1113 | gResultData[i * 2], gResultData[i * 2 + 1], 1114 | gGradient[i * 2], gGradient[i * 2 + 1] 1115 | ]); 1116 | } 1117 | } 1118 | }); 1119 | 1120 | // Train generator. 1121 | const kGSteps = type === 'G' ? 1 : (type === 'D' ? 0 : this.kGSteps); 1122 | let gCostVal: number = null; 1123 | tf.tidy(() => { 1124 | for (let j = 0; j < kGSteps; j++) { 1125 | const gCost = this.model.gOptimizer.minimize(() => { 1126 | const noiseBatch = this.noiseProvider.getNextCopy() as tf.Tensor2D; 1127 | const pred = 1128 | this.model.discriminator(this.model.generator(noiseBatch)); 1129 | return this.model.gLoss(pred); 1130 | }, true, this.model.gVariables); 1131 | if ((!keepIterating || this.iterationCount === 1 || this.slowMode || 1132 | this.iterationCount % VIS_INTERVAL === 0) 1133 | && j + 1 === kGSteps) { 1134 | gCostVal = gCost.get(); 1135 | } 1136 | } 1137 | }); 1138 | 1139 | if (!keepIterating || this.iterationCount === 1 || this.slowMode || 1140 | this.iterationCount % VIS_INTERVAL === 0) { 1141 | // Update generator loss. 1142 | if (gCostVal) { 1143 | document.getElementById('g-loss-value').innerText = 1144 | gCostVal.toFixed(3); 1145 | document.getElementById('g-loss-bar').title = gCostVal.toFixed(3); 1146 | document.getElementById('g-loss-bar').style.width = 1147 | this.model.lossType === 'LeastSq loss' 1148 | ? `${gCostVal * 2.0 * 50.0}px` 1149 | : `${Math.pow(gCostVal, 2) * 50.0}px`; 1150 | } 1151 | 1152 | // Update charts. 1153 | if (this.iterationCount === 1) { 1154 | const chartContainer = 1155 | document.getElementById('chart-container') as HTMLElement; 1156 | chartContainer.style.visibility = 'visible'; 1157 | } 1158 | 1159 | this.updateChartData(this.costChartData, this.iterationCount, 1160 | [dCostVal ? dCostVal / 2 : null, gCostVal]); 1161 | this.costChart.update(); 1162 | 1163 | if (this.slowMode) { 1164 | await this.highlightStep(false, 1165 | ['component-generator-gradients'], 'tooltip-g-gradients'); 1166 | } 1167 | 1168 | // Visualize gradients for generator. 1169 | // Values already computed above. 1170 | const gradDotsElementList = [ 1171 | '#vis-generator-gradients', 1172 | '#svg-generator-gradients' 1173 | ]; 1174 | if (this.iterationCount === 1) { 1175 | gradDotsElementList.forEach((dotsElement, k) => { 1176 | const plotSizePx = k === 0 ? 1177 | this.plotSizePx : this.smallPlotSizePx; 1178 | const arrowWidth = k === 0 ? 0.002 : 0.001; 1179 | d3.select(dotsElement) 1180 | .selectAll('.gradient-generated') 1181 | .data(gradData) 1182 | .enter() 1183 | .append('polygon') 1184 | .attr('class', 'gradient-generated gan-lab') 1185 | .attr('points', (d: number[]) => 1186 | this.createArrowPolygon(d, plotSizePx, arrowWidth)); 1187 | }); 1188 | } 1189 | 1190 | gradDotsElementList.forEach((dotsElement, k) => { 1191 | const plotSizePx = k === 0 ? this.plotSizePx : this.smallPlotSizePx; 1192 | const arrowWidth = k === 0 ? 
0.002 : 0.001; 1193 | d3Transition.transition() 1194 | .select(dotsElement) 1195 | .selectAll('.gradient-generated').selection().data(gradData) 1196 | .transition().duration(SLOW_INTERVAL_MS) 1197 | .attr('points', (d: number[]) => 1198 | this.createArrowPolygon(d, plotSizePx, arrowWidth)); 1199 | }); 1200 | 1201 | if (this.slowMode) { 1202 | await this.highlightStep(false, 1203 | ['component-generator'], 'tooltip-update-generator'); 1204 | } 1205 | 1206 | // Visualize manifold for 1-D or 2-D noise. 1207 | tf.tidy(() => { 1208 | if (this.noiseSize <= 2) { 1209 | const manifoldData: Float32Array[] = []; 1210 | const numBatches = Math.ceil(Math.pow( 1211 | NUM_MANIFOLD_CELLS + 1, this.noiseSize) / BATCH_SIZE); 1212 | const remainingDummy = BATCH_SIZE * numBatches - Math.pow( 1213 | NUM_MANIFOLD_CELLS + 1, this.noiseSize) * this.noiseSize; 1214 | for (let k = 0; k < numBatches; ++k) { 1215 | const noiseBatch = 1216 | this.uniformNoiseProvider.getNextCopy() as tf.Tensor2D; 1217 | const result = this.model.generator(noiseBatch); 1218 | const maniResult: Float32Array = result.dataSync() as Float32Array; 1219 | for (let i = 0; i < (k + 1 < numBatches ? 1220 | BATCH_SIZE : BATCH_SIZE - remainingDummy); ++i) { 1221 | manifoldData.push(maniResult.slice(i * 2, i * 2 + 2)); 1222 | } 1223 | } 1224 | 1225 | // Create grid cells. 1226 | const gridData: ManifoldCell[] = this.noiseSize === 1 1227 | ? [{ points: manifoldData }] 1228 | : this.createGridCellsFromManifoldData(manifoldData); 1229 | 1230 | const gManifoldElementList = [ 1231 | '#vis-manifold', 1232 | '#svg-generator-manifold' 1233 | ]; 1234 | gManifoldElementList.forEach((gManifoldElement, k) => { 1235 | const plotSizePx = 1236 | k === 0 ? this.plotSizePx : this.mediumPlotSizePx; 1237 | const manifoldCell = 1238 | line() 1239 | .x((d: number[]) => d[0] * plotSizePx) 1240 | .y((d: number[]) => (1.0 - d[1]) * plotSizePx); 1241 | 1242 | if (this.iterationCount === 1) { 1243 | d3.select(gManifoldElement) 1244 | .selectAll('.grids') 1245 | .data(gridData) 1246 | .enter() 1247 | .append('g') 1248 | .attr('class', 'grids gan-lab') 1249 | .append('path') 1250 | .attr('class', 'manifold-cell gan-lab') 1251 | .style('fill', () => { 1252 | return this.noiseSize === 2 ? '#7b3294' : 'none'; 1253 | }); 1254 | } 1255 | d3.select(gManifoldElement) 1256 | .selectAll('.grids') 1257 | .data(gridData) 1258 | .select('.manifold-cell') 1259 | .attr('d', (d: ManifoldCell) => manifoldCell( 1260 | d.points.map(point => [point[0], point[1]] as [number, number]) 1261 | )) 1262 | .style('fill-opacity', (d: ManifoldCell, i: number) => { 1263 | return this.selectedNoiseType === '2D Gaussian' 1264 | ? Math.min(0.1 + this.densitiesForGaussian[i] / 1265 | (d.area! * Math.pow(NUM_MANIFOLD_CELLS, 2)) * 0.2, 0.9) 1266 | : (this.noiseSize === 2 ? Math.max( 1267 | 0.9 - d.area! 
* 0.4 * Math.pow(NUM_MANIFOLD_CELLS, 2), 0.1) 1268 | : 'none'); 1269 | }); 1270 | 1271 | if (this.noiseSize === 1) { 1272 | const manifoldDots = 1273 | d3.select(gManifoldElement) 1274 | .selectAll('.uniform-generated-dot') 1275 | .data(manifoldData); 1276 | if (this.iterationCount === 1) { 1277 | manifoldDots.enter() 1278 | .append('circle') 1279 | .attr('class', 'uniform-generated-dot gan-lab') 1280 | .attr('r', 1); 1281 | } 1282 | manifoldDots 1283 | .attr('cx', (d: Float32Array) => d[0] * plotSizePx) 1284 | .attr('cy', (d: Float32Array) => (1.0 - d[1]) * plotSizePx); 1285 | } 1286 | }); 1287 | } 1288 | }); 1289 | 1290 | const gData: Array<[number, number]> = []; 1291 | tf.tidy(() => { 1292 | const noiseFixedBatch = 1293 | this.noiseProviderFixed.getNextCopy() as tf.Tensor2D; 1294 | const gResult = this.model.generator(noiseFixedBatch); 1295 | const gResultData = gResult.dataSync(); 1296 | for (let i = 0; i < gResultData.length / 2; ++i) { 1297 | gData.push([gResultData[i * 2], gResultData[i * 2 + 1]]); 1298 | } 1299 | }); 1300 | 1301 | // Visualize generated samples. 1302 | if (!this.slowMode) { 1303 | this.gDotsElementList.forEach((dotsElement, k) => { 1304 | const plotSizePx = k === 0 ? this.plotSizePx : this.smallPlotSizePx; 1305 | d3Transition.transition() 1306 | .select(dotsElement) 1307 | .selectAll('.generated-dot') 1308 | .selection() 1309 | .data(gData) 1310 | .transition().duration(SLOW_INTERVAL_MS) 1311 | .attr('cx', (d: number[]) => d[0] * plotSizePx) 1312 | .attr('cy', (d: number[]) => (1.0 - d[1]) * plotSizePx) 1313 | .select('title').text((d: number[], i: number) => 1314 | `${Number(d[0]).toFixed(2)},${Number(d[1]).toFixed(2)} (${i})`); 1315 | }); 1316 | 1317 | // Move gradients also. 1318 | for (let i = 0; i < gData.length; ++i) { 1319 | gradData[i][0] = gData[i][0]; 1320 | gradData[i][1] = gData[i][1]; 1321 | } 1322 | gradDotsElementList.forEach((dotsElement, k) => { 1323 | const plotSizePx = k === 0 ? this.plotSizePx : this.smallPlotSizePx; 1324 | const arrowWidth = k === 0 ? 0.002 : 0.001; 1325 | d3Transition.transition() 1326 | .select(dotsElement) 1327 | .selectAll('.gradient-generated').selection().data(gradData) 1328 | .transition().duration(SLOW_INTERVAL_MS) 1329 | .attr('points', (d: number[]) => 1330 | this.createArrowPolygon(d, plotSizePx, arrowWidth)); 1331 | }); 1332 | } 1333 | 1334 | // Simple grid-based evaluation. 
1335 | this.evaluator.updateGridsForGenerated(gData); 1336 | this.updateChartData(this.evalChartData, this.iterationCount, [ 1337 | this.evaluator.getKLDivergenceScore(), 1338 | this.evaluator.getJSDivergenceScore() 1339 | ]); 1340 | this.evalChart.update(); 1341 | 1342 | if (this.slowMode) { 1343 | await this.sleep(SLOW_INTERVAL_MS); 1344 | this.dehighlightStep(); 1345 | 1346 | const container = 1347 | document.getElementById('model-visualization-container'); 1348 | if (container.classList.contains('any-highlighted')) { 1349 | container.classList.remove('any-highlighted'); 1350 | } 1351 | document.getElementById( 1352 | 'component-discriminator').classList.remove('deactivated'); 1353 | document.getElementById( 1354 | 'component-g-loss').classList.remove('activated'); 1355 | for (let i = 0; i < this.gFlowElements.length; ++i) { 1356 | this.gFlowElements[i].classList.remove('g-activated'); 1357 | } 1358 | } 1359 | } 1360 | 1361 | if (this.iterationCount >= 999999) { 1362 | this.isPlaying = false; 1363 | } 1364 | 1365 | requestAnimationFrame(() => this.iterateTraining(true)); 1366 | } 1367 | 1368 | private createArrowPolygon(d: number[], 1369 | plotSizePx: number, arrowWidth: number) { 1370 | const gradSize = Math.sqrt( 1371 | d[2] * d[2] + d[3] * d[3] + 0.00000001); 1372 | const xNorm = d[2] / gradSize; 1373 | const yNorm = d[3] / gradSize; 1374 | return `${d[0] * plotSizePx}, 1375 | ${(1.0 - d[1]) * plotSizePx} 1376 | ${(d[0] - yNorm * (-1) * arrowWidth) * plotSizePx}, 1377 | ${(1.0 - (d[1] - xNorm * arrowWidth)) * plotSizePx} 1378 | ${(d[0] + d[2] * GRAD_ARROW_UNIT_LEN) * plotSizePx}, 1379 | ${(1.0 - (d[1] + d[3] * GRAD_ARROW_UNIT_LEN)) * plotSizePx} 1380 | ${(d[0] - yNorm * arrowWidth) * plotSizePx}, 1381 | ${(1.0 - (d[1] - xNorm * (-1) * arrowWidth)) * plotSizePx}`; 1382 | } 1383 | 1384 | private createGridCellsFromManifoldData(manifoldData: Float32Array[]) { 1385 | const gridData: ManifoldCell[] = []; 1386 | let areaSum = 0.0; 1387 | for (let i = 0; i < NUM_MANIFOLD_CELLS * NUM_MANIFOLD_CELLS; ++i) { 1388 | const x = i % NUM_MANIFOLD_CELLS; 1389 | const y = Math.floor(i / NUM_MANIFOLD_CELLS); 1390 | const index = x + y * (NUM_MANIFOLD_CELLS + 1); 1391 | 1392 | const gridCell = []; 1393 | gridCell.push(manifoldData[index]); 1394 | gridCell.push(manifoldData[index + 1]); 1395 | gridCell.push(manifoldData[index + 1 + (NUM_MANIFOLD_CELLS + 1)]); 1396 | gridCell.push(manifoldData[index + (NUM_MANIFOLD_CELLS + 1)]); 1397 | gridCell.push(manifoldData[index]); 1398 | 1399 | // Calculate area by using four points. 1400 | let area = 0.0; 1401 | for (let j = 0; j < 4; ++j) { 1402 | area += gridCell[j % 4][0] * gridCell[(j + 1) % 4][1] - 1403 | gridCell[j % 4][1] * gridCell[(j + 1) % 4][0]; 1404 | } 1405 | area = 0.5 * Math.abs(area); 1406 | areaSum += area; 1407 | 1408 | gridData.push({ points: gridCell, area }); 1409 | } 1410 | // Normalize area. 
1411 | gridData.forEach(grid => { 1412 | if (grid.area) { 1413 | grid.area = grid.area / areaSum; 1414 | } 1415 | }); 1416 | 1417 | return gridData; 1418 | } 1419 | 1420 | private playGeneratorAnimation() { 1421 | if (this.noiseSize <= 2) { 1422 | const manifoldData: Float32Array[] = []; 1423 | const numBatches = Math.ceil(Math.pow( 1424 | NUM_MANIFOLD_CELLS + 1, this.noiseSize) / BATCH_SIZE); 1425 | const remainingDummy = BATCH_SIZE * numBatches - Math.pow( 1426 | NUM_MANIFOLD_CELLS + 1, this.noiseSize) * 2; 1427 | for (let k = 0; k < numBatches; ++k) { 1428 | const maniArray: Float32Array = 1429 | this.uniformNoiseProvider.getNextCopy().dataSync() as Float32Array; 1430 | for (let i = 0; i < (k + 1 < numBatches ? 1431 | BATCH_SIZE : BATCH_SIZE - remainingDummy); ++i) { 1432 | if (this.noiseSize >= 2) { 1433 | manifoldData.push(maniArray.slice(i * 2, i * 2 + 2)); 1434 | } else { 1435 | manifoldData.push(new Float32Array([maniArray[i], 0.5])); 1436 | } 1437 | } 1438 | } 1439 | 1440 | // Create grid cells. 1441 | const noiseData = this.noiseSize === 1 1442 | ? [{ points: manifoldData }] 1443 | : this.createGridCellsFromManifoldData(manifoldData); 1444 | 1445 | const gridData = d3.select('#svg-generator-manifold') 1446 | .selectAll('.grids').data(); 1447 | 1448 | const uniformDotsData = d3.select('#svg-generator-manifold') 1449 | .selectAll('.uniform-generated-dot').data(); 1450 | 1451 | const manifoldCell = 1452 | line() 1453 | .x((d: number[]) => d[0] * this.mediumPlotSizePx) 1454 | .y((d: number[]) => (1.0 - d[1]) * this.mediumPlotSizePx); 1455 | 1456 | // Visualize noise. 1457 | d3.select('#svg-generator-manifold') 1458 | .selectAll('.grids') 1459 | .data(noiseData) 1460 | .select('.manifold-cell') 1461 | .attr('d', (d: ManifoldCell) => manifoldCell( 1462 | d.points.map(point => [point[0], point[1]] as [number, number]) 1463 | )) 1464 | .style('fill-opacity', (d: ManifoldCell, i: number) => { 1465 | return this.selectedNoiseType === '2D Gaussian' 1466 | ? Math.min(0.1 + this.densitiesForGaussian[i] / 1467 | (d.area! * Math.pow(NUM_MANIFOLD_CELLS, 2)) * 0.2, 0.9) 1468 | : (this.noiseSize === 2 ? Math.max( 1469 | 0.9 - d.area! * 0.4 * Math.pow(NUM_MANIFOLD_CELLS, 2), 0.1) 1470 | : 'none'); 1471 | }); 1472 | 1473 | if (this.noiseSize === 1) { 1474 | d3.select('#svg-generator-manifold') 1475 | .selectAll('.uniform-generated-dot') 1476 | .data(manifoldData) 1477 | .attr('cx', (d: Float32Array) => d[0] * this.mediumPlotSizePx) 1478 | .attr('cy', (d: Float32Array) => 1479 | (1.0 - d[1]) * this.mediumPlotSizePx); 1480 | } 1481 | 1482 | // Transition to current manifold. 1483 | d3Transition.transition() 1484 | .select('#svg-generator-manifold') 1485 | .selectAll('.grids') 1486 | .selection() 1487 | .data(gridData) 1488 | .transition().duration(2000) 1489 | .select('.manifold-cell') 1490 | .attr('d', (d: ManifoldCell) => manifoldCell( 1491 | d.points.map(point => [point[0], point[1]] as [number, number]) 1492 | )) 1493 | .style('fill-opacity', (d: ManifoldCell, i: number) => { 1494 | return this.selectedNoiseType === '2D Gaussian' 1495 | ? Math.min(0.1 + this.densitiesForGaussian[i] / 1496 | (d.area! * Math.pow(NUM_MANIFOLD_CELLS, 2)) * 0.3, 0.9) 1497 | : (this.noiseSize === 2 ? Math.max( 1498 | 0.9 - d.area! 
* 0.4 * Math.pow(NUM_MANIFOLD_CELLS, 2), 0.1) 1499 | : 'none'); 1500 | }); 1501 | 1502 | if (this.noiseSize === 1) { 1503 | d3Transition.transition() 1504 | .select('#svg-generator-manifold') 1505 | .selectAll('.uniform-generated-dot') 1506 | .selection() 1507 | .data(uniformDotsData) 1508 | .transition().duration(2000) 1509 | .attr('cx', (d: Float32Array) => d[0] * this.mediumPlotSizePx) 1510 | .attr('cy', (d: Float32Array) => 1511 | (1.0 - d[1]) * this.mediumPlotSizePx); 1512 | } 1513 | } 1514 | } 1515 | 1516 | private async highlightStep(isForD: boolean, 1517 | componentElementNames: string[], tooltipElementName: string) { 1518 | await this.sleep(SLOW_INTERVAL_MS); 1519 | this.dehighlightStep(); 1520 | 1521 | this.highlightedComponents = 1522 | componentElementNames.map(componentElementName => 1523 | document.getElementById(componentElementName) as HTMLDivElement); 1524 | this.highlightedTooltip = 1525 | document.getElementById(tooltipElementName) as HTMLDivElement; 1526 | 1527 | this.highlightedComponents.forEach(component => 1528 | component.classList.add('highlighted')); 1529 | this.highlightedTooltip.classList.add('shown'); 1530 | this.highlightedTooltip.classList.add('highlighted'); 1531 | 1532 | await this.sleep(SLOW_INTERVAL_MS); 1533 | } 1534 | 1535 | private dehighlightStep() { 1536 | if (this.highlightedComponents) { 1537 | this.highlightedComponents.forEach(component => { 1538 | component.classList.remove('highlighted'); 1539 | }); 1540 | } 1541 | if (this.highlightedTooltip) { 1542 | this.highlightedTooltip.classList.remove('shown'); 1543 | this.highlightedTooltip.classList.remove('highlighted'); 1544 | } 1545 | } 1546 | 1547 | private async onClickSaveModelButton() { 1548 | const dTensors: tf.NamedTensorMap = 1549 | this.model.dVariables.reduce((obj, item, i) => { 1550 | obj[`d-${i}`] = item; 1551 | return obj; 1552 | }, {}); 1553 | const gTensors: tf.NamedTensorMap = 1554 | this.model.gVariables.reduce((obj, item, i) => { 1555 | obj[`g-${i}`] = item; 1556 | return obj; 1557 | }, {}); 1558 | const tensors: tf.NamedTensorMap = {...dTensors, ...gTensors}; 1559 | 1560 | const modelInfo: {} = { 1561 | 'shape_name': this.selectedShapeName, 1562 | 'iter_count': this.iterationCount, 1563 | 'config': { 1564 | selectedNoiseType: this.selectedNoiseType, 1565 | noiseSize: this.noiseSize, 1566 | numGeneratorLayers: this.numGeneratorLayers, 1567 | numDiscriminatorLayers: this.numDiscriminatorLayers, 1568 | numGeneratorNeurons: this.numGeneratorNeurons, 1569 | numDiscriminatorNeurons: this.numDiscriminatorNeurons, 1570 | dLearningRate: this.dLearningRate, 1571 | gLearningRate: this.gLearningRate, 1572 | dOptimizerType: this.dOptimizerType, 1573 | gOptimizerType: this.gOptimizerType, 1574 | lossType: this.lossType, 1575 | kDSteps: this.kDSteps, 1576 | kGSteps: this.kGSteps, 1577 | } 1578 | }; 1579 | const weightDataAndSpecs = await tf.io.encodeWeights(tensors); 1580 | const modelArtifacts: tf.io.ModelArtifacts = { 1581 | modelTopology: modelInfo, 1582 | weightSpecs: weightDataAndSpecs.specs, 1583 | weightData: weightDataAndSpecs.data, 1584 | }; 1585 | 1586 | const downloadTrigger = 1587 | tf.io.getSaveHandlers('downloads://ganlab_trained_model')[0]; 1588 | await downloadTrigger.save(modelArtifacts); 1589 | } 1590 | 1591 | private async loadPretrainedWeightFile(filename: string): 1592 | Promise { 1593 | const handler = 1594 | tf.io.browserHTTPRequest(`pretrained_models/${filename}.json`); 1595 | const loadedModel: tf.io.ModelArtifacts = await handler.load(); 1596 | 1597 | 
this.iterationCount = loadedModel.modelTopology['iter_count']; 1598 | 1599 | const loadedConfig: {} = loadedModel.modelTopology['config']; 1600 | for (let configProperty in loadedConfig) { 1601 | this[configProperty] = loadedConfig[configProperty]; 1602 | } 1603 | 1604 | document.getElementById('num-g-layers')!.innerText = 1605 | this.numGeneratorLayers.toString(); 1606 | document.getElementById('num-d-layers')!.innerText = 1607 | this.numDiscriminatorLayers.toString(); 1608 | document.getElementById('num-g-neurons')!.innerText = 1609 | this.numGeneratorNeurons.toString(); 1610 | document.getElementById('num-d-neurons')!.innerText = 1611 | this.numDiscriminatorNeurons.toString(); 1612 | document.getElementById('k-d-steps')!.innerText = this.kDSteps.toString(); 1613 | document.getElementById('k-g-steps')!.innerText = this.kGSteps.toString(); 1614 | 1615 | return loadedModel as Promise; 1616 | } 1617 | 1618 | private recreateCharts() { 1619 | document.getElementById('chart-container').style.visibility = 'hidden'; 1620 | 1621 | this.costChartData = new Array(2); 1622 | for (let i = 0; i < this.costChartData.length; ++i) { 1623 | this.costChartData[i] = []; 1624 | } 1625 | if (this.costChart != null) { 1626 | this.costChart.destroy(); 1627 | } 1628 | const costChartSpecification = [ 1629 | { label: 'Discriminator\'s Loss', color: 'rgba(5, 117, 176, 0.5)' }, 1630 | { label: 'Generator\'s Loss', color: 'rgba(123, 50, 148, 0.5)' } 1631 | ]; 1632 | this.costChart = this.createChart( 1633 | 'cost-chart', this.costChartData, costChartSpecification, 0); 1634 | 1635 | this.evalChartData = new Array(2); 1636 | for (let i = 0; i < this.evalChartData.length; ++i) { 1637 | this.evalChartData[i] = []; 1638 | } 1639 | if (this.evalChart != null) { 1640 | this.evalChart.destroy(); 1641 | } 1642 | const evalChartSpecification = [ 1643 | { label: 'KL Divergence (by grid)', color: 'rgba(220, 80, 20, 0.5)' }, 1644 | { label: 'JS Divergence (by grid)', color: 'rgba(200, 150, 10, 0.5)' } 1645 | ]; 1646 | this.evalChart = this.createChart( 1647 | 'eval-chart', this.evalChartData, evalChartSpecification, 0); 1648 | } 1649 | 1650 | private updateChartData(data: ChartData[][], xVal: number, yList: number[]) { 1651 | for (let i = 0; i < yList.length; ++i) { 1652 | data[i].push({ x: xVal, y: yList[i] ? 
yList[i].toFixed(3) : null}); 1653 | } 1654 | } 1655 | 1656 | private createChart( 1657 | canvasId: string, chartData: ChartData[][], 1658 | specification: Array<{ label: string, color: string }>, 1659 | min?: number, max?: number): Chart { 1660 | const context = (document.getElementById(canvasId) as HTMLCanvasElement) 1661 | .getContext('2d') as CanvasRenderingContext2D; 1662 | const chartDatasets = specification.map((chartSpec, i) => { 1663 | return { 1664 | data: chartData[i], 1665 | backgroundColor: chartSpec.color, 1666 | borderColor: chartSpec.color, 1667 | borderWidth: 1, 1668 | fill: false, 1669 | label: chartSpec.label, 1670 | lineTension: 0, 1671 | pointHitRadius: 8, 1672 | pointRadius: 0 1673 | }; 1674 | }); 1675 | 1676 | return new Chart(context, { 1677 | type: 'line', 1678 | data: { datasets: chartDatasets }, 1679 | options: { 1680 | animation: { duration: 0 }, 1681 | legend: { 1682 | labels: { boxWidth: 10 } 1683 | }, 1684 | responsive: false, 1685 | scales: { 1686 | xAxes: [{ type: 'linear', position: 'bottom' }], 1687 | yAxes: [{ ticks: { max, min } }] 1688 | } 1689 | } 1690 | }); 1691 | } 1692 | 1693 | private sleep(ms: number) { 1694 | return new Promise(resolve => { 1695 | const check = () => { 1696 | if (this.isPlaying) { 1697 | this.isPausedOngoingIteration = false; 1698 | resolve(); 1699 | } else { 1700 | this.isPausedOngoingIteration = true; 1701 | setTimeout(check, 1000); 1702 | } 1703 | }; 1704 | setTimeout(check, ms); 1705 | }); 1706 | } 1707 | } 1708 | 1709 | document.registerElement(GANLab.prototype.is, GANLab); 1710 | -------------------------------------------------------------------------------- /demo/ganlab_drawing.ts: -------------------------------------------------------------------------------- 1 | export class GANLabDrawing { 2 | private _drawingPositions: Array<[number, number]>; 3 | private isDrawing: boolean; 4 | private context: CanvasRenderingContext2D; 5 | 6 | constructor(private canvas: HTMLCanvasElement, private plotSizePx: number) { 7 | this._drawingPositions = []; 8 | this.isDrawing = false; 9 | 10 | this.context = canvas.getContext('2d'); 11 | this.context.strokeStyle = 'rgba(0, 136, 55, 0.25)'; 12 | this.context.lineJoin = 'round'; 13 | this.context.lineWidth = 10; 14 | const drawingContainer = 15 | document.getElementById('vis-content-container') as HTMLDivElement; 16 | const offsetLeft = drawingContainer.offsetLeft + 5; 17 | const offsetTop = drawingContainer.offsetTop + 15; 18 | 19 | this.canvas.addEventListener('mousedown', (event: MouseEvent) => { 20 | this.isDrawing = true; 21 | this.draw([event.pageX - offsetLeft, event.pageY - offsetTop]); 22 | }); 23 | this.canvas.addEventListener('mousemove', (event: MouseEvent) => { 24 | if (this.isDrawing) { 25 | this.draw([event.pageX - offsetLeft, event.pageY - offsetTop]); 26 | } 27 | }); 28 | this.canvas.addEventListener('mouseup', (event: Event) => { 29 | this.isDrawing = false; 30 | }); 31 | } 32 | 33 | get drawingPositions(): Array<[number, number]> { 34 | return this._drawingPositions; 35 | } 36 | 37 | prepareDrawing() { 38 | this._drawingPositions = []; 39 | this.context.clearRect( 40 | 0, 0, this.context.canvas.width, this.context.canvas.height); 41 | const drawingElement = 42 | document.getElementById('drawing-container') as HTMLElement; 43 | drawingElement.style.display = 'block'; 44 | const drawingBackgroundElement = 45 | document.getElementById('drawing-disable-background') as HTMLDivElement; 46 | drawingBackgroundElement.style.display = 'block'; 47 | } 48 | 49 | private 
draw(position: [number, number]) { 50 | this._drawingPositions.push( 51 | [position[0] / this.plotSizePx, 1.0 - position[1] / this.plotSizePx]); 52 | this.context.beginPath(); 53 | this.context.moveTo(position[0] - 1, position[1]); 54 | this.context.lineTo(position[0], position[1]); 55 | this.context.closePath(); 56 | this.context.stroke(); 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /demo/ganlab_evaluators.ts: -------------------------------------------------------------------------------- 1 | export class GANLabEvaluatorGridDensities { 2 | 3 | private gridTrueSampleCount: number[]; 4 | private gridTrueDensities: number[]; 5 | private gridGeneratedDensities: number[]; 6 | 7 | constructor(private numGrid: number) { 8 | this.gridTrueSampleCount = new Array(numGrid * numGrid).fill(0); 9 | this.gridTrueDensities = new Array(numGrid * numGrid).fill(0.0); 10 | this.gridGeneratedDensities = new Array(numGrid * numGrid); 11 | } 12 | 13 | private mapPointToGridIndex(point: [number, number]) { 14 | return Math.trunc(point[0] * this.numGrid) + 15 | this.numGrid * Math.trunc(point[1] * this.numGrid); 16 | } 17 | 18 | createGridsForTrue(trueAtlas: number[], numTrueSamples: number) { 19 | for (let i = 0; i < numTrueSamples; ++i) { 20 | const values = trueAtlas.splice(i * 2, i * 2 + 2); 21 | this.gridTrueSampleCount[this.mapPointToGridIndex( 22 | [values[0], values[1]])]++; 23 | this.gridTrueDensities[this.mapPointToGridIndex( 24 | [values[0], values[1]])] += 1.0 / numTrueSamples; 25 | } 26 | } 27 | 28 | updateGridsForGenerated(generatedSamples: Array<[number, number]>) { 29 | const numGeneratedSamples = generatedSamples.length; 30 | this.gridGeneratedDensities.fill(0.0); 31 | for (let i = 0; i < numGeneratedSamples; ++i) { 32 | this.gridGeneratedDensities[this.mapPointToGridIndex( 33 | generatedSamples[i])] += 1.0 / numGeneratedSamples; 34 | } 35 | } 36 | 37 | getKLDivergenceScore(): number { 38 | let score = 0.0; 39 | const smoothingEps = 0.0001; 40 | for (let j = 0; j < this.gridTrueDensities.length; ++j) { 41 | score += (this.gridTrueDensities[j] + smoothingEps) * Math.log2( 42 | (this.gridTrueDensities[j] + smoothingEps) / 43 | (this.gridGeneratedDensities[j] + smoothingEps)); 44 | } 45 | return score; 46 | } 47 | 48 | getJSDivergenceScore(): number { 49 | let leftJS = 0.0; 50 | let rightJS = 0.0; 51 | const smoothingEps = 0.0001; 52 | for (let j = 0; j < this.gridTrueDensities.length; ++j) { 53 | const averageDensity = 0.5 * 54 | (this.gridTrueDensities[j] + this.gridGeneratedDensities[j]); 55 | leftJS += (this.gridTrueDensities[j] + smoothingEps) * Math.log2( 56 | (this.gridTrueDensities[j] + smoothingEps) / 57 | (averageDensity + smoothingEps)); 58 | rightJS += (this.gridGeneratedDensities[j] + smoothingEps) * Math.log2( 59 | (this.gridGeneratedDensities[j] + smoothingEps) / 60 | (averageDensity + smoothingEps)); 61 | } 62 | return 0.5 * (leftJS + rightJS); 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /demo/ganlab_input_providers.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | export interface InputProvider { 4 | getNextCopy(): tf.Tensor2D; 5 | disposeCopy(copy: tf.Tensor); 6 | } 7 | 8 | export abstract class GANLabInputProviderBuilder { 9 | protected atlas: tf.Tensor2D; 10 | protected providerCounter: number; 11 | 12 | constructor(protected batchSize: number) { 13 | this.providerCounter = -1; 
14 | } 15 | 16 | protected abstract generateAtlas(): void; 17 | 18 | abstract getInputProvider(fixStarting?: boolean): InputProvider; 19 | } 20 | 21 | export class GANLabNoiseProviderBuilder extends 22 | GANLabInputProviderBuilder { 23 | 24 | constructor( 25 | private noiseSize: number, private noiseType: string, 26 | private atlasSize: number, batchSize: number) { 27 | super(batchSize); 28 | } 29 | 30 | generateAtlas() { 31 | if (this.noiseType === '1D Gaussian' || 32 | this.noiseType === '2D Gaussian') { 33 | this.atlas = tf.truncatedNormal( 34 | [this.atlasSize, this.noiseSize], 0.5, 0.25); 35 | } else { 36 | this.atlas = tf.randomUniform( 37 | [this.atlasSize, this.noiseSize], 0.0, 1.0); 38 | } 39 | } 40 | 41 | getInputProvider(fixStarting?: boolean): InputProvider { 42 | const provider = this; 43 | return { 44 | getNextCopy(): tf.Tensor2D { 45 | provider.providerCounter++; 46 | return provider.atlas.slice( 47 | [fixStarting ? 0 : 48 | (provider.providerCounter * provider.batchSize) % 49 | provider.atlasSize, 0], 50 | [provider.batchSize, provider.noiseSize] 51 | ); 52 | }, 53 | disposeCopy(copy: tf.Tensor) { 54 | copy.dispose(); 55 | } 56 | }; 57 | } 58 | 59 | getNoiseSample(): Float32Array { 60 | return this.atlas.slice( 61 | [0, 0], [this.batchSize, this.noiseSize]).dataSync() as Float32Array; 62 | } 63 | } 64 | 65 | export class GANLabTrueSampleProviderBuilder extends 66 | GANLabInputProviderBuilder { 67 | 68 | private inputAtlasList: number[]; 69 | 70 | constructor( 71 | private atlasSize: number, 72 | private selectedShapeName: string, 73 | private drawingPositions: Array<[number, number]>, 74 | batchSize: number) { 75 | super(batchSize); 76 | this.inputAtlasList = []; 77 | } 78 | 79 | generateAtlas() { 80 | for (let i = 0; i < this.atlasSize; ++i) { 81 | const distribution = this.sampleFromTrueDistribution( 82 | this.selectedShapeName, this.drawingPositions); 83 | this.inputAtlasList.push(distribution[0]); 84 | this.inputAtlasList.push(distribution[1]); 85 | } 86 | this.atlas = tf.tensor2d(this.inputAtlasList, [this.atlasSize, 2]); 87 | } 88 | 89 | getInputProvider(fixStarting?: boolean): InputProvider { 90 | const provider = this; 91 | return { 92 | getNextCopy(): tf.Tensor2D { 93 | provider.providerCounter++; 94 | return provider.atlas.slice( 95 | [fixStarting ? 
0 : 96 | (provider.providerCounter * provider.batchSize) % 97 | provider.atlasSize, 0], 98 | [provider.batchSize, 2] 99 | ); 100 | }, 101 | disposeCopy(copy: tf.Tensor) { 102 | copy.dispose(); 103 | } 104 | }; 105 | } 106 | 107 | getInputAtlas(): number[] { 108 | return this.inputAtlasList; 109 | } 110 | 111 | private sampleFromTrueDistribution( 112 | selectedShapeName: string, drawingPositions: Array<[number, number]>) { 113 | const rand = Math.random(); 114 | switch (selectedShapeName) { 115 | case 'drawing': { 116 | const index = Math.floor(drawingPositions.length * rand); 117 | return [ 118 | drawingPositions[index][0] + 119 | 0.02 * this.randNormal(), 120 | drawingPositions[index][1] + 121 | 0.02 * this.randNormal() 122 | ]; 123 | } 124 | case 'line': { 125 | return [ 126 | 0.8 - 0.75 * rand + 0.01 * this.randNormal(), 127 | 0.6 + 0.3 * rand + 0.01 * this.randNormal() 128 | ]; 129 | } 130 | case 'gaussians': { 131 | if (rand < 0.5) { 132 | return [ 133 | 0.3 + 0.1 * this.randNormal(), 134 | 0.7 + 0.1 * this.randNormal() 135 | ]; 136 | } else { 137 | return [ 138 | 0.7 + 0.05 * this.randNormal(), 139 | 0.4 + 0.2 * this.randNormal() 140 | ]; 141 | } 142 | } 143 | case 'ring': { 144 | return [ 145 | 0.5 + 0.3 * Math.cos(rand * Math.PI * 2) + 146 | 0.025 * this.randNormal(), 147 | 0.45 + 0.25 * Math.sin(rand * Math.PI * 2) + 148 | 0.025 * this.randNormal(), 149 | ]; 150 | } 151 | case 'disjoint': { 152 | const stdev = 0.025; 153 | if (rand < 0.333) { 154 | return [ 155 | 0.35 + stdev * this.randNormal(), 156 | 0.75 + stdev * this.randNormal() 157 | ]; 158 | } else if (rand < 0.666) { 159 | return [ 160 | 0.75 + stdev * this.randNormal(), 161 | 0.6 + stdev * this.randNormal() 162 | ]; 163 | } else { 164 | return [ 165 | 0.45 + stdev * this.randNormal(), 166 | 0.35 + stdev * this.randNormal() 167 | ]; 168 | } 169 | } 170 | default: { 171 | throw new Error('Invalid true distribution'); 172 | } 173 | } 174 | } 175 | 176 | randNormal() { 177 | const u = 1 - Math.random(); 178 | const v = 1 - Math.random(); 179 | return Math.sqrt(-2.0 * Math.log(u)) * Math.cos(2.0 * Math.PI * v); 180 | } 181 | } 182 | 183 | export class GANLabUniformNoiseProviderBuilder extends 184 | GANLabInputProviderBuilder { 185 | 186 | constructor( 187 | private noiseSize: number, 188 | private numManifoldCells: number, batchSize: number) { 189 | super(batchSize); 190 | } 191 | 192 | generateAtlas() { 193 | const inputAtlasList = []; 194 | if (this.noiseSize === 1) { 195 | for (let i = 0; i < this.numManifoldCells + 1; ++i) { 196 | inputAtlasList.push(i / this.numManifoldCells); 197 | } 198 | } else if (this.noiseSize === 2) { 199 | for (let i = 0; i < this.numManifoldCells + 1; ++i) { 200 | for (let j = 0; j < this.numManifoldCells + 1; ++j) { 201 | inputAtlasList.push(i / this.numManifoldCells); 202 | inputAtlasList.push(j / this.numManifoldCells); 203 | } 204 | } 205 | } 206 | while ((inputAtlasList.length / this.noiseSize) % this.batchSize > 0) { 207 | inputAtlasList.push(0.5); 208 | } 209 | this.atlas = tf.tensor2d(inputAtlasList, 210 | [inputAtlasList.length / this.noiseSize, this.noiseSize]); 211 | } 212 | 213 | getInputProvider(): InputProvider { 214 | const provider = this; 215 | return { 216 | getNextCopy(): tf.Tensor2D { 217 | provider.providerCounter++; 218 | if (provider.providerCounter * provider.batchSize > 219 | Math.pow(provider.numManifoldCells + 1, provider.noiseSize)) { 220 | provider.providerCounter = 0; 221 | } 222 | return provider.atlas.slice( 223 | [ 224 | (provider.providerCounter * 
provider.batchSize) % 225 | Math.pow(provider.numManifoldCells + 1, provider.noiseSize), 226 | 0 227 | ], 228 | [provider.batchSize, provider.noiseSize]); 229 | }, 230 | disposeCopy(copy: tf.Tensor) { 231 | copy.dispose(); 232 | } 233 | }; 234 | } 235 | 236 | calculateDensitiesForGaussian(): number[] { 237 | if (this.noiseSize === 2) { 238 | const densities: number[] = []; 239 | for (let i = 0; i < this.numManifoldCells; ++i) { 240 | for (let j = 0; j < this.numManifoldCells; ++j) { 241 | densities.push(this.probDensity( 242 | (i + 0.5) / this.numManifoldCells, 243 | (j + 0.5) / this.numManifoldCells)); 244 | } 245 | } 246 | return densities; 247 | } else { 248 | return []; 249 | } 250 | } 251 | 252 | private probDensity(x: number, y: number) { 253 | const mu = 0.5; 254 | const std = 0.25; 255 | return 1.0 / (2.0 * Math.PI * std * std) * Math.exp(-0.5 / 256 | (std * std) * ((x - mu) * (x - mu) + (y - mu) * (y - mu))); 257 | } 258 | } 259 | 260 | export class GANLabUniformSampleProviderBuilder extends 261 | GANLabInputProviderBuilder { 262 | 263 | constructor(private numGridCells: number, batchSize: number) { 264 | super(batchSize); 265 | } 266 | 267 | generateAtlas() { 268 | const inputAtlasList = []; 269 | for (let j = 0; j < this.numGridCells; ++j) { 270 | for (let i = 0; i < this.numGridCells; ++i) { 271 | inputAtlasList.push((i + 0.5) / this.numGridCells); 272 | inputAtlasList.push((j + 0.5) / this.numGridCells); 273 | } 274 | } 275 | this.atlas = tf.tensor2d(inputAtlasList, 276 | [this.numGridCells * this.numGridCells, 2]); 277 | } 278 | 279 | getInputProvider(): InputProvider { 280 | const provider = this; 281 | return { 282 | getNextCopy(): tf.Tensor2D { 283 | provider.providerCounter++; 284 | return provider.atlas.slice( 285 | [ 286 | (provider.providerCounter * provider.batchSize) % 287 | (provider.numGridCells * provider.numGridCells), 288 | 0 289 | ], 290 | [provider.batchSize, 2]); 291 | }, 292 | disposeCopy(copy: tf.Tensor) { 293 | copy.dispose(); 294 | } 295 | }; 296 | } 297 | } 298 | -------------------------------------------------------------------------------- /demo/ganlab_models.ts: -------------------------------------------------------------------------------- 1 | import * as tf from '@tensorflow/tfjs-core'; 2 | 3 | // Hack to prevent error when using grads (doesn't allow this in model). 4 | let dVariables: tf.Variable[]; 5 | let numDiscriminatorLayers: number; 6 | let batchSize: number; 7 | 8 | export class GANLabModel { 9 | dVariables: tf.Variable[]; 10 | gVariables: tf.Variable[]; 11 | 12 | dOptimizer: tf.Optimizer; 13 | gOptimizer: tf.Optimizer; 14 | lossType: string; 15 | 16 | constructor( 17 | private noiseSize: number, 18 | private numGeneratorLayers: number, 19 | private numDiscriminatorLayers: number, 20 | private numGeneratorNeurons: number, 21 | private numDiscriminatorNeurons: number, 22 | private batchSize: number, 23 | lossType: string 24 | ) { } 25 | 26 | initializeModelVariables() { 27 | if (this.dVariables) { 28 | this.dVariables.forEach((v: tf.Tensor) => v.dispose()); 29 | } 30 | if (this.gVariables) { 31 | this.gVariables.forEach((v: tf.Tensor) => v.dispose()); 32 | } 33 | // Filter variable nodes for optimizers. 34 | this.dVariables = []; 35 | this.gVariables = []; 36 | 37 | // Generator. 
38 | const gfc0W = tf.variable( 39 | tf.randomNormal( 40 | [this.noiseSize, this.numGeneratorNeurons], 0, 1.0 / Math.sqrt(2))); 41 | const gfc0B = tf.variable( 42 | tf.zeros([this.numGeneratorNeurons])); 43 | 44 | this.gVariables.push(gfc0W); 45 | this.gVariables.push(gfc0B); 46 | 47 | for (let i = 0; i < this.numGeneratorLayers; ++i) { 48 | const gfcW = tf.variable( 49 | tf.randomNormal( 50 | [this.numGeneratorNeurons, this.numGeneratorNeurons], 0, 51 | 1.0 / Math.sqrt(this.numGeneratorNeurons))); 52 | const gfcB = tf.variable(tf.zeros([this.numGeneratorNeurons])); 53 | 54 | this.gVariables.push(gfcW); 55 | this.gVariables.push(gfcB); 56 | } 57 | 58 | const gfcLastW = tf.variable( 59 | tf.randomNormal( 60 | [this.numGeneratorNeurons, 2], 0, 61 | 1.0 / Math.sqrt(this.numGeneratorNeurons))); 62 | const gfcLastB = tf.variable(tf.zeros([2])); 63 | 64 | this.gVariables.push(gfcLastW); 65 | this.gVariables.push(gfcLastB); 66 | 67 | // Discriminator. 68 | const dfc0W = tf.variable( 69 | tf.randomNormal( 70 | [2, this.numDiscriminatorNeurons], 0, 1.0 / Math.sqrt(2)), 71 | true); 72 | const dfc0B = tf.variable(tf.zeros([this.numDiscriminatorNeurons])); 73 | 74 | this.dVariables.push(dfc0W); 75 | this.dVariables.push(dfc0B); 76 | 77 | for (let i = 0; i < this.numDiscriminatorLayers; ++i) { 78 | const dfcW = tf.variable( 79 | tf.randomNormal( 80 | [this.numDiscriminatorNeurons, this.numDiscriminatorNeurons], 0, 81 | 1.0 / Math.sqrt(this.numDiscriminatorNeurons))); 82 | const dfcB = tf.variable(tf.zeros([this.numDiscriminatorNeurons])); 83 | 84 | this.dVariables.push(dfcW); 85 | this.dVariables.push(dfcB); 86 | } 87 | 88 | const dfcLastW = tf.variable( 89 | tf.randomNormal( 90 | [this.numDiscriminatorNeurons, 1], 0, 91 | 1.0 / Math.sqrt(this.numDiscriminatorNeurons))); 92 | const dfcLastB = tf.variable(tf.zeros([1])); 93 | 94 | this.dVariables.push(dfcLastW); 95 | this.dVariables.push(dfcLastB); 96 | 97 | // Hack to prevent error when using grads (doesn't allow this in model). 
98 | dVariables = this.dVariables; 99 | numDiscriminatorLayers = this.numDiscriminatorLayers; 100 | batchSize = this.batchSize; 101 | } 102 | 103 | async loadPretrainedWeights(loadedModel: tf.io.ModelArtifacts) { 104 | const decoded = tf.io.decodeWeights( 105 | loadedModel.weightData, loadedModel.weightSpecs); 106 | 107 | this.dVariables.forEach((v: tf.Variable, i) => { 108 | v.assign(decoded[`d-${i}`]); 109 | }); 110 | this.gVariables.forEach((v: tf.Variable, i) => { 111 | v.assign(decoded[`g-${i}`]); 112 | }); 113 | 114 | dVariables = this.dVariables; 115 | } 116 | 117 | generator(noiseTensor: tf.Tensor2D): tf.Tensor2D { 118 | const gfc0W = this.gVariables[0] as tf.Tensor2D; 119 | const gfc0B = this.gVariables[1]; 120 | 121 | let network = noiseTensor.matMul(gfc0W) 122 | .add(gfc0B) 123 | .relu(); 124 | 125 | for (let i = 0; i < this.numGeneratorLayers; ++i) { 126 | const gfcW = this.gVariables[2 + i * 2] as tf.Tensor2D; 127 | const gfcB = this.gVariables[3 + i * 2]; 128 | 129 | network = network.matMul(gfcW) 130 | .add(gfcB) 131 | .relu(); 132 | } 133 | 134 | const gfcLastW = 135 | this.gVariables[2 + this.numGeneratorLayers * 2] as tf.Tensor2D; 136 | const gfcLastB = 137 | this.gVariables[3 + this.numGeneratorLayers * 2]; 138 | 139 | const generatedTensor: tf.Tensor2D = network.matMul(gfcLastW) 140 | .add(gfcLastB) 141 | .tanh() as tf.Tensor2D; 142 | 143 | return generatedTensor; 144 | } 145 | 146 | discriminator(inputTensor: tf.Tensor2D): tf.Tensor1D { 147 | const dfc0W = /*this.*/dVariables[0] as tf.Tensor2D; 148 | const dfc0B = /*this.*/dVariables[1]; 149 | 150 | let network = inputTensor.matMul(dfc0W) 151 | .add(dfc0B) 152 | .relu(); 153 | 154 | for (let i = 0; i < /*this.*/numDiscriminatorLayers; ++i) { 155 | const dfcW = /*this.*/dVariables[2 + i * 2] as tf.Tensor2D; 156 | const dfcB = /*this.*/dVariables[3 + i * 2]; 157 | 158 | network = network.matMul(dfcW) 159 | .add(dfcB) 160 | .relu(); 161 | } 162 | const dfcLastW = 163 | /*this.*/dVariables[2 + /*this.*/numDiscriminatorLayers * 2] as 164 | tf.Tensor2D; 165 | const dfcLastB = 166 | /*this.*/dVariables[3 + /*this.*/numDiscriminatorLayers * 2]; 167 | 168 | const predictionTensor: tf.Tensor1D = 169 | network.matMul(dfcLastW) 170 | .add(dfcLastB) 171 | .sigmoid() 172 | .reshape([/*this.*/batchSize]); 173 | 174 | return predictionTensor; 175 | } 176 | 177 | // Define losses. 
178 | dLoss(truePred: tf.Tensor1D, generatedPred: tf.Tensor1D) { 179 | if (this.lossType === 'LeastSq loss') { 180 | return tf.add( 181 | truePred.sub(tf.scalar(1)).square().mean(), 182 | generatedPred.square().mean() 183 | ) as tf.Scalar; 184 | } else { 185 | return tf.add( 186 | truePred.log().mul(tf.scalar(0.95)).mean(), 187 | tf.sub(tf.scalar(1), generatedPred).log().mean() 188 | ).mul(tf.scalar(-1)) as tf.Scalar; 189 | } 190 | } 191 | 192 | gLoss(generatedPred: tf.Tensor1D) { 193 | if (this.lossType === 'LeastSq loss') { 194 | return generatedPred.sub(tf.scalar(1)).square().mean() as tf.Scalar; 195 | } else { 196 | return generatedPred.log().mean().mul(tf.scalar(-1)) as tf.Scalar; 197 | } 198 | } 199 | 200 | updateOptimizer( 201 | dOrG: string, optimizerType: string, learningRate: number) { 202 | if (optimizerType === 'Adam') { 203 | const beta1 = 0.9; 204 | const beta2 = 0.999; 205 | if (dOrG === 'D') { 206 | this.dOptimizer = tf.train.adam(learningRate, beta1, beta2); 207 | } 208 | if (dOrG === 'G') { 209 | this.gOptimizer = tf.train.adam(learningRate, beta1, beta2); 210 | } 211 | } else { 212 | if (dOrG === 'D') { 213 | this.dOptimizer = tf.train.sgd(learningRate); 214 | } 215 | if (dOrG === 'G') { 216 | this.gOptimizer = tf.train.sgd(learningRate); 217 | } 218 | } 219 | } 220 | } 221 | -------------------------------------------------------------------------------- /demo/images/distribution-disjoint.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/poloclub/ganlab/b6deea8ec40c38ec2879f33b0dbe1645b59f4ef0/demo/images/distribution-disjoint.png -------------------------------------------------------------------------------- /demo/images/distribution-gaussians.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/poloclub/ganlab/b6deea8ec40c38ec2879f33b0dbe1645b59f4ef0/demo/images/distribution-gaussians.png -------------------------------------------------------------------------------- /demo/images/distribution-line.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/poloclub/ganlab/b6deea8ec40c38ec2879f33b0dbe1645b59f4ef0/demo/images/distribution-line.png -------------------------------------------------------------------------------- /demo/images/distribution-ring.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/poloclub/ganlab/b6deea8ec40c38ec2879f33b0dbe1645b59f4ef0/demo/images/distribution-ring.png -------------------------------------------------------------------------------- /demo/images/github-mark.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/poloclub/ganlab/b6deea8ec40c38ec2879f33b0dbe1645b59f4ef0/demo/images/github-mark.png -------------------------------------------------------------------------------- /demo/images/share.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/poloclub/ganlab/b6deea8ec40c38ec2879f33b0dbe1645b59f4ef0/demo/images/share.png -------------------------------------------------------------------------------- /demo/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 21 | 22 | 23 | GAN Lab: Play with Generative 
Adversarial Networks in Your Browser!
181 |
182 |
183 | Play with Generative Adversarial Networks (GANs) in your browser! 184 |
185 | 191 |
192 |
193 | 194 |
195 |
196 | Loading... 197 |
198 | It may take a few tens of seconds. 199 |
200 | 201 |
202 | 203 |
204 |

What is a GAN?

205 |

206 | Many machine learning systems look at some kind of complicated input (say, an image) and produce a simple output (a label like "cat"). By contrast, the goal of a generative model is something like the opposite: take a small piece of input—perhaps a few random numbers—and produce a complex output, like an image of a realistic-looking face. A generative adversarial network (GAN) is an especially effective type of generative model, introduced only a few years ago, which has been a subject of intense interest in the machine learning community. 207 |

208 | 209 |

210 | You might wonder why we want a system that produces realistic images, or plausible simulations of any other kind of data. Besides the intrinsic intellectual challenge, this turns out to be a surprisingly handy tool, with applications ranging from art to enhancing blurry images. 211 |

212 | 213 |

How does a GAN work?

214 |

215 | The idea of a machine "creating" realistic images from scratch can seem like magic, but GANs use two key tricks to turn a vague, seemingly impossible goal into reality. 216 |

217 | 218 |

219 | The first idea, not new to GANs, is to use randomness as an ingredient. At a basic level, this makes sense: it wouldn't be very exciting if you built a system that produced the same face each time it ran. Just as important, though, is that thinking in terms of probabilities also helps us translate the problem of generating images into a natural mathematical framework. We obviously don't want to pick images uniformly at random, since that would just produce noise. Instead, we want our system to learn about which images are likely to be faces, and which aren't. Mathematically, this involves modeling a probability distribution on images, that is, a function that tells us which images are likely to be faces and which aren't. This type of problem—modeling a function on a high-dimensional space—is exactly the sort of thing neural networks are made for. 220 |

221 | 222 |

223 | The big insight that defines a GAN is to set up this modeling problem as a kind of contest. This is where the "adversarial" part of the name comes from. The key idea is to build not one, but two competing networks: a generator and a discriminator. The generator tries to create random synthetic outputs (for instance, images of faces), while the discriminator tries to tell these apart from real outputs (say, a database of celebrities). The hope is that as the two networks face off, they'll both get better and better—with the end result being a generator network that produces realistic outputs. 224 |
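For reference, this contest can be written as a single minimax objective over the discriminator D and the generator G. This is the standard log-loss formulation from the original GAN paper; the demo's "Log loss" option follows it, using this objective for the discriminator (with slight label smoothing) and the common non-saturating variant, maximizing E[log D(G(z))], for the generator:

    \min_G \max_D \; \mathbb{E}_{x \sim p_{\mathrm{data}}}\big[\log D(x)\big] + \mathbb{E}_{z \sim p_z}\big[\log\big(1 - D(G(z))\big)\big]

The discriminator is rewarded for scoring real samples x near 1 and generated samples G(z) near 0, while the generator is rewarded for producing samples that the discriminator scores as real.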

225 | 226 |

227 | To sum up: Generative adversarial networks are neural networks that learn to choose samples from a special distribution (the "generative" part of the name), and they do this by setting up a competition (hence "adversarial"). 228 |

229 | 230 |

What's happening in the visualization?

231 |

232 | GANs are complicated beasts, and the visualization has a lot going on. Here are the basic ideas. 233 |

234 | 235 |

236 | First, we're not visualizing anything as complex as generating realistic images. Instead, we're showing a GAN that learns a distribution of points in just two dimensions. There's no real application of something this simple, but it's much easier to show the system's mechanics. For one thing, probability distributions in plain old 2D (x,y) space are much easier to visualize than distributions in the space of high-resolution images. 237 |

238 | 239 |

240 | Pick a data distribution. 241 |

242 | 243 |

244 | At top, you can choose a probability distribution for the GAN to learn, which we visualize as a set of data samples. Once you choose one, we show them in two places: a smaller version in the model overview graph view on the left; and a larger version in the layered distributions view on the right. 245 |
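Each preset distribution is just a simple sampler over the unit square. As a concrete illustration, here is a minimal sketch of the "ring" preset, reusing the constants from the true-sample provider in demo/ganlab_input_providers.ts (the standalone function names here are ours):

// Draw one sample from the "ring" data distribution: a point on a ring
// centered near (0.5, 0.45) with radii 0.3 and 0.25, plus a little
// Gaussian noise, as in demo/ganlab_input_providers.ts.
function randNormal(): number {
  // Box-Muller transform for a standard normal sample.
  const u = 1 - Math.random();
  const v = 1 - Math.random();
  return Math.sqrt(-2.0 * Math.log(u)) * Math.cos(2.0 * Math.PI * v);
}

function sampleRing(): [number, number] {
  const angle = Math.random() * Math.PI * 2;
  return [
    0.5 + 0.3 * Math.cos(angle) + 0.025 * randNormal(),
    0.45 + 0.25 * Math.sin(angle) + 0.025 * randNormal(),
  ];
}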

246 | 247 |
248 | 249 |
250 | Figure 1. The selected data distribution is shown in two places. 251 |
252 |
253 | 254 |

255 | We designed the two views to help you better understand how a GAN works to generate realistic samples: 256 |
257 | (1) The model overview graph shows the architecture of a GAN, its major components and how they are connected, and also visualizes results produced by the components; 258 |
259 | (2) The layered distributions view overlays the visualizations of the components from the model overview graph, so you can more easily compare the component outputs when analyzing the model. 260 |

261 | 262 |

263 | Let training begin. 264 |

265 | 266 |

267 | To start training the GAN model, click the play button () on the toolbar. Besides real samples from your chosen distribution, you'll also see fake samples that are generated by the model. Fake samples' positions are continually updated as the training progresses. A perfect GAN will create fake samples whose distribution is indistinguishable from that of the real samples. When that happens, in the layered distributions view, you will see the two distributions nicely overlap. 268 |
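Under the hood, each press of the play button repeatedly runs one training iteration. Stripped of all visualization code, an iteration condenses to the following sketch of what demo/ganlab.ts does (the helper function wrapper is ours; kDSteps and kGSteps are the configurable numbers of discriminator and generator updates per iteration):

import * as tf from '@tensorflow/tfjs-core';
import { GANLabModel } from './ganlab_models';
import { InputProvider } from './ganlab_input_providers';

function trainOneIteration(
    model: GANLabModel, noiseProvider: InputProvider,
    trueSampleProvider: InputProvider, kDSteps: number, kGSteps: number) {
  // Discriminator updates: push D(real) toward 1 and D(G(noise)) toward 0.
  for (let j = 0; j < kDSteps; j++) {
    model.dOptimizer.minimize(() => {
      const noise = noiseProvider.getNextCopy() as tf.Tensor2D;
      const real = trueSampleProvider.getNextCopy() as tf.Tensor2D;
      const truePred = model.discriminator(real);
      const fakePred = model.discriminator(model.generator(noise));
      return model.dLoss(truePred, fakePred);
    }, true, model.dVariables);  // only the discriminator's weights change
  }
  // Generator updates: push D(G(noise)) toward 1, i.e. fool the discriminator.
  for (let j = 0; j < kGSteps; j++) {
    model.gOptimizer.minimize(() => {
      const noise = noiseProvider.getNextCopy() as tf.Tensor2D;
      const fakePred = model.discriminator(model.generator(noise));
      return model.gLoss(fakePred);
    }, true, model.gVariables);  // only the generator's weights change
  }
}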

269 | 270 |
271 | 272 |
Figure 2. Fake samples' positions are continually updated as the training progresses. Then, the distributions of the real and fake samples nicely overlap. 274 |
275 |
276 | 277 |

278 | Visualizing generator and discriminator. 279 |

280 | 281 |

282 | Recall that the generator and discriminator within a GAN are having a little contest, competing against each other, iteratively updating the fake samples to become more similar to the real ones. GAN Lab visualizes the interactions between them. 283 |

284 | 285 |

286 | Generator. 287 | As described earlier, the generator is a function that transforms a random input into a synthetic output. In GAN Lab, a random input is a 2D sample with an (x, y) value (drawn from a uniform or Gaussian distribution), and the output is also a 2D sample, but mapped into a different position, which is a fake sample. One way to visualize this mapping is using a manifold [Olah, 2014]. The input space is represented as a uniform square grid. As the function maps positions in the input space into new positions, if we visualize the output, the whole grid, now consisting of irregular quadrangles, would look like a warped version of the original regular grid. The area (or density) of each (warped) cell has now changed, and we encode the density as opacity, so a higher opacity means more samples in a smaller space. A very fine-grained manifold will look almost the same as the visualization of the fake samples. This visualization shows how the generator learns a mapping function to make its output look similar to the distribution of the real samples. 288 |
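The density encoding comes from simple geometry. Here is a sketch of the idea, following createGridCellsFromManifoldData in demo/ganlab.ts: take the four generated corner points of one grid cell, compute the cell's area with the shoelace formula, and normalize over all cells; a smaller area means the generator squeezes more probability mass into that region, so the cell is drawn more opaque.

// Area of one warped manifold cell from its four corners (in order),
// via the shoelace formula, as in demo/ganlab.ts.
function cellArea(corners: Array<[number, number]>): number {
  let area = 0.0;
  for (let j = 0; j < 4; ++j) {
    area += corners[j % 4][0] * corners[(j + 1) % 4][1] -
            corners[j % 4][1] * corners[(j + 1) % 4][0];
  }
  return 0.5 * Math.abs(area);
}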

289 | 290 |
291 | 292 |
293 | Figure 3. The generator's data transformation is visualized as a manifold, which turns input noise (leftmost) into fake samples (rightmost). 294 |
295 |
296 | 297 |

298 | Discriminator. 299 | As the generator creates fake samples, the discriminator, a binary classifier, tries to tell them apart from the real samples. GAN Lab visualizes its decision boundary as a 2D heatmap (similar to TensorFlow Playground). The background color of each grid cell encodes the confidence value of the classifier's result. Darker green means that samples in that region are more likely to be real; darker purple, more likely to be fake. As a GAN approaches the optimum, the whole heatmap will become more gray overall, signalling that the discriminator can no longer easily distinguish fake examples from the real ones. 300 |
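The heatmap itself is cheap to compute: evaluate the discriminator on a uniform grid of (x, y) points and color each cell by the returned probability. A condensed sketch of how demo/ganlab.ts does this (the function wrapper and parameter names are ours):

import * as tf from '@tensorflow/tfjs-core';
import { GANLabModel } from './ganlab_models';
import { InputProvider } from './ganlab_input_providers';

// Returns one confidence value in (0, 1) per grid cell; values near 1 are
// rendered green (likely real) and values near 0 purple (likely fake).
function discriminatorHeatmap(
    model: GANLabModel, uniformInputProvider: InputProvider,
    numGridCells: number, batchSize: number): number[] {
  const dData: number[] = [];
  for (let i = 0; i < numGridCells * numGridCells / batchSize; ++i) {
    const inputBatch = uniformInputProvider.getNextCopy() as tf.Tensor2D;
    const result = model.discriminator(inputBatch);
    dData.push(...Array.from(result.dataSync()));
  }
  return dData;
}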

301 | 302 |
303 | 304 |
Figure 4. The discriminator's performance can be interpreted through a 2D heatmap. Here, the discriminator is performing well, since most real samples lie on its classification surface’s green region (and fake samples on the purple region). 306 |
307 |
308 | 309 |

310 | Understanding interplay between generator and discriminator. 311 |

312 | 313 |

314 | In a GAN, the two networks influence each other as they iteratively update themselves. A great use of GAN Lab is its visualization of how the generator incrementally improves so that its fake samples become more and more realistic. The generator does this by trying to fool the discriminator: its loss value decreases when the discriminator classifies fake samples as real (bad for the discriminator, but good for the generator). GAN Lab visualizes the gradients (as pink lines) for the fake samples, which indicate the directions those samples should move in for the generator to better fool the discriminator. 315 |
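Concretely, the pink lines are gradients of the discriminator's output with respect to the fake samples' positions. A condensed sketch of the corresponding computation in demo/ganlab.ts (variable names follow that file):

// gradFunction(x) returns d(discriminator output)/dx for each input point.
const gradFunction = tf.grad(this.model.discriminator);
const gSamples = this.model.generator(noiseFixedBatchForGrad);  // shape [batch, 2]
const grad = gradFunction(gSamples);  // same shape as gSamples
// Each row of `grad` points in the direction that would most increase the
// discriminator's "real" score, i.e. the direction that fake sample should
// move to better fool the current discriminator.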

316 | 317 |
318 | 319 |
320 | Figure 5. Fake samples' movement directions are indicated by the generator’s gradients (pink lines) based on those samples' current locations and the discriminator's current classification surface (visualized by background colors). 321 |
322 |
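One way to obtain such per-sample gradients is to differentiate the generator's loss with respect to the fake samples' positions, as sketched below with TensorFlow.js's tf.grad. This illustration reuses the `generator`, `discriminator`, and log-loss assumptions from the earlier sketch; it is not GAN Lab's exact code.

```ts
// Generator loss as a function of the fake samples' positions.
const generatorLoss = (fake: tf.Tensor) =>
    tf.neg(tf.mean(tf.log(discriminator(fake as tf.Tensor2D))));

// d(loss) / d(fake positions): one 2D gradient vector per fake sample.
const lossGrad = tf.grad(generatorLoss);

const fakeSamples = generator(tf.randomUniform([30, 2], 0, 1) as tf.Tensor2D);
// Moving each fake sample along the negative gradient lowers the generator's
// loss, i.e. makes the discriminator more likely to call that sample real.
const directions = tf.neg(lossGrad(fakeSamples));
```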
323 | 324 |

325 | This way, the generator gradually improves to produce samples that are ever more realistic. Once the fake samples are updated, the discriminator updates accordingly to fine-tune its decision boundary, and awaits the next batch of fake samples that try to fool it. This iterative update process continues until the discriminator cannot tell real and fake samples apart. 326 |

327 | 328 |

329 | Playing with interactive features. 330 |

331 | 332 |

333 | GAN Lab has many cool features that support interactive experimentation. 334 |

335 |
336 | Interactive hyperparameter adjustment 337 |
338 |
339 | Click the edit icon () to reveal individual hyperparameters, and edit them on the fly during training. 340 |
341 |
342 | User-defined data distribution 343 |
344 |
345 | If you don't like our selection of distributions, draw your own by clicking the icon () at the end of the data distributions list. 346 |
347 |
348 | Slow-motion mode 349 |
350 |
351 | Lost track of the animation? Then you can slow it down by clicking the slow-motion icon () to enter slow-mo. 352 | Check out this video 353 |
354 |
355 | Manual step-by-step execution 356 |
357 |
358 | If you want more control, you can manually run the training one iteration at a time by clicking the step icon (). 359 | Check out this video 360 |
361 |
362 |

363 | 364 |

365 | Check out the following video for a quick look at GAN Lab's features. 366 |

    367 | 371 | 375 | 379 | 383 | 387 |
388 | 389 | 394 |

395 | 396 |

How is it implemented?

397 |

398 | GAN Lab uses TensorFlow.js, 399 | an in-browser GPU-accelerated deep learning library. 400 | Everything, from model training to visualization, is implemented with 401 | JavaScript. You only need a web browser like Chrome to run GAN Lab. 402 | Our implementation approach significantly broadens people's access to 403 | interactive tools for deep learning. 404 | The source code is available on 405 | GitHub. 406 |
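One practical detail of running everything in the browser: GPU-backed tensors in TensorFlow.js are not garbage-collected automatically, so per-frame work is typically wrapped in tf.tidy. The snippet below shows this common pattern as an assumption about typical TensorFlow.js usage, not a description of GAN Lab's internals; `trainStep` refers to the earlier sketch.

```ts
// Common in-browser pattern: wrap each animation frame's work in tf.tidy so
// that intermediate GPU tensors are disposed after every step.
function runOneFrame(realBatch: tf.Tensor2D, noiseBatch: tf.Tensor2D) {
  tf.tidy(() => {
    trainStep(realBatch, noiseBatch);  // one D/G update, as sketched earlier
  });
  // Schedule the next frame so training and visualization stay responsive.
  requestAnimationFrame(() => { /* sample a new batch and call runOneFrame */ });
}
```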

407 | 408 |

Who developed GAN Lab?

409 |

410 | GAN Lab was created by 411 | Minsuk Kahng, 412 | Nikhil Thorat, 413 | Polo Chau, 414 | Fernanda Viégas, and 415 | Martin Wattenberg, 416 | as the result of a research collaboration between 417 | Georgia Tech and Google 418 | Brain/PAIR. 419 | We also thank Shan Carter and Daniel Smilkov, 420 | 421 | the Google Big Picture team and 422 | 423 | Google People + AI Research (PAIR), and 424 | the Georgia Tech Visualization Lab 425 | for their feedback. 426 |

427 | 428 |

429 | For more information, check out 430 | 431 | our research paper: 432 |

433 | 434 | 435 | 436 | 437 | 438 |
439 | Minsuk Kahng, 440 | Nikhil Thorat, 441 | Polo Chau, 442 | Fernanda Viégas, and 443 | Martin Wattenberg. 444 | "GAN Lab: Understanding Complex Deep Generative Models using 445 | Interactive Visual Experimentation." 446 | IEEE Transactions on Visualization and Computer Graphics, 25(1) 447 | (VAST 2018), 448 | Jan. 2019. 449 |
450 |
451 | 452 | 480 | 481 | 482 | 483 | -------------------------------------------------------------------------------- /demo/pretrained_models/pretrained_disjoint.json: -------------------------------------------------------------------------------- 1 | {"modelTopology":{"shape_name":"disjoint","iter_count":1710,"config":{"selectedNoiseType":"2D Gaussian","noiseSize":2,"numGeneratorLayers":2,"numDiscriminatorLayers":2,"numGeneratorNeurons":10,"numDiscriminatorNeurons":11,"dLearningRate":0.1,"gLearningRate":0.03,"dOptimizerType":"SGD","gOptimizerType":"SGD","lossType":"Log loss","kDSteps":1,"kGSteps":1}},"weightsManifest":[{"paths":["./pretrained_disjoint.weights.bin"],"weights":[{"name":"d-0","shape":[2,11],"dtype":"float32"},{"name":"d-1","shape":[11],"dtype":"float32"},{"name":"d-2","shape":[11,11],"dtype":"float32"},{"name":"d-3","shape":[11],"dtype":"float32"},{"name":"d-4","shape":[11,11],"dtype":"float32"},{"name":"d-5","shape":[11],"dtype":"float32"},{"name":"d-6","shape":[11,1],"dtype":"float32"},{"name":"d-7","shape":[1],"dtype":"float32"},{"name":"g-0","shape":[2,10],"dtype":"float32"},{"name":"g-1","shape":[10],"dtype":"float32"},{"name":"g-2","shape":[10,10],"dtype":"float32"},{"name":"g-3","shape":[10],"dtype":"float32"},{"name":"g-4","shape":[10,10],"dtype":"float32"},{"name":"g-5","shape":[10],"dtype":"float32"},{"name":"g-6","shape":[10,2],"dtype":"float32"},{"name":"g-7","shape":[2],"dtype":"float32"}]}]} -------------------------------------------------------------------------------- /demo/pretrained_models/pretrained_disjoint.weights.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/poloclub/ganlab/b6deea8ec40c38ec2879f33b0dbe1645b59f4ef0/demo/pretrained_models/pretrained_disjoint.weights.bin -------------------------------------------------------------------------------- /demo/pretrained_models/pretrained_gaussians.json: -------------------------------------------------------------------------------- 1 | {"modelTopology":{"shape_name":"gaussians","iter_count":1931,"config":{"selectedNoiseType":"2D Gaussian","noiseSize":2,"numGeneratorLayers":1,"numDiscriminatorLayers":1,"numGeneratorNeurons":8,"numDiscriminatorNeurons":8,"dLearningRate":0.1,"gLearningRate":0.03,"dOptimizerType":"SGD","gOptimizerType":"SGD","lossType":"Log loss","kDSteps":2,"kGSteps":1}},"weightsManifest":[{"paths":["./pretrained_gaussians.weights.bin"],"weights":[{"name":"d-0","shape":[2,8],"dtype":"float32"},{"name":"d-1","shape":[8],"dtype":"float32"},{"name":"d-2","shape":[8,8],"dtype":"float32"},{"name":"d-3","shape":[8],"dtype":"float32"},{"name":"d-4","shape":[8,1],"dtype":"float32"},{"name":"d-5","shape":[1],"dtype":"float32"},{"name":"g-0","shape":[2,8],"dtype":"float32"},{"name":"g-1","shape":[8],"dtype":"float32"},{"name":"g-2","shape":[8,8],"dtype":"float32"},{"name":"g-3","shape":[8],"dtype":"float32"},{"name":"g-4","shape":[8,2],"dtype":"float32"},{"name":"g-5","shape":[2],"dtype":"float32"}]}]} -------------------------------------------------------------------------------- /demo/pretrained_models/pretrained_gaussians.weights.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/poloclub/ganlab/b6deea8ec40c38ec2879f33b0dbe1645b59f4ef0/demo/pretrained_models/pretrained_gaussians.weights.bin -------------------------------------------------------------------------------- /demo/pretrained_models/pretrained_line.json: 
-------------------------------------------------------------------------------- 1 | {"modelTopology":{"shape_name":"line","iter_count":466,"config":{"selectedNoiseType":"2D Uniform","noiseSize":2,"numGeneratorLayers":1,"numDiscriminatorLayers":1,"numGeneratorNeurons":5,"numDiscriminatorNeurons":6,"dLearningRate":0.03,"gLearningRate":0.01,"dOptimizerType":"SGD","gOptimizerType":"SGD","lossType":"Log loss","kDSteps":2,"kGSteps":1}},"weightsManifest":[{"paths":["./pretrained_line.weights.bin"],"weights":[{"name":"d-0","shape":[2,6],"dtype":"float32"},{"name":"d-1","shape":[6],"dtype":"float32"},{"name":"d-2","shape":[6,6],"dtype":"float32"},{"name":"d-3","shape":[6],"dtype":"float32"},{"name":"d-4","shape":[6,1],"dtype":"float32"},{"name":"d-5","shape":[1],"dtype":"float32"},{"name":"g-0","shape":[2,5],"dtype":"float32"},{"name":"g-1","shape":[5],"dtype":"float32"},{"name":"g-2","shape":[5,5],"dtype":"float32"},{"name":"g-3","shape":[5],"dtype":"float32"},{"name":"g-4","shape":[5,2],"dtype":"float32"},{"name":"g-5","shape":[2],"dtype":"float32"}]}]} -------------------------------------------------------------------------------- /demo/pretrained_models/pretrained_line.weights.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/poloclub/ganlab/b6deea8ec40c38ec2879f33b0dbe1645b59f4ef0/demo/pretrained_models/pretrained_line.weights.bin -------------------------------------------------------------------------------- /demo/pretrained_models/pretrained_ring.json: -------------------------------------------------------------------------------- 1 | {"modelTopology":{"shape_name":"Ring","iter_count":812,"config":{"selectedNoiseType":"2D Uniform","noiseSize":2,"numGeneratorLayers":1,"numDiscriminatorLayers":2,"numGeneratorNeurons":10,"numDiscriminatorNeurons":9,"dLearningRate":0.1,"gLearningRate":0.03,"dOptimizerType":"SGD","gOptimizerType":"SGD","lossType":"Log loss","kDSteps":3,"kGSteps":1}},"weightsManifest":[{"paths":["../pretrained_models/pretrained_ring.weights.bin"],"weights":[{"name":"d-0","shape":[2,9],"dtype":"float32"},{"name":"d-1","shape":[9],"dtype":"float32"},{"name":"d-2","shape":[9,9],"dtype":"float32"},{"name":"d-3","shape":[9],"dtype":"float32"},{"name":"d-4","shape":[9,9],"dtype":"float32"},{"name":"d-5","shape":[9],"dtype":"float32"},{"name":"d-6","shape":[9,1],"dtype":"float32"},{"name":"d-7","shape":[1],"dtype":"float32"},{"name":"g-0","shape":[2,10],"dtype":"float32"},{"name":"g-1","shape":[10],"dtype":"float32"},{"name":"g-2","shape":[10,10],"dtype":"float32"},{"name":"g-3","shape":[10],"dtype":"float32"},{"name":"g-4","shape":[10,2],"dtype":"float32"},{"name":"g-5","shape":[2],"dtype":"float32"}]}]} -------------------------------------------------------------------------------- /demo/pretrained_models/pretrained_ring.weights.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/poloclub/ganlab/b6deea8ec40c38ec2879f33b0dbe1645b59f4ef0/demo/pretrained_models/pretrained_ring.weights.bin -------------------------------------------------------------------------------- /ganlab-teaser.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/poloclub/ganlab/b6deea8ec40c38ec2879f33b0dbe1645b59f4ef0/ganlab-teaser.png -------------------------------------------------------------------------------- /lib/chartjs.d.ts: 
-------------------------------------------------------------------------------- 1 | /** 2 | * @license 3 | * This project is licensed under the MIT license. 4 | * Copyrights are respective of each contributor listed at the beginning of each 5 | * definition file. 6 | * Permission is hereby granted, free of charge, to any person obtaining a copy 7 | * of this software and associated documentation files (the "Software"), to deal 8 | * in the Software without restriction, including without limitation the rights 9 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | * copies of the Software, and to permit persons to whom the Software is 11 | * furnished to do so, subject to the following conditions: 12 | * 13 | * The above copyright notice and this permission notice shall be included in 14 | * all copies or substantial portions of the Software. 15 | * 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | * SOFTWARE. 23 | */ 24 | 25 | // Type definitions for Chart.js 26 | // Project: https://github.com/nnnick/Chart.js 27 | // Definitions by: Alberto Nuti 28 | // Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped 29 | 30 | declare enum ChartType { line, bar, radar, doughnut, polarArea, bubble } 31 | declare enum TimeUnit { 32 | millisecond, 33 | second, 34 | minute, 35 | hour, 36 | day, 37 | week, 38 | month, 39 | quarter, 40 | year 41 | } 42 | interface ChartLegendItem { 43 | text?: string; 44 | fillStyle?: string; 45 | hidden?: boolean; 46 | lineCap?: string; 47 | lineDash?: number[]; 48 | lineDashOffset?: number; 49 | lineJoin?: string; 50 | lineWidth?: number; 51 | strokeStyle?: string; 52 | } 53 | interface ChartTooltipItem { 54 | xLabel?: string; 55 | yLabel?: string; 56 | datasetIndex?: number; 57 | index?: number; 58 | } 59 | interface ChartTooltipCallback { 60 | beforeTitle?: (item?: ChartTooltipItem[], data?: any) => void; 61 | title?: (item?: ChartTooltipItem[], data?: any) => void; 62 | afterTitle?: (item?: ChartTooltipItem[], data?: any) => void; 63 | beforeBody?: (item?: ChartTooltipItem[], data?: any) => void; 64 | beforeLabel?: (tooltipItem?: ChartTooltipItem, data?: any) => void; 65 | label?: (tooltipItem?: ChartTooltipItem, data?: any) => void; 66 | afterLabel?: (tooltipItem?: ChartTooltipItem, data?: any) => void; 67 | afterBody?: (item?: ChartTooltipItem[], data?: any) => void; 68 | beforeFooter?: (item?: ChartTooltipItem[], data?: any) => void; 69 | footer?: (item?: ChartTooltipItem[], data?: any) => void; 70 | afterfooter?: (item?: ChartTooltipItem[], data?: any) => void; 71 | } 72 | interface ChartAnimationParameter { 73 | chartInstance?: any; 74 | animationObject?: any; 75 | } 76 | interface ChartPoint { 77 | x?: number; 78 | y?: number; 79 | } 80 | 81 | interface ChartConfiguration { 82 | type?: string; 83 | data?: ChartData; 84 | options?: ChartOptions; 85 | } 86 | 87 | interface ChartData {} 88 | 89 | interface LinearChartData extends ChartData { 90 | labels?: string[]; 91 | datasets?: ChartDataSets[]; 92 | } 93 | 94 | interface ChartOptions { 95 | responsive?: boolean; 96 | 
responsiveAnimationDuration?: number; 97 | maintainAspectRatio?: boolean; 98 | events?: string[]; 99 | onClick?: (any?: any) => any; 100 | title?: ChartTitleOptions; 101 | legend?: ChartLegendOptions; 102 | tooltips?: ChartTooltipOptions; 103 | hover?: ChartHoverOptions; 104 | animation?: ChartAnimationOptions; 105 | elements?: ChartElementsOptions; 106 | scales?: ChartScales; 107 | } 108 | 109 | interface ChartFontOptions { 110 | defaultFontColor?: ChartColor; 111 | defaultFontFamily?: string; 112 | defaultFontSize?: number; 113 | defaultFontStyle?: string; 114 | } 115 | 116 | interface ChartTitleOptions { 117 | display?: boolean; 118 | position?: string; 119 | fullWdith?: boolean; 120 | fontSize?: number; 121 | fontFamily?: string; 122 | fontColor?: ChartColor; 123 | fontStyle?: string; 124 | padding?: number; 125 | text?: string; 126 | } 127 | 128 | interface ChartLegendOptions { 129 | display?: boolean; 130 | position?: string; 131 | fullWidth?: boolean; 132 | onClick?: (event: any, legendItem: any) => void; 133 | labels?: ChartLegendLabelOptions; 134 | } 135 | 136 | interface ChartLegendLabelOptions { 137 | boxWidth?: number; 138 | fontSize?: number; 139 | fontStyle?: number; 140 | fontColor?: ChartColor; 141 | fontFamily?: string; 142 | padding?: number; 143 | generateLabels?: (chart: any) => any; 144 | } 145 | 146 | interface ChartTooltipOptions { 147 | enabled?: boolean; 148 | custom?: (a: any) => void; 149 | mode?: string; 150 | backgroundColor?: ChartColor; 151 | titleFontFamily?: string; 152 | titleFontSize?: number; 153 | titleFontStyle?: string; 154 | titleFontColor?: ChartColor; 155 | titleSpacing?: number; 156 | titleMarginBottom?: number; 157 | bodyFontFamily?: string; 158 | bodyFontSize?: number; 159 | bodyFontStyle?: string; 160 | bodyFontColor?: ChartColor; 161 | bodySpacing?: number; 162 | footerFontFamily?: string; 163 | footerFontSize?: number; 164 | footerFontStyle?: string; 165 | footerFontColor?: ChartColor; 166 | footerSpacing?: number; 167 | footerMarginTop?: number; 168 | xPadding?: number; 169 | yPadding?: number; 170 | caretSize?: number; 171 | cornerRadius?: number; 172 | multiKeyBackground?: string; 173 | callbacks?: ChartTooltipCallback; 174 | } 175 | 176 | interface ChartHoverOptions { 177 | mode?: string; 178 | animationDuration?: number; 179 | onHover?: (active: any) => void; 180 | } 181 | 182 | interface ChartAnimationObject { 183 | currentStep?: number; 184 | numSteps?: number; 185 | easing?: string; 186 | render?: (arg: any) => void; 187 | onAnimationProgress?: (arg: any) => void; 188 | onAnimationComplete?: (arg: any) => void; 189 | } 190 | 191 | interface ChartAnimationOptions { 192 | duration?: number; 193 | easing?: string; 194 | onProgress?: (chart: any) => void; 195 | onComplete?: (chart: any) => void; 196 | } 197 | 198 | interface ChartElementsOptions { 199 | point?: ChartPointOptions; 200 | line?: ChartLineOptions; 201 | arg?: ChartArcOtpions; 202 | rectangle?: ChartRectangleOptions; 203 | } 204 | 205 | interface ChartArcOtpions { 206 | backgroundColor?: ChartColor; 207 | borderColor?: ChartColor; 208 | borderWidth?: number; 209 | } 210 | 211 | interface ChartLineOptions { 212 | tension?: number; 213 | backgroundColor?: ChartColor; 214 | borderWidth?: number; 215 | borderColor?: ChartColor; 216 | borderCapStyle?: string; 217 | borderDash?: any[]; 218 | borderDashOffset?: number; 219 | borderJoinStyle?: string; 220 | } 221 | 222 | interface ChartPointOptions { 223 | radius?: number; 224 | pointStyle?: string; 225 | backgroundColor?: ChartColor; 
226 | borderWidth?: number; 227 | borderColor?: ChartColor; 228 | hitRadius?: number; 229 | hoverRadius?: number; 230 | hoverBorderWidth?: number; 231 | } 232 | 233 | interface ChartRectangleOptions { 234 | backgroundColor?: ChartColor; 235 | borderWidth?: number; 236 | borderColor?: ChartColor; 237 | borderSkipped?: string; 238 | } 239 | interface GridLineOptions { 240 | display?: boolean; 241 | color?: ChartColor; 242 | lineWidth?: number; 243 | drawBorder?: boolean; 244 | drawOnChartArea?: boolean; 245 | drawticks?: boolean; 246 | tickMarkLength?: number; 247 | zeroLineWidth?: number; 248 | zeroLineColor?: ChartColor; 249 | offsetGridLines?: boolean; 250 | } 251 | 252 | interface ScaleTitleOptions { 253 | display?: boolean; 254 | labelString?: string; 255 | fontColor?: ChartColor; 256 | fontFamily?: string; 257 | fontSize?: number; 258 | fontStyle?: string; 259 | } 260 | 261 | interface TickOptions { 262 | autoSkip?: boolean; 263 | callback?: (value: any, index: any, values: any) => string; 264 | display?: boolean; 265 | fontColor?: ChartColor; 266 | fontFamily?: string; 267 | fontSize?: number; 268 | fontStyle?: string; 269 | labelOffset?: number; 270 | maxRotation?: number; 271 | minRotation?: number; 272 | mirror?: boolean; 273 | padding?: number; 274 | reverse?: boolean; 275 | min?: any; 276 | max?: any; 277 | } 278 | interface AngleLineOptions { 279 | display?: boolean; 280 | color?: ChartColor; 281 | lineWidth?: number; 282 | } 283 | 284 | interface PointLabelOptions { 285 | callback?: (arg: any) => any; 286 | fontColor?: ChartColor; 287 | fontFamily?: string; 288 | fontSize?: number; 289 | fontStyle?: string; 290 | } 291 | 292 | interface TickOptions { 293 | backdropColor?: ChartColor; 294 | backdropPaddingX?: number; 295 | backdropPaddingY?: number; 296 | maxTicksLimit?: number; 297 | showLabelBackdrop?: boolean; 298 | } 299 | interface LinearTickOptions extends TickOptions { 300 | beginAtZero?: boolean; 301 | min?: number; 302 | max?: number; 303 | maxTicksLimit?: number; 304 | stepSize?: number; 305 | suggestedMin?: number; 306 | suggestedMax?: number; 307 | } 308 | 309 | interface LogarithmicTickOptions extends TickOptions { 310 | min?: number; 311 | max?: number; 312 | } 313 | 314 | type ChartColor = string|CanvasGradient|CanvasPattern; 315 | 316 | interface ChartDataSets { 317 | backgroundColor?: ChartColor; 318 | borderWidth?: number; 319 | borderColor?: ChartColor; 320 | borderCapStyle?: string; 321 | borderDash?: number[]; 322 | borderDashOffset?: number; 323 | borderJoinStyle?: string; 324 | data?: number[]|ChartPoint[]; 325 | fill?: boolean; 326 | label?: string; 327 | lineTension?: number; 328 | pointBorderColor?: ChartColor|ChartColor[]; 329 | pointBackgroundColor?: ChartColor|ChartColor[]; 330 | pointBorderWidth?: number|number[]; 331 | pointRadius?: number|number[]; 332 | pointHoverRadius?: number|number[]; 333 | pointHitRadius?: number|number[]; 334 | pointHoverBackgroundColor?: ChartColor|ChartColor[]; 335 | pointHoverBorderColor?: ChartColor|ChartColor[]; 336 | pointHoverBorderWidth?: number|number[]; 337 | pointStyle?: string|string[]|HTMLImageElement|HTMLImageElement[]; 338 | xAxisID?: string; 339 | yAxisID?: string; 340 | } 341 | 342 | interface ChartScales { 343 | type?: string; 344 | display?: boolean; 345 | position?: string; 346 | beforeUpdate?: (scale?: any) => void; 347 | beforeSetDimension?: (scale?: any) => void; 348 | beforeDataLimits?: (scale?: any) => void; 349 | beforeBuildTicks?: (scale?: any) => void; 350 | beforeTickToLabelConversion?: (scale?: 
any) => void; 351 | beforeCalculateTickRotation?: (scale?: any) => void; 352 | beforeFit?: (scale?: any) => void; 353 | afterUpdate?: (scale?: any) => void; 354 | afterSetDimension?: (scale?: any) => void; 355 | afterDataLimits?: (scale?: any) => void; 356 | afterBuildTicks?: (scale?: any) => void; 357 | afterTickToLabelConversion?: (scale?: any) => void; 358 | afterCalculateTickRotation?: (scale?: any) => void; 359 | afterFit?: (scale?: any) => void; 360 | gridLines?: GridLineOptions; 361 | scaleLabel?: ScaleTitleOptions; 362 | ticks?: TickOptions; 363 | xAxes?: ChartXAxe[]; 364 | yAxes?: ChartYAxe[]; 365 | } 366 | 367 | interface ChartXAxe { 368 | type?: string; 369 | display?: boolean; 370 | id?: string; 371 | stacked?: boolean; 372 | categoryPercentage?: number; 373 | barPercentage?: number; 374 | barThickness?: number; 375 | gridLines?: GridLineOptions; 376 | position?: string; 377 | ticks?: TickOptions; 378 | time?: TimeScale; 379 | scaleLabel?: ScaleTitleOptions; 380 | } 381 | 382 | interface ChartYAxe { 383 | type?: string; 384 | display?: boolean; 385 | id?: string; 386 | stacked?: boolean; 387 | position?: string; 388 | ticks?: TickOptions; 389 | scaleLabel?: ScaleTitleOptions; 390 | } 391 | 392 | interface LinearScale extends ChartScales { 393 | ticks?: LinearTickOptions; 394 | } 395 | 396 | interface LogarithmicScale extends ChartScales { 397 | ticks?: LogarithmicTickOptions; 398 | } 399 | 400 | interface TimeScale extends ChartScales { 401 | format?: string; 402 | displayFormats?: string; 403 | isoWeekday?: boolean; 404 | max?: string; 405 | min?: string; 406 | parser?: string|((arg: any) => any); 407 | round?: string; 408 | tooltipFormat?: string; 409 | unit?: string|TimeUnit; 410 | unitStepSize?: number; 411 | } 412 | 413 | interface RadialLinearScale { 414 | lineArc?: boolean; 415 | angleLines?: AngleLineOptions; 416 | pointLabels?: PointLabelOptions; 417 | ticks?: TickOptions; 418 | } 419 | 420 | declare class Chart { 421 | constructor(context: CanvasRenderingContext2D, options: ChartConfiguration); 422 | config: ChartConfiguration; 423 | destroy: () => {}; 424 | update: (duration?: any, lazy?: any) => {}; 425 | render: (duration?: any, lazy?: any) => {}; 426 | stop: () => {}; 427 | resize: () => {}; 428 | clear: () => {}; 429 | toBase64: () => string; 430 | generateLegend: () => {}; 431 | getElementAtEvent: (e: any) => {}; 432 | getElementsAtEvent: (e: any) => {}[]; 433 | getDatasetAtEvent: (e: any) => {}[]; 434 | 435 | defaults: {global: ChartOptions;} 436 | } 437 | -------------------------------------------------------------------------------- /lib/polymer-spec.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * @license 3 | * Copyright 2017 Google Inc. All Rights Reserved. 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | * ============================================================================= 16 | */ 17 | 18 | /** 19 | * @fileoverview 20 | * 21 | * Defines an interface for creating Polymer elements in Typescript with the 22 | * correct typings. A Polymer element should be defined like this: 23 | * 24 | * ``` 25 | * let MyElementPolymer = PolymerElement({ 26 | * is: 'my-polymer-element', 27 | * properties: { 28 | * foo: string, 29 | * bar: Array 30 | * } 31 | * }); 32 | * 33 | * class MyElement extends MyElementPolymer { 34 | * foo: string; 35 | * bar: number[]; 36 | * 37 | * ready() { 38 | * console.log('MyElement initialized!'); 39 | * } 40 | * } 41 | * 42 | * document.registerElement(MyElement.prototype.is, MyElement); 43 | * ``` 44 | */ 45 | 46 | export type Spec = { 47 | is: string; properties: { 48 | [key: string]: (Function|{ 49 | // tslint:disable-next-line:no-any 50 | type: Function, value?: any; 51 | reflectToAttribute?: boolean; 52 | readonly?: boolean; 53 | notify?: boolean; 54 | computed?: string; 55 | observer?: string; 56 | }) 57 | }; 58 | observers?: string[]; 59 | }; 60 | 61 | export function PolymerElement(spec: Spec) { 62 | // tslint:disable-next-line:no-any 63 | return Polymer.Class(spec as any) as {new (): PolymerHTMLElement}; 64 | } 65 | 66 | export interface PolymerHTMLElement extends HTMLElement, polymer.Base {} 67 | -------------------------------------------------------------------------------- /lib/support.js: -------------------------------------------------------------------------------- 1 | /** 2 | * @license 3 | * Copyright 2017 Google Inc. All Rights Reserved. 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | * ============================================================================= 16 | */ 17 | function isWebGLEnabled() { 18 | var canvas = document.createElement('canvas'); 19 | 20 | var attributes = { 21 | alpha: false, 22 | antialias: false, 23 | premultipliedAlpha: false, 24 | preserveDrawingBuffer: false, 25 | depth: false, 26 | stencil: false, 27 | failIfMajorPerformanceCaveat: true 28 | }; 29 | return null != (canvas.getContext('webgl', attributes) || 30 | canvas.getContext('experimental-webgl', attributes)); 31 | } 32 | 33 | function buildAndShowDialog(title, content) { 34 | var dialogContainer = document.createElement('div'); 35 | dialogContainer.innerHTML = ` 36 | 37 |

${title}

38 |
39 |

${content}

40 |
41 |
42 | `; 43 | document.body.appendChild(dialogContainer); 44 | var dialog = document.getElementById('dialog'); 45 | dialog.style.width = '430px'; 46 | dialogPolyfill.registerDialog(dialog); 47 | dialog.showModal(); 48 | } 49 | 50 | function inializePolymerPage() { 51 | document.addEventListener('WebComponentsReady', function(event) { 52 | if (!isWebGLEnabled()) { 53 | const title = `Check if hardware acceleration is enabled.`; 54 | const content = ` 55 | Looks like your device is supported but settings aren't in place. 56 | Please check if WebGL is enabled for your browser. 57 | 58 | See: How can I enable WebGL in my browser? 59 | `; 60 | buildAndShowDialog(title, content); 61 | } else { 62 | var bundleScript = document.createElement('script'); 63 | bundleScript.src = 'bundle.js'; 64 | document.head.appendChild(bundleScript); 65 | } 66 | }); 67 | } 68 | inializePolymerPage(); 69 | -------------------------------------------------------------------------------- /output/README.md: -------------------------------------------------------------------------------- 1 | ### Deploying Your Demo 2 | 3 | Run the following command: 4 | 5 | ```bash 6 | $ ./scripts/deploy-demo 7 | Stored bundle in output/bundle.js 8 | Saved bundled demo at output/index.html 9 | ``` 10 | 11 | It will create two files in this folder: `index.html` and `bundle.js`. 12 | Then, place these files into a server with `figures/`, `images/`, and `pretrained_models/`. 13 | Please see the branch `gh-pages` for an example. 14 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "ganlab", 3 | "version": "0.2.1", 4 | "description": "An Interactive, Visual Experimentation Tool for GANs", 5 | "private": false, 6 | "dependencies": { 7 | "@bower_components/font-roboto": "PolymerElements/font-roboto#^1.0.1", 8 | "@bower_components/iron-a11y-announcer": "PolymerElements/iron-a11y-announcer#^1.0.0", 9 | "@bower_components/iron-a11y-keys-behavior": "PolymerElements/iron-a11y-keys-behavior#^1.0.0", 10 | "@bower_components/iron-autogrow-textarea": "PolymerElements/iron-autogrow-textarea#^1.0.0", 11 | "@bower_components/iron-behaviors": "polymerelements/iron-behaviors#^1.0.0", 12 | "@bower_components/iron-checked-element-behavior": "PolymerElements/iron-checked-element-behavior#^1.0.0", 13 | "@bower_components/iron-collapse": "PolymerElements/iron-collapse#^1.0.0", 14 | "@bower_components/iron-dropdown": "PolymerElements/iron-dropdown#^1.0.0", 15 | "@bower_components/iron-fit-behavior": "PolymerElements/iron-fit-behavior#^1.0.0", 16 | "@bower_components/iron-flex-layout": "PolymerElements/iron-flex-layout#~1.3.0", 17 | "@bower_components/iron-form-element-behavior": "PolymerElements/iron-form-element-behavior#^1.0.0", 18 | "@bower_components/iron-icon": "PolymerElements/iron-icon#^1.0.0", 19 | "@bower_components/iron-icons": "PolymerElements/iron-icons#1.*.*", 20 | "@bower_components/iron-iconset-svg": "PolymerElements/iron-iconset-svg#^1.0.0", 21 | "@bower_components/iron-input": "PolymerElements/iron-input#^1.0.0", 22 | "@bower_components/iron-menu-behavior": "PolymerElements/iron-menu-behavior#^1.1.7", 23 | "@bower_components/iron-meta": "PolymerElements/iron-meta#^1.0.0", 24 | "@bower_components/iron-overlay-behavior": "PolymerElements/iron-overlay-behavior#^1.7.0", 25 | "@bower_components/iron-range-behavior": "PolymerElements/iron-range-behavior#^1.0.0", 26 | 
"@bower_components/iron-resizable-behavior": "PolymerElements/iron-resizable-behavior#^1.0.0", 27 | "@bower_components/iron-selector": "PolymerElements/iron-selector#^1.0.0", 28 | "@bower_components/iron-validatable-behavior": "PolymerElements/iron-validatable-behavior#^1.0.0", 29 | "@bower_components/neon-animation": "PolymerElements/neon-animation#^1.0.0", 30 | "@bower_components/paper-behaviors": "PolymerElements/paper-behaviors#^1.0.0", 31 | "@bower_components/paper-button": "PolymerElements/paper-button#1.*.*", 32 | "@bower_components/paper-checkbox": "PolymerElements/paper-checkbox#1.*.*", 33 | "@bower_components/paper-dialog": "PolymerElements/paper-dialog#1.*.*", 34 | "@bower_components/paper-dialog-behavior": "PolymerElements/paper-dialog-behavior#^1.0.0", 35 | "@bower_components/paper-dialog-scrollable": "PolymerElements/paper-dialog-scrollable#1.*.*", 36 | "@bower_components/paper-dropdown-menu": "PolymerElements/paper-dropdown-menu#1.*.*", 37 | "@bower_components/paper-icon-button": "PolymerElements/paper-icon-button#1.*.*", 38 | "@bower_components/paper-input": "PolymerElements/paper-input#^1.0.9", 39 | "@bower_components/paper-item": "PolymerElements/paper-item#1.*.*", 40 | "@bower_components/paper-listbox": "PolymerElements/paper-listbox#1.*.*", 41 | "@bower_components/paper-material": "PolymerElements/paper-material#^1.0.0", 42 | "@bower_components/paper-menu-button": "PolymerElements/paper-menu-button#^1.3.0", 43 | "@bower_components/paper-progress": "PolymerElements/paper-progress#1.*.*", 44 | "@bower_components/paper-radio-button": "PolymerElements/paper-radio-button#1.*.*", 45 | "@bower_components/paper-radio-group": "PolymerElements/paper-radio-group#1.*.*", 46 | "@bower_components/paper-ripple": "PolymerElements/paper-ripple#^1.0.0", 47 | "@bower_components/paper-slider": "PolymerElements/paper-slider#1.*.*", 48 | "@bower_components/paper-spinner": "PolymerElements/paper-spinner#1.*.*", 49 | "@bower_components/paper-styles": "PolymerElements/paper-styles#^1.0.0", 50 | "@bower_components/paper-toggle-button": "PolymerElements/paper-toggle-button#1.*.*", 51 | "@bower_components/paper-tooltip": "PolymerElements/paper-tooltip#1.*.*", 52 | "@bower_components/polymer": "Polymer/polymer#1.*.*", 53 | "@bower_components/web-animations-js": "web-animations/web-animations-js#^2.2.0", 54 | "@bower_components/webcomponentsjs": "webcomponents/webcomponentsjs#^0.7.24", 55 | "@tensorflow/tfjs-core": "^0.12.0", 56 | "chart.js": "~2.7.1", 57 | "tslint": "~5.8.0" 58 | }, 59 | "repository": { 60 | "type": "git", 61 | "url": "https://github.com/poloclub/ganlab.git" 62 | }, 63 | "devDependencies": { 64 | "@types/d3": "~4.10.0", 65 | "@types/d3-scale-chromatic": "~1.1.0", 66 | "@types/d3-transition": "~1.1.1", 67 | "@types/polymer": "~1.1.31", 68 | "browserify": "~14.4.0", 69 | "clang-format": "~1.2.2", 70 | "cross-spawn": "~5.1.0", 71 | "d3-array": "~1.2.1", 72 | "d3-format": "~1.2.1", 73 | "d3-scale": "~1.0.7", 74 | "d3-scale-chromatic": "~1.1.1", 75 | "d3-selection": "~1.2.0", 76 | "d3-shape": "~1.2.0", 77 | "d3-transition": "~1.1.1", 78 | "fetch-mock": "~5.13.1", 79 | "handlebars": "~4.0.11", 80 | "highlight.js": "9.12.0", 81 | "http-server": "~0.10.0", 82 | "jasmine-core": "~2.6.4", 83 | "minimist": "~1.2.0", 84 | "mkdirp": "~0.5.1", 85 | "opn": "~5.1.0", 86 | "poi": "~9.6.8", 87 | "poi-preset-typescript": "~9.0.2", 88 | "polymer-bundler": "~3.0.1", 89 | "shelljs": "~0.7.8", 90 | "tsify": "~3.0.3", 91 | "typescript": "2.7.2", 92 | "uglifyjs": "~2.4.11", 93 | "watchify": "~3.9.0" 94 | 
}, 95 | "resolutions": { 96 | "@types/webcomponents.js": "<=0.6.32" 97 | }, 98 | "scripts": { 99 | "prep": "yarn", 100 | "build": "tsc", 101 | "lint": "tslint -p . -t verbose" 102 | }, 103 | "license": "Apache-2.0", 104 | "engines": { 105 | "yarn": ">= 1.0.0" 106 | } 107 | } 108 | -------------------------------------------------------------------------------- /scripts/build-demo: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | // Copyright 2017 Google Inc. All Rights Reserved. 3 | // Modifications Copyright 2018 Minsuk Kahng. 4 | // 5 | // Licensed under the Apache License, Version 2.0 (the "License"); 6 | // you may not use this file except in compliance with the License. 7 | // You may obtain a copy of the License at 8 | // 9 | // http://www.apache.org/licenses/LICENSE-2.0 10 | // 11 | // Unless required by applicable law or agreed to in writing, software 12 | // distributed under the License is distributed on an "AS IS" BASIS, 13 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | // See the License for the specific language governing permissions and 15 | // limitations under the License. 16 | // ============================================================================= 17 | 18 | const path = require('path'); 19 | const spawn = require('cross-spawn'); 20 | const shell = require('shelljs'); 21 | 22 | const tsPath = path.join('demo', 'ganlab.ts'); 23 | const outputPath = path.join('output', 'bundle.js'); 24 | 25 | const cmd = path.join('node_modules', '.bin', 'browserify'); 26 | const child = spawn(cmd, 27 | [tsPath, '-p', '[tsify', '-p', 'tsconfig.json]', '-o' , outputPath], 28 | {detached: false}); 29 | child.stdout.pipe(process.stdout); 30 | child.stderr.pipe(process.stderr); 31 | child.on('close', () => console.log(`Stored bundle in ${outputPath}`)); 32 | -------------------------------------------------------------------------------- /scripts/deploy-demo: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | // Copyright 2017 Google Inc. All Rights Reserved. 3 | // Modifications Copyright 2018 Minsuk Kahng. 4 | // 5 | // Licensed under the Apache License, Version 2.0 (the "License"); 6 | // you may not use this file except in compliance with the License. 7 | // You may obtain a copy of the License at 8 | // 9 | // http://www.apache.org/licenses/LICENSE-2.0 10 | // 11 | // Unless required by applicable law or agreed to in writing, software 12 | // distributed under the License is distributed on an "AS IS" BASIS, 13 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | // See the License for the specific language governing permissions and 15 | // limitations under the License. 
16 | // ============================================================================= 17 | 18 | const path = require('path'); 19 | const spawn = require('cross-spawn'); 20 | 21 | const demoFilePath = path.join('demo', 'index.html') 22 | const bundlePath = path.join('output', 'index.html') 23 | 24 | const cmd = path.join('scripts', 'build-demo'); 25 | const child = spawn(cmd, [], {detached: false}); 26 | 27 | child.stdout.pipe(process.stdout); 28 | child.stderr.pipe(process.stderr); 29 | 30 | child.on('close', () => { 31 | const cmd = path.join('node_modules', '.bin', 'polymer-bundler'); 32 | const child = spawn(cmd, ['--inline-scripts', '--inline-css', 33 | '--out-html', bundlePath, demoFilePath], {detached: false}); 34 | child.stdout.pipe(process.stdout); 35 | child.stderr.pipe(process.stderr); 36 | child.on('close', () => console.log(`Saved bundled demo at ${bundlePath}`)); 37 | }); -------------------------------------------------------------------------------- /scripts/watch-demo: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | // Copyright 2017 Google Inc. All Rights Reserved. 3 | // Modifications Copyright 2018 Minsuk Kahng. 4 | // 5 | // Licensed under the Apache License, Version 2.0 (the "License"); 6 | // you may not use this file except in compliance with the License. 7 | // You may obtain a copy of the License at 8 | // 9 | // http://www.apache.org/licenses/LICENSE-2.0 10 | // 11 | // Unless required by applicable law or agreed to in writing, software 12 | // distributed under the License is distributed on an "AS IS" BASIS, 13 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | // See the License for the specific language governing permissions and 15 | // limitations under the License. 16 | // ============================================================================= 17 | 18 | const path = require('path'); 19 | const spawn = require('cross-spawn'); 20 | const opn = require('opn'); 21 | const shell = require('shelljs'); 22 | 23 | const demoPath = 'demo'; 24 | const outputPath = path.join(demoPath, 'bundle.js') 25 | const tsPath = path.join(demoPath, 'ganlab.ts'); 26 | 27 | const cmd = path.join('node_modules', '.bin', 'watchify'); 28 | 29 | const tsifyCmd = ['tsify', '-p', 'tsconfig-dev.json']; 30 | 31 | // tsifyCmd is passed to the watchify command, so the arguments should be 32 | // wrapped in []. 33 | tsifyCmd[0] = '[' + tsifyCmd[0]; 34 | tsifyCmd[tsifyCmd.length - 1] += ']'; 35 | 36 | const watchify = spawn(cmd, [tsPath, '-p', ...tsifyCmd, '-v', 37 | '--debug', '-o' , outputPath], {detached: false}); 38 | watchify.stdout.pipe(process.stdout); 39 | watchify.stderr.pipe(process.stderr); 40 | 41 | let httpServerStarted = false; 42 | 43 | console.log('Waiting for initial compile...'); 44 | watchify.stderr.on('data', (data) => { 45 | if (data.toString().includes(`bytes written to`)) { 46 | if (!httpServerStarted) { 47 | const httpCmd = path.join('node_modules', '.bin', 'http-server'); 48 | const httpServer = spawn(httpCmd, ['-c-1'], { detached: false}); 49 | 50 | httpServer.stdout.on('data', data => { 51 | data = data.toString().split('\n')[0].trim(); 52 | if (data.startsWith('http://127.0.0.1:')) { 53 | // Open a browser pointing to the demo. 
54 | opn(path.join(data, demoPath)); 55 | } 56 | }); 57 | 58 | httpServer.stdout.pipe(process.stdout); 59 | httpServer.stderr.pipe(process.stderr); 60 | httpServerStarted = true; 61 | } 62 | } 63 | }); -------------------------------------------------------------------------------- /tsconfig-dev.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "tsconfig.json", 3 | "include": [ 4 | "**/*" 5 | ], 6 | "compilerOptions": { 7 | "declaration": false, 8 | "target": "es2017" 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "include": ["**/*"], 3 | "compilerOptions": { 4 | "declaration": false, 5 | "target": "es2017" 6 | } 7 | } 8 | --------------------------------------------------------------------------------