├── web
├── ._keras.js
├── ._weblas.js
├── spinner.png
├── hintbot_weights.buf
├── hintbot_metadata.json
├── style.css
├── hintbot.json
├── hintbot.js
└── weblas.js
├── readme_images
├── comparison.png
└── model_progress.png
├── README.md
├── colorutils.py
├── index.html
├── .gitignore
├── encoder.py
├── data.py
└── hintbot.py
/web/._keras.js:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/madebyollin/hintbot/HEAD/web/._keras.js
--------------------------------------------------------------------------------
/web/._weblas.js:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/madebyollin/hintbot/HEAD/web/._weblas.js
--------------------------------------------------------------------------------
/web/spinner.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/madebyollin/hintbot/HEAD/web/spinner.png
--------------------------------------------------------------------------------
/web/hintbot_weights.buf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/madebyollin/hintbot/HEAD/web/hintbot_weights.buf
--------------------------------------------------------------------------------
/readme_images/comparison.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/madebyollin/hintbot/HEAD/readme_images/comparison.png
--------------------------------------------------------------------------------
/readme_images/model_progress.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/madebyollin/hintbot/HEAD/readme_images/model_progress.png
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # hintbot
2 | A CNN written in keras to hint icons automatically. Live in-browser at [madebyollin.github.io/hintbot/](https://madebyollin.github.io/hintbot/). Blog post at [madebyollin.com/posts/hintbot/](http://madebyollin.com/posts/hintbot/).
3 |
--------------------------------------------------------------------------------
/colorutils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from skimage import color
3 | import skimage.io as io
4 |
def RGBAtoHSVA(image):
    """Convert an RGBA uint8 image (H, W, 4) into HSVA floats in [0, 1]."""
    alpha = image[:, :, 3:] / 255.0
    hsv = color.rgb2hsv(image[:, :, :3])
    return np.concatenate((hsv, alpha), axis=2)
11 |
def HSVAtoRGBA(image):
    """Convert an HSVA float image (H, W, 4) back to an RGBA uint8 image.

    Hue/saturation/value and alpha are clipped to [0, 1] before conversion
    so that out-of-range values (e.g. from added noise) cannot break
    skimage's hsv2rgb, and the final result is clipped to [0, 255].
    """
    hsv = image[:, :, :3].clip(0, 1)
    alpha = image[:, :, 3:].clip(0, 1)
    # Removed a dead `a = a` self-assignment and stale debug prints.
    rgb = color.hsv2rgb(hsv)
    rgba = np.concatenate((rgb, alpha), axis=2) * 255.0
    return rgba.clip(0, 255).astype(np.uint8)
27 |
def test():
    """Round-trip debug.png through HSVA (with a little noise) and save it."""
    rgba = io.imread("debug.png")
    hsva = RGBAtoHSVA(rgba)
    hsva += np.random.normal(0, 0.01, rgba.shape)
    io.imsave("debug_rgbaconvert.png", HSVAtoRGBA(hsva))
34 |
--------------------------------------------------------------------------------
/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | Hintbot, the Icon Hinter
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
![]()
20 |
![]()
21 |
22 |
 23 | HintBot is a work-in-progress neural network that hints icons in the browser.
24 |
Read the Post!
25 |
26 |
29 |
30 |
31 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # input data
2 | input_iconsets/
3 |
4 | # weights
5 | *.h5
6 | # images
7 | *.xcf
8 | # pythonHistory
9 | pythonHistory
10 |
11 | # Byte-compiled / optimized / DLL files
12 | __pycache__/
13 | *.py[cod]
14 | *$py.class
15 |
16 | # C extensions
17 | *.so
18 |
19 | # Distribution / packaging
20 | .Python
21 | env/
22 | build/
23 | develop-eggs/
24 | dist/
25 | downloads/
26 | eggs/
27 | .eggs/
28 | lib/
29 | lib64/
30 | parts/
31 | sdist/
32 | var/
33 | *.egg-info/
34 | .installed.cfg
35 | *.egg
36 |
37 | # PyInstaller
38 | # Usually these files are written by a python script from a template
39 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
40 | *.manifest
41 | *.spec
42 |
43 | # Installer logs
44 | pip-log.txt
45 | pip-delete-this-directory.txt
46 |
47 | # Unit test / coverage reports
48 | htmlcov/
49 | .tox/
50 | .coverage
51 | .coverage.*
52 | .cache
53 | nosetests.xml
54 | coverage.xml
 55 | *.cover
56 | .hypothesis/
57 |
58 | # Translations
59 | *.mo
60 | *.pot
61 |
62 | # Django stuff:
63 | *.log
64 | local_settings.py
65 |
66 | # Flask stuff:
67 | instance/
68 | .webassets-cache
69 |
70 | # Scrapy stuff:
71 | .scrapy
72 |
73 | # Sphinx documentation
74 | docs/_build/
75 |
76 | # PyBuilder
77 | target/
78 |
79 | # IPython Notebook
80 | .ipynb_checkpoints
81 |
82 | # pyenv
83 | .python-version
84 |
85 | # celery beat schedule file
86 | celerybeat-schedule
87 |
88 | # dotenv
89 | .env
90 |
91 | # virtualenv
92 | venv/
93 | ENV/
94 |
95 | # Spyder project settings
96 | .spyderproject
97 |
98 | # Rope project settings
99 | .ropeproject
100 |
--------------------------------------------------------------------------------
/web/hintbot_metadata.json:
--------------------------------------------------------------------------------
1 | [{"type": "float32", "offset": 0, "layer_name": "convolution2d_7", "shape": [4, 4, 4, 64], "weight_name": "convolution2d_7_W", "length": 4096}, {"type": "float32", "offset": 16384, "layer_name": "convolution2d_7", "shape": [64], "weight_name": "convolution2d_7_b", "length": 64}, {"type": "float32", "offset": 16640, "layer_name": "convolution2d_8", "shape": [4, 4, 64, 64], "weight_name": "convolution2d_8_W", "length": 65536}, {"type": "float32", "offset": 278784, "layer_name": "convolution2d_8", "shape": [64], "weight_name": "convolution2d_8_b", "length": 64}, {"type": "float32", "offset": 279040, "layer_name": "convolution2d_9", "shape": [4, 4, 64, 64], "weight_name": "convolution2d_9_W", "length": 65536}, {"type": "float32", "offset": 541184, "layer_name": "convolution2d_9", "shape": [64], "weight_name": "convolution2d_9_b", "length": 64}, {"type": "float32", "offset": 541440, "layer_name": "convolution2d_10", "shape": [4, 4, 64, 64], "weight_name": "convolution2d_10_W", "length": 65536}, {"type": "float32", "offset": 803584, "layer_name": "convolution2d_10", "shape": [64], "weight_name": "convolution2d_10_b", "length": 64}, {"type": "float32", "offset": 803840, "layer_name": "convolution2d_11", "shape": [4, 4, 64, 16], "weight_name": "convolution2d_11_W", "length": 16384}, {"type": "float32", "offset": 869376, "layer_name": "convolution2d_11", "shape": [16], "weight_name": "convolution2d_11_b", "length": 16}, {"type": "float32", "offset": 869440, "layer_name": "convolution2d_12", "shape": [4, 4, 16, 4], "weight_name": "convolution2d_12_W", "length": 1024}, {"type": "float32", "offset": 873536, "layer_name": "convolution2d_12", "shape": [4], "weight_name": "convolution2d_12_b", "length": 4}]
--------------------------------------------------------------------------------
/web/style.css:
--------------------------------------------------------------------------------
/* Global page styles */
body {
    font-family: "Inconsolata", "Monaco", sans-serif;
    font-size: 1.2em;
    min-width: 640px;
    margin: 0;
    padding: 0;
    position: relative;
}

* {
    box-sizing: border-box;
}

/* Toggled from JS (hide()/show()) to swap UI states */
.hidden {
    visibility: hidden;
}

#fileInputWrapper {
    margin-top: 10vh;
    text-align: center;
    height: 1em;
    transition: .1s ease all;
}

/* Visually hide the native file input; its label acts as the button */
#fileInput {
    width: 0.1px;
    height: 0.1px;
    opacity: 0;
    overflow: hidden;
    position: absolute;
    z-index: -1;
}

#inputLabel {
    cursor: pointer;
}

.button {
    font-size: 1.5em;
    padding: .5em 1em;
    line-height: 1.5em;
    color: white;
    background: rgb(32, 160, 255);
    text-decoration: none;
    border-radius: 128px;
    transition: .1s ease all;
    box-shadow: 0px 8px 0px -8px white;
}

.button:hover {
    box-shadow: 0px 8px 16px -8px rgba(32, 160, 255, 0.5);
    font-size: 1.525em;
}

.button:active {
    box-shadow: 0px 8px 16px -8px white;
    background: rgb(32, 128, 240);
    font-size: 1.475em;
    color: hsl(0, 0%, 95%);
}

a.button input {
    background: transparent;
    color: inherit;
    font-family: inherit;
    font-size: inherit;
    border: none;
    padding: 0; /* fix: `none` is not a valid value for padding */
}

.bigButton {
    font-size: 1.5em;
    cursor: pointer;
}

/* Centered loading spinner, rotated by the `spin` animation */
#spinner {
    display: block;
    position: absolute;
    left: 50%;
    margin-left: -32px;
    top: 20vh;
    background-image: url("spinner.png");
    width: 64px;
    height: 64px;
    background-size: contain;
    animation: spin 1s infinite linear;
}

@keyframes spin {
    0% {
        transform: rotate(0deg)
    }
    100% {
        -webkit-transform: rotate(359deg);
        transform: rotate(359deg)
    }
}

@keyframes fade {
    0% {
        opacity: 1;
    }
    100% {
        opacity: 0;
    }
}

#footer {
    display: block;
    position: fixed;
    bottom: 0px;
    text-align: center;
    font-size: 0.8em;
    padding: 1.5em;
    width: 100%;
    color: hsl(0, 0%, 50%);
    z-index: -1;
}

#display {
    margin-top: 15vh !important;
    margin-bottom: 5vh !important;
    text-align: center;
    margin: 0 auto;
    width: 400px;
}

#description {
    width: 256px;
    margin: 0 auto;
    color: hsl(0, 0%, 50%);
}

/* Original/hinted previews render at icon size */
#display img {
    width: 16px;
    height: 16px;
}

#footer a,
#description a {
    text-decoration: none;
    color: rgb(32, 160, 255);
}

#footer a:hover,
#description a:hover {
    text-decoration: none;
    color: rgb(32, 128, 240);
}
--------------------------------------------------------------------------------
/encoder.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import os
3 | import h5py
4 | import numpy as np
5 | import json
6 |
7 |
class Encoder(object):
    """Serializes Keras HDF5 weights into flat buffers for the browser runtime.

    Weights are serialized sequentially from the Keras flattened_layers
    representation into:
        - `weights`: a binary string representing the raw data bytes in
          float32 of all weights, sequentially concatenated.
        - `metadata`: a list of dicts with byte offset, length (in float32
          elements) and tensor shape, so that the original tensors can be
          reconstructed.
    """

    def __init__(self, weights_hdf5_filepath):
        """
        Args:
            weights_hdf5_filepath: path to the .h5/.hdf5 weights file.

        Raises:
            Exception: if no filepath is given.
        """
        if not weights_hdf5_filepath:
            raise Exception('weights_hdf5_filepath must be defined.')
        self.weights_hdf5_filepath = weights_hdf5_filepath
        self.weights = b''   # concatenated float32 bytes of every tensor
        self.metadata = []   # one entry per tensor (offset/length/shape/...)

    def serialize(self):
        """Read every layer's weights from the HDF5 file into weights/metadata.

        Strategy for extracting the weights is adapted from the
        load_weights_from_hdf5_group method of the Container class:
        see https://github.com/fchollet/keras/blob/master/keras/engine/topology.py#L2505-L2585
        """
        hdf5_file = h5py.File(self.weights_hdf5_filepath, mode='r')
        # Files saved with model.save() nest weights under 'model_weights'.
        if 'layer_names' not in hdf5_file.attrs and 'model_weights' in hdf5_file:
            f = hdf5_file['model_weights']
        else:
            f = hdf5_file

        layer_names = [n.decode('utf8') for n in f.attrs['layer_names']]
        offset = 0  # running byte offset of the next tensor in self.weights
        for layer_name in layer_names:
            g = f[layer_name]
            weight_names = [n.decode('utf8') for n in g.attrs['weight_names']]
            for weight_name in weight_names:
                # Dataset.value was deprecated and removed in h5py 3.0;
                # indexing with [()] reads the full dataset and works on
                # both old and new h5py versions.
                weight_value = g[weight_name][()]
                bytearr = weight_value.astype(np.float32).tobytes()
                self.weights += bytearr
                self.metadata.append({
                    'layer_name': layer_name,
                    'weight_name': weight_name,
                    'offset': offset,
                    'length': len(bytearr) // 4,  # float32 element count
                    'shape': list(weight_value.shape),
                    'type': 'float32',
                })
                offset += len(bytearr)

        hdf5_file.close()

    def save(self):
        """Saves weights data (binary .buf) and weights metadata (json) next to the input file."""
        base = os.path.splitext(self.weights_hdf5_filepath)[0]
        weights_filepath = '{}_weights.buf'.format(base)
        with open(weights_filepath, mode='wb') as f:
            f.write(self.weights)
        metadata_filepath = '{}_metadata.json'.format(base)
        with open(metadata_filepath, mode='w') as f:
            json.dump(self.metadata, f)
68 |
69 |
if __name__ == '__main__':
    # Usage:
    #     python encoder.py example.hdf5
    #
    # Output:
    #     - example_weights.buf
    #     - example_metadata.json
    enc = Encoder(*sys.argv[1:])
    enc.serialize()
    enc.save()
82 |
--------------------------------------------------------------------------------
/data.py:
--------------------------------------------------------------------------------
1 | import os
2 | import skimage.io as io
3 | from sklearn.utils import shuffle
4 | from scipy.misc import imresize
5 | import numpy as np
6 | import colorutils
7 |
8 | inputImages = []
9 | targetImages = []
10 |
11 | # for each folder in datadir
12 | # filter for images that have "@2x" in their name
13 | # if the version without the @2x isn't present, ignore them
14 | # add the large version of the image to the main data array
15 | # and the small version of the image to the result array
def loadImages(datadir, maxDirectoryCount=10, split=0.9):
    """Walk `datadir` and collect (input, target) training pairs.

    For each directory, every "name@2x.png" that also has a matching
    "name.png" next to it is loaded; the @2x image is sliced into 32x32
    input tiles and the 1x image into the corresponding 16x16 targets.

    Args:
        datadir: root directory to walk.
        maxDirectoryCount: number of directories to process; pass a
            negative value (e.g. -1) for "no limit" — the counter then
            never reaches 0.
        split: fraction of the tiles used for training; the rest is test.

    Returns:
        (x_train, y_train, x_test, y_test) numpy arrays.
    """
    # Bug fix: the slices used to be accumulated into module-level lists,
    # so calling loadImages() twice duplicated all previously loaded data.
    # Local accumulators make each call independent.
    inputSlicesAll = []
    targetSlicesAll = []
    for dirPath, dirNames, fileNames in os.walk(datadir):
        # Skip hidden files and prune hidden directories from the walk.
        fileNames = [f for f in fileNames if not f[0] == '.']
        dirNames[:] = [d for d in dirNames if not d[0] == '.']
        if maxDirectoryCount != 0:
            fullSizeFileNames = [fileName for fileName in fileNames
                                 if fileName.endswith("@2x.png")
                                 and fileName.replace("@2x", "") in fileNames]
            for fullSizeFileName in fullSizeFileNames:
                inputImage = io.imread(dirPath + "/" + fullSizeFileName)
                targetImage = io.imread(dirPath + "/" + fullSizeFileName.replace("@2x", ""))
                inputSlices, targetSlices = sliceImages(inputImage, targetImage)
                inputSlicesAll.extend(inputSlices)
                targetSlicesAll.extend(targetSlices)
            maxDirectoryCount -= 1
    x, y = np.asarray(inputSlicesAll), np.asarray(targetSlicesAll)
    # Earlier tiles train, later tiles test; kept in walk order so one
    # icon's tiles stay on the same side of the split.
    splitIndex = int(len(x) * split)
    x_train, y_train = x[:splitIndex], y[:splitIndex]
    x_test, y_test = x[splitIndex:], y[splitIndex:]
    return (x_train, y_train, x_test, y_test)
39 |
def sliceImages(inputImage, targetImage):
    """Cut a (2x, 1x) image pair into aligned (32x32, 16x16) tile pairs.

    Returns:
        (inputSlices, targetSlices): two equal-length lists of numpy
        views; targetSlices[i] is the half-resolution counterpart of
        inputSlices[i]. Tiles are emitted column-by-column.
    """
    sliceSize = 32
    half = sliceSize // 2
    rows = inputImage.shape[0] // sliceSize
    cols = inputImage.shape[1] // sliceSize

    inputSlices = []
    targetSlices = []
    for y in range(cols):
        for x in range(rows):
            inputSlices.append(
                inputImage[x * sliceSize:(x + 1) * sliceSize,
                           y * sliceSize:(y + 1) * sliceSize])
            targetSlices.append(
                targetImage[x * half:(x + 1) * half,
                            y * half:(y + 1) * half])
    return (inputSlices, targetSlices)
64 |
--------------------------------------------------------------------------------
/web/hintbot.json:
--------------------------------------------------------------------------------
1 | {"keras_version": "1.2.1", "class_name": "Model", "config": {"layers": [{"class_name": "InputLayer", "name": "icon_goes_here", "inbound_nodes": [], "config": {"sparse": false, "batch_input_shape": [null, 32, 32, 4], "input_dtype": "float32", "name": "icon_goes_here"}}, {"class_name": "Convolution2D", "name": "convolution2d_7", "inbound_nodes": [[["icon_goes_here", 0, 0]]], "config": {"border_mode": "same", "trainable": true, "nb_col": 4, "subsample": [1, 1], "W_regularizer": null, "bias": true, "nb_row": 4, "activation": "relu", "name": "convolution2d_7", "b_regularizer": {"name": "L1L2Regularizer", "l2": 0.10000000149011612, "l1": 0.0}, "b_constraint": null, "W_constraint": null, "init": "glorot_uniform", "nb_filter": 64, "dim_ordering": "tf", "activity_regularizer": null}}, {"class_name": "Convolution2D", "name": "convolution2d_8", "inbound_nodes": [[["convolution2d_7", 0, 0]]], "config": {"border_mode": "same", "trainable": true, "nb_col": 4, "subsample": [1, 1], "W_regularizer": null, "bias": true, "nb_row": 4, "activation": "relu", "name": "convolution2d_8", "b_regularizer": {"name": "L1L2Regularizer", "l2": 0.10000000149011612, "l1": 0.0}, "b_constraint": null, "W_constraint": null, "init": "glorot_uniform", "nb_filter": 64, "dim_ordering": "tf", "activity_regularizer": null}}, {"class_name": "Convolution2D", "name": "convolution2d_9", "inbound_nodes": [[["convolution2d_8", 0, 0]]], "config": {"border_mode": "same", "trainable": true, "nb_col": 4, "subsample": [1, 1], "W_regularizer": null, "bias": true, "nb_row": 4, "activation": "relu", "name": "convolution2d_9", "b_regularizer": {"name": "L1L2Regularizer", "l2": 0.10000000149011612, "l1": 0.0}, "b_constraint": null, "W_constraint": null, "init": "glorot_uniform", "nb_filter": 64, "dim_ordering": "tf", "activity_regularizer": null}}, {"class_name": "Convolution2D", "name": "convolution2d_10", "inbound_nodes": [[["convolution2d_9", 0, 0]]], "config": {"border_mode": "same", "trainable": true, "nb_col": 
4, "subsample": [1, 1], "W_regularizer": null, "bias": true, "nb_row": 4, "activation": "relu", "name": "convolution2d_10", "b_regularizer": {"name": "L1L2Regularizer", "l2": 0.10000000149011612, "l1": 0.0}, "b_constraint": null, "W_constraint": null, "init": "glorot_uniform", "nb_filter": 64, "dim_ordering": "tf", "activity_regularizer": null}}, {"class_name": "Convolution2D", "name": "convolution2d_11", "inbound_nodes": [[["convolution2d_10", 0, 0]]], "config": {"border_mode": "same", "trainable": true, "nb_col": 4, "subsample": [1, 1], "W_regularizer": null, "bias": true, "nb_row": 4, "activation": "relu", "name": "convolution2d_11", "b_regularizer": {"name": "L1L2Regularizer", "l2": 0.10000000149011612, "l1": 0.0}, "b_constraint": null, "W_constraint": null, "init": "glorot_uniform", "nb_filter": 16, "dim_ordering": "tf", "activity_regularizer": null}}, {"class_name": "Convolution2D", "name": "convolution2d_12", "inbound_nodes": [[["convolution2d_11", 0, 0]]], "config": {"border_mode": "same", "trainable": true, "nb_col": 4, "subsample": [1, 1], "W_regularizer": null, "bias": true, "nb_row": 4, "activation": "relu", "name": "convolution2d_12", "b_regularizer": {"name": "L1L2Regularizer", "l2": 0.10000000149011612, "l1": 0.0}, "b_constraint": null, "W_constraint": null, "init": "glorot_uniform", "nb_filter": 4, "dim_ordering": "tf", "activity_regularizer": null}}, {"class_name": "AveragePooling2D", "name": "averagepooling2d_2", "inbound_nodes": [[["convolution2d_12", 0, 0]]], "config": {"border_mode": "valid", "pool_size": [2, 2], "strides": [2, 2], "trainable": true, "dim_ordering": "tf", "name": "averagepooling2d_2"}}], "output_layers": [["averagepooling2d_2", 0, 0]], "input_layers": [["icon_goes_here", 0, 0]], "name": "model_2"}}
--------------------------------------------------------------------------------
/web/hintbot.js:
--------------------------------------------------------------------------------
// Create the keras.js model backed by the exported hintbot files.
function loadHintBot() {
    const filepaths = {
        model: 'web/hintbot.json',
        weights: 'web/hintbot_weights.buf',
        metadata: 'web/hintbot_metadata.json'
    };
    return new KerasJS.Model({ filepaths: filepaths, gpu: true });
}
11 |
// Render a flat RGBA byte buffer of size w*h into `img` via an
// offscreen canvas. `scale` is accepted for API compatibility but unused.
// Frankensteined from http://stackoverflow.com/questions/22823752/creating-image-from-array-in-javascript-and-html5
function displayImage(image, img, w, h, scale) {
    const canvas = document.createElement('canvas');
    canvas.width = w;
    canvas.height = h;

    const ctx = canvas.getContext('2d');
    const idata = ctx.createImageData(w, h);
    idata.data.set(image);
    ctx.putImageData(idata, 0, 0);

    img.src = canvas.toDataURL();
}
33 |
// Decode a base64/data-URI image into its flat RGBA pixel data by
// drawing it onto an offscreen canvas and reading the pixels back.
// Frankensteined from http://stackoverflow.com/questions/8751020/how-to-get-a-pixels-x-y-coordinate-color-from-an-image
// Bugfix from https://taditdash.wordpress.com/2016/10/04/uncaught-indexsizeerror-failed-to-execute-getimagedata-on-canvasrenderingcontext2d-the-source-width-is-0/
function base64toRGBA(image) {
    return new Promise((resolve, reject) => {
        const img = document.createElement("img");
        img.onload = () => {
            const canvas = document.createElement("canvas");
            canvas.width = img.width || img.naturalWidth;
            canvas.height = img.height || img.naturalHeight;
            const ctx = canvas.getContext('2d');
            ctx.drawImage(img, 0, 0, canvas.width, canvas.height);
            resolve(ctx.getImageData(0, 0, canvas.width, canvas.height).data);
        };
        // Bug fix: a failed decode previously left the promise pending forever.
        img.onerror = () => reject(new Error("Failed to decode image"));
        // Assign src after the handlers are attached.
        img.src = image;
    });
}
52 |
// Hide the element with the given id via the .hidden CSS class.
function hide(id) {
    const el = document.getElementById(id);
    el.classList.add("hidden");
}
56 |
// Reveal the element with the given id by removing the .hidden class.
function show(id) {
    const el = document.getElementById(id);
    el.classList.remove("hidden");
}
60 |
// Return the value of the first own enumerable property of `obj`
// (used to grab the single output tensor from keras.js predictions).
// Returns undefined for an empty object.
function getFirstProperty(obj) {
    // Object.keys avoids the for...in pitfall of walking inherited properties.
    const keys = Object.keys(obj);
    return keys.length > 0 ? obj[keys[0]] : undefined;
}
66 |
// Wire up the page: load the model, then enable file uploads that run
// the uploaded icon through the network and display the result.
function init() {
    // Bug fix: `hintbot` was assigned without a declaration, creating an
    // implicit global; it is only used inside this closure.
    const hintbot = loadHintBot();
    hintbot.ready().then(() => {
        // Model is ready: swap the spinner for the upload UI.
        hide("spinner");
        show("fileInputWrapper");
        show("description");
        // Enable file uploads
        document.getElementById("fileInput").addEventListener("change", function() {
            const reader = new FileReader();
            reader.onload = function() {
                const base64 = this.result;
                base64toRGBA(base64).then((rgba) => {
                    const flatrgba = new Float32Array([].concat.apply([], rgba));

                    const original = document.getElementById("original");
                    const hinted = document.getElementById("hinted");

                    // Show the 32x32 original...
                    displayImage(new Uint8ClampedArray(flatrgba), original, 32, 32, 0.5);

                    // ...then the model's 16x16 hinted prediction.
                    const inputData = {
                        'icon_goes_here': flatrgba
                    };
                    hintbot.predict(inputData).then(outputData => {
                        const prediction = new Uint8ClampedArray(getFirstProperty(outputData));
                        displayImage(prediction, hinted, 16, 16, 1.0);
                        show("display");
                    }).catch(err => {
                        console.log(err);
                    });
                });
            };

            reader.readAsDataURL(this.files[0]);
        }, false);
    });
}
window.onload = init;
105 |
--------------------------------------------------------------------------------
/hintbot.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from keras.layers import Input, Convolution2D, MaxPooling2D, AveragePooling2D
3 | from keras.models import Model
4 | from keras.regularizers import l2
5 | from scipy.misc import imresize
6 | import numpy as np
7 | import os
8 | import skimage.io as io
9 | import argparse
10 |
11 |
# Argument parser: with no positional files the script trains; with files
# it runs prediction on each of them.
parser = argparse.ArgumentParser(description='Downscale icons while preserving crispness',
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
add_arg = parser.add_argument
add_arg("files", nargs="*", default=[])  # icons to predict; empty -> train
add_arg("--weights", default=None, type=str, help='h5 file to read/write weights to.')
add_arg("--loadweights", default="weights.h5", type=str, help='h5 file to read weights from.')
add_arg("--saveweights", default="weights.h5", type=str, help='h5 file to write weights to.')
add_arg("--epochs", default=10, type=int, help='Number of epochs to train.')
add_arg("--split", default=0.9, type=float, help='Percent of the data to train on.')
add_arg("--predictionfolder", default="predictions", type=str, help="Folder to save predictions in")
add_arg("--updateweb", action='store_true', help='Update the model/weights stored in /web/')
add_arg("--makeiconset", action='store_true', help='Create a full iconset from the provided file')


# Determine paths for weights.
# --weights, when given, overrides both --loadweights and --saveweights.
args = parser.parse_args()
load_weights_filepath = args.loadweights
save_weights_filepath = args.saveweights
weights_filepath = args.weights
if (weights_filepath):
    load_weights_filepath = weights_filepath
    save_weights_filepath = weights_filepath
35 |
36 |
def createModel(w=None, h=None):
    """Build and compile the hintbot downscaling network (Keras 1 API).

    Args:
        w, h: input width/height; None lets Keras accept any size.

    Returns:
        A compiled Model mapping a (w, h, 4) RGBA icon to a hinted
        half-resolution (w/2, h/2, 4) icon. If a weights file exists at
        load_weights_filepath, it is loaded into the model.
    """
    original = Input(shape=(w, h, 4), name='icon_goes_here')

    # Four full-resolution 64-filter convs, average-pool down 2x,
    # then reduce channels 16 -> 4 to produce the RGBA output.
    x = original
    for _ in range(4):
        x = Convolution2D(64, 4, 4, activation='relu', border_mode='same', b_regularizer=l2(0.1))(x)
    x = AveragePooling2D((2, 2), border_mode='valid')(x)
    x = Convolution2D(16, 4, 4, activation='relu', border_mode='same', b_regularizer=l2(0.1))(x)
    x = Convolution2D(4, 4, 4, activation='relu', border_mode='same', b_regularizer=l2(0.1))(x)
    downscaled = x

    hintbot = Model(input=original, output=downscaled)
    hintbot.compile(optimizer='adam', loss='mean_squared_error')
    # Resume from previously saved weights when available.
    if os.path.isfile(load_weights_filepath):
        hintbot.load_weights(load_weights_filepath)
    return hintbot
59 |
def predict(model, x):
    """Run the model on one image and return the uint8 prediction."""
    batch = np.asarray([x])                 # model expects a batch axis
    result = model.predict(batch)[0]
    return result.clip(0, 255).astype(np.uint8)
63 |
def train(model):
    """Train `model` on the iconset data and save its weights.

    Loads all @2x/@1x icon pairs from input_iconsets, fits for
    args.epochs epochs, and writes weights to save_weights_filepath.
    """
    import data
    # Prepare input (-1 -> no directory limit)
    x_train, y_train, x_test, y_test = data.loadImages("input_iconsets", -1, args.split)
    # Bug fix: this function previously ignored its `model` parameter and
    # trained/saved the module-level `hintbot` global instead.
    model.fit(x_train, y_train, nb_epoch=args.epochs, batch_size=256,
              shuffle=True, validation_data=(x_test, y_test))

    # Save weights
    if save_weights_filepath:
        print("saving weights")
        model.save_weights(save_weights_filepath, overwrite=True)
74 |
def predictsinglefile(model, filepath):
    """Hint a single icon file, writing "<name>_hinted.png" next to it."""
    filepath = os.path.abspath(filepath)
    assert os.path.isfile(filepath), "File " + str(filepath) + " does not exist"
    stem = os.path.splitext(os.path.basename(filepath))[0]
    outputpath = os.path.dirname(filepath) + "/" + stem + "_hinted.png"
    hinted = predict(model, io.imread(filepath))
    io.imsave(outputpath, hinted)
82 |
def predicticonset(model, filepath):
    """Generate a macOS .iconset folder from one large source icon.

    Saves the current image at each required iconset size (largest first),
    running the model after each save to produce the next half-size
    hinted version.
    """
    filepath = os.path.abspath(filepath)
    # Make sure file exists
    assert os.path.isfile(filepath), "File " + str(filepath) + " does not exist"

    # Set up iconset folder
    outputpath = os.path.dirname(filepath) + "/" + os.path.splitext(os.path.basename(filepath))[0] + ".iconset/"
    if not os.path.exists(outputpath):
        os.makedirs(outputpath)

    # List of sizes to make (lowest to highest)
    sizes = [(1024, ["icon_512x512@2x"]),
            (512, ["icon_512x512", "icon_256x256@2x"]),
            (256, ["icon_256x256", "icon_128x128@2x"]),
            (128, ["icon_128x128"]),
            (64, ["icon_32x32@2x"]),
            (32, ["icon_32x32", "icon_16x16@2x"]),
            (16, ["icon_16x16"])
            ]
    # After reversing, sizes[-1] is the LARGEST remaining entry and
    # sizes.pop() yields entries largest-first.
    sizes.reverse()

    # Read the given image
    current = io.imread(filepath)

    # Convert it to all of the sizes in decreasing order
    while (sizes):
        # len() of a numpy image is its first dimension (height in px).
        currentsize = len(current)
        # Drop entries larger than the source image (e.g. a 512px source
        # skips the 1024 entry).
        # NOTE(review): if the source is smaller than 16px this empties
        # `sizes` and the pop() below raises IndexError — confirm inputs.
        while (currentsize < sizes[-1][0]):
            sizes.pop()
        targetsizeandnames = sizes.pop()
        targetsize = targetsizeandnames[0]
        targetnames = targetsizeandnames[1]
        if (currentsize != targetsize):
            # NOTE(review): scipy.misc.imresize interprets an int size as
            # a *percentage* of the original, not a pixel size; a
            # (targetsize, targetsize) tuple may be intended — confirm.
            current = imresize(current, targetsize)
        for name in targetnames:
            io.imsave(outputpath + name + ".png", current)
        # Halve via the model for the next iteration; after the final
        # (16px) entry this produces one unused 8px prediction.
        current = predict(model, current)
def saveweb(filepath="web"):
    """Export the model (json) and weights (h5 + keras.js buffers) into `filepath`."""
    # keras.js needs explicit input dimensions, so build a fixed 32x32 model.
    model = createModel(32, 32)
    print("Saving weights and model to " + filepath + "...")
    weights_path = filepath + "/hintbot.h5"
    model_path = filepath + "/hintbot.json"

    # Raw Keras weights, then the keras.js .buf/.json pair derived from them.
    model.save_weights(weights_path, overwrite=True)
    import encoder
    enc = encoder.Encoder(weights_path)
    enc.serialize()
    enc.save()

    # Model architecture as json.
    with open(model_path, 'w') as f:
        f.write(model.to_json())
133 |
# Build the model once, then either train on the iconset data or run
# predictions on the files given on the command line.
hintbot = createModel()

# Save weights to web directory if requested
if args.updateweb:
    saveweb()

if len(args.files) == 0:
    # No files given: train
    train(hintbot)
else:
    # Otherwise, predict each file (optionally as a full iconset)
    for filepath in args.files:
        if args.makeiconset:
            predicticonset(hintbot, filepath)
        else:
            predictsinglefile(hintbot, filepath)
151 |
--------------------------------------------------------------------------------
/web/weblas.js:
--------------------------------------------------------------------------------
1 | (function(f){if(typeof exports==="object"&&typeof module!=="undefined"){module.exports=f()}else if(typeof define==="function"&&define.amd){define([],f)}else{var g;if(typeof window!=="undefined"){g=window}else if(typeof global!=="undefined"){g=global}else if(typeof self!=="undefined"){g=self}else{g=this}g.weblas = f()}})(function(){var define,module,exports;return (function e(t,n,r){function s(o,u){if(!n[o]){if(!t[o]){var a=typeof require=="function"&&require;if(!u&&a)return a(o,!0);if(i)return i(o,!0);var f=new Error("Cannot find module '"+o+"'");throw f.code="MODULE_NOT_FOUND",f}var l=n[o]={exports:{}};t[o][0].call(l.exports,function(e){var n=t[o][1][e];return s(n?n:e)},l,l.exports,e,t,n,r)}return n[o].exports}var i=typeof require=="function"&&require;for(var o=0;o=0}function fromArray(e,t,l){var a,r,c=[];l?(c[1]=e.length,c[0]=e[0].length):(c[0]=e.length,c[1]=e[0].length),r=c[1],t=t||Float32Array,a=new t(c[0]*c[1]);for(var u=0;ur;r++)for(var c=0;t>c;c++)a[c*e+r]=l[r*t+c];return a}var globals=require("./lib/globals"),pipeline=require("./lib/pipeline"),SGEMMCalculator=require("./lib/sgemmcalculator"),SAXPYCalculator=require("./lib/saxpycalculator"),SSCALCalculator=require("./lib/sscalcalculator"),SDWNSCalculator=require("./lib/sdwnscalculator"),SCLMPCalculator=require("./lib/sclmpcalculator"),test=require("./lib/test"),gl=globals.gl,sgemmcalculator=new SGEMMCalculator(gl),saxpycalculator=new SAXPYCalculator(gl),sscalcalculator=new SSCALCalculator(gl),sdwnscalculator=new SDWNSCalculator(gl),sclmpcalculator=new 
SCLMPCalculator(gl);module.exports={saxpy:saxpy,sscal:sscal,sgemm:sgemm,sstd:sstd,sdwns:sdwns,sclmp:sclmp,pipeline:pipeline,gpu:{gl:gl,sgemm:pipeline.sgemmcalculator.calculate.bind(pipeline.sgemmcalculator),sscal:pipeline.sscalcalculator.calculate.bind(pipeline.sscalcalculator),sclmp:pipeline.sclmpcalculator.calculate.bind(pipeline.sclmpcalculator),sdwns:pipeline.sdwnscalculator.calculate.bind(pipeline.sdwnscalculator),encode:gl.encode.bind(gl)},util:{fromArray:fromArray,transpose:transpose},test:test},String.prototype.format||(String.prototype.format=function(){var e=arguments;return this.replace(/{(\d+)}/g,function(t,l){return"undefined"!=typeof e[l]?e[l]:t})});
3 | },{"./lib/globals":2,"./lib/pipeline":3,"./lib/saxpycalculator":4,"./lib/sclmpcalculator":5,"./lib/sdwnscalculator":6,"./lib/sgemmcalculator":7,"./lib/sscalcalculator":8,"./lib/test":10}],2:[function(require,module,exports){
// Module 2 (lib/globals): construct the single shared WebGL context wrapper.
// Every calculator in the bundle reuses this one `gl` instance so that all
// textures live in the same context.
var WebGL = require("./webgl");
var gl = new WebGL();
module.exports = { gl: gl };
5 | },{"./webgl":11}],3:[function(require,module,exports){
6 | function sscal(l,a,e){var r=e.shape[0],s=e.shape[1],c=new Tensor([r,s],null);return sscalcalculator.calculate(r,s,l,a,e.texture,c.texture),c}function sgemm(l,a,e,r,s){if(e.shape[1]!==a.shape[1])throw new Error("Second dimension must be of same size for input Tensors (second Tensor is transposed).");var c,t=a.shape[0],u=e.shape[0],o=a.shape[1];c=s?s.texture:null;var n=new Tensor([t,u],null);return sgemmcalculator.calculate(t,u,o,l,a.texture,e.texture,r,c,n.texture),n}function sdwns(l,a,e,r){if(r.shape[1]%l!==0)throw new Error("Second dimension of tensor must be a multiple of channels");var s=r.shape[0],c=r.shape[1]/l,t=Math.floor((s-a)/e)+1,u=Math.floor((c-a)/e)+1,o=new Tensor([t,u*l],null);return sdwnscalculator.calculate(s,c,l,a,e,r.texture,o.texture),o}function sclmp(l,a,e){l=null!=l?l:Number.MIN_VALUE,a=null!=a?a:Number.MAX_VALUE;var r=e.shape[0],s=e.shape[1],c=new Tensor([r,s],null);return sclmpcalculator.calculate(r,s,l,a,e.texture,c.texture),c}var globals=require("./globals"),SGEMMCalculator=require("./sgemmcalculator"),SAXPYCalculator=require("./saxpycalculator"),SSCALCalculator=require("./sscalcalculator"),SDWNSCalculator=require("./sdwnscalculator"),SCLMPCalculator=require("./sclmpcalculator"),Tensor=require("./tensor"),gl=globals.gl,sgemmcalculator=new SGEMMCalculator(gl,!1),saxpycalculator=new SAXPYCalculator(gl,!1),sscalcalculator=new SSCALCalculator(gl,!1),sdwnscalculator=new SDWNSCalculator(gl,!1),sclmpcalculator=new SCLMPCalculator(gl,!1);module.exports={Tensor:Tensor,sscal:sscal,sgemm:sgemm,sdwns:sdwns,sclmp:sclmp,sgemmcalculator:sgemmcalculator,saxpycalculator:saxpycalculator,sscalcalculator:sscalcalculator,sdwnscalculator:sdwnscalculator,sclmpcalculator:sclmpcalculator};
7 | },{"./globals":2,"./saxpycalculator":4,"./sclmpcalculator":5,"./sdwnscalculator":6,"./sgemmcalculator":7,"./sscalcalculator":8,"./tensor":9}],4:[function(require,module,exports){
// Module 4 (lib/saxpycalculator): SAXPY — computes a*X + Y over two
// equal-length vectors stored as WebGL textures, rendering each element as
// IEEE-754 bytes so the result can be read back with readPixels.
function SAXPYCalculator(webgl, standalone) {
  this.webgl = webgl;
  // FIX: the original set `this.standalone = standalone || !0`, which is
  // always truthy, and its (unreachable) else-branch called
  // `this.webgl.createProgram(p)` with `p` undeclared — a latent
  // ReferenceError.  SAXPY has no texel-packed pipeline shader, so it is
  // always standalone; state that explicitly and drop the dead branch.
  // (Behavior is unchanged: the standalone shader below was always used.)
  this.standalone = true;
  var standaloneShaderSource = "#define GLSLIFY 1\nprecision highp float;\n\nvarying vec2 outTex; // texture coords of row/column to calculate\nuniform sampler2D X; // texture with data from padded A\nuniform sampler2D Y; // texture with data from padded transpose of B\nuniform int N;\nuniform float a; // coefficient to multiplication\n\n// Render float to bytes according to IEEE 754 Floating Point\nvec4 encode_float_1540259130(float val) {\n\n // TODO: correctly handle denormal numbers\n // http://www.2ality.com/2012/04/number-encoding.html\n float a = abs(val); // encode absolute value + sign\n float exp = floor(log2(a)); // number of powers of 2\n float mant = pow(2.,log2(a)-exp) * pow(2.,23.); // multiply to fill 24 bits (implied leading 1)\n float mant1 = floor(mant / 256. / 256.); // first 8 bits of mantissa\n float mant2 = mod(floor(mant / 256.),256.); // second 8 bits\n float mant3 = mod(mant,256.); // third 8 bits\n\n highp float sign = 128.-128.*(a/val); // sign bit is 256 or 0\n highp float e = (sign+exp+127.)/510.; // exponent and sign\n highp float m1 = (mant1-(128.*(1.-mod(exp+127.,2.))))/255.; // handle leading bit\n highp float m2 = (mant2)/255.; // middle part\n highp float m3 = (mant3+.5)/255.; // scale to 0 - 255\n\n return vec4(m3,m2,m1,e);\n}\n\n// select an element from a vector based on index\nfloat select_index_1604150559(vec4 v, int index){\n float val;\n if (index == 0) {\n val = v.r;\n } else if(index == 1) {\n val = v.g;\n } else if(index == 2) {\n val = v.b;\n } else if(index == 3){\n val = v.a;\n } else {\n // should never be here\n val = 0.0;\n }\n\n return val;\n}\n\nvoid main(void) {\n\n // get the implied row and column from .y and .x of passed (output)\n // texture coordinate. These map directly to input texture space when\n // the relevant dimensions are the same.\n float row = outTex.y;\n float col = outTex.x;\n\n // direct usage of col requires output be padded exactly like input\n vec4 x = texture2D( X, vec2(col, row));\n vec4 y = texture2D( Y, vec2(col, row));\n vec4 sum_v = (a * x) + y;\n int channel = int(mod(col * float(N), 4.0 ));\n float sum = select_index_1604150559(sum_v, channel);\n\n if (sum == 0.) {\n gl_FragColor = vec4(0.,0.,0.,0.);\n return;\n }\n\n // output vec4 with bytes for an IEEE754 32-bit floating point number\n gl_FragColor = encode_float_1540259130(sum);\n}\n";
  this.program = this.webgl.createProgram(standaloneShaderSource);
}
var WebGL = require("./webgl");
module.exports = SAXPYCalculator;

// names of the uniforms in the shader above
SAXPYCalculator.TEXTURE_UNIFORM_NAME_0 = "X";
SAXPYCalculator.TEXTURE_UNIFORM_NAME_1 = "Y";
SAXPYCalculator.LENGTH_UNIFORM_NAME = "N";
SAXPYCalculator.COEFFICIENT_UNIFORM_NAME = "a";

/**
 * Run a*X + Y for vectors of length N.
 * @param {number} N - unpadded vector length
 * @param {number} a - scalar coefficient
 * @param {WebGLTexture} X - first input vector
 * @param {WebGLTexture} Y - second input vector
 * @param {WebGLTexture} out - destination texture (1 row, N+pad columns)
 */
SAXPYCalculator.prototype.calculate = function (N, a, X, Y, out) {
  var ctx = this.webgl.context;
  this.webgl.selectProgram(this.program);
  this.bindInputTexture(X, ctx.TEXTURE0, SAXPYCalculator.TEXTURE_UNIFORM_NAME_0);
  this.bindInputTexture(Y, ctx.TEXTURE1, SAXPYCalculator.TEXTURE_UNIFORM_NAME_1);
  // pad the row out to a multiple of the texel width
  var pad = this.webgl.getPad(N);
  this.bindUniforms(N + pad, a);
  this.webgl.bindOutputTexture(1, N + pad, out);
  ctx.drawElements(ctx.TRIANGLES, 6, ctx.UNSIGNED_SHORT, 0);
  this.webgl.unbindInputTexture(ctx.TEXTURE0);
  this.webgl.unbindInputTexture(ctx.TEXTURE1);
};

/**
 * Bind a texture to a texture unit and point the named sampler uniform at it.
 * @param {WebGLTexture} texture
 * @param {number} textureUnit - e.g. gl.TEXTURE0
 * @param {string} name - sampler uniform name in the shader
 */
SAXPYCalculator.prototype.bindInputTexture = function (texture, textureUnit, name) {
  var ctx = this.webgl.context;
  var program = this.program;
  ctx.activeTexture(textureUnit);
  ctx.bindTexture(ctx.TEXTURE_2D, texture);
  var sampler = ctx.getUniformLocation(program, name);
  ctx.uniform1i(sampler, textureUnit - ctx.TEXTURE0);
};

/**
 * Upload the scalar uniforms.
 * @param {number} N - padded vector length
 * @param {number} a - scalar coefficient
 */
SAXPYCalculator.prototype.bindUniforms = function (N, a) {
  var ctx = this.webgl.context;
  var lengthLoc = ctx.getUniformLocation(this.program, SAXPYCalculator.LENGTH_UNIFORM_NAME);
  var coeffLoc = ctx.getUniformLocation(this.program, SAXPYCalculator.COEFFICIENT_UNIFORM_NAME);
  ctx.uniform1i(lengthLoc, N);
  ctx.uniform1f(coeffLoc, a);
};
9 | },{"./webgl":11}],5:[function(require,module,exports){
/* Module 5 (lib/sclmpcalculator): SCLMP — elementwise clamp of a matrix
 * texture into [a, b].  Two shaders: `e` (standalone) encodes each element as
 * IEEE-754 bytes for readPixels; `o` (pipeline) writes packed float texels
 * and zeroes the padded tail of each row. */
10 | function SCLMPCalculator(n,t){this.webgl=n,this.standalone=null!=t?t:!0;var e="#define GLSLIFY 1\nprecision highp float;\n\nvarying vec2 outTex; // texture coords of row/column to calculate\nuniform sampler2D X; // texture with data from padded A\nuniform int N; // number of columns\nuniform int pad; // additional columns to nearest multiple of four\nuniform float a; // lower bound\nuniform float b; // upper bound\n\n// Render float to bytes according to IEEE 754 Floating Point\nvec4 encode_float_1540259130(float val) {\n\n // TODO: correctly handle denormal numbers\n // http://www.2ality.com/2012/04/number-encoding.html\n float a = abs(val); // encode absolute value + sign\n float exp = floor(log2(a)); // number of powers of 2\n float mant = pow(2.,log2(a)-exp) * pow(2.,23.); // multiply to fill 24 bits (implied leading 1)\n float mant1 = floor(mant / 256. / 256.); // first 8 bits of mantissa\n float mant2 = mod(floor(mant / 256.),256.); // second 8 bits\n float mant3 = mod(mant,256.); // third 8 bits\n\n highp float sign = 128.-128.*(a/val); // sign bit is 256 or 0\n highp float e = (sign+exp+127.)/510.; // exponent and sign\n highp float m1 = (mant1-(128.*(1.-mod(exp+127.,2.))))/255.; // handle leading bit\n highp float m2 = (mant2)/255.; // middle part\n highp float m3 = (mant3+.5)/255.; // scale to 0 - 255\n\n return vec4(m3,m2,m1,e);\n}\n\n// select an element from a vector based on index\nfloat select_index_1604150559(vec4 v, int index){\n float val;\n if (index == 0) {\n val = v.r;\n } else if(index == 1) {\n val = v.g;\n } else if(index == 2) {\n val = v.b;\n } else if(index == 3){\n val = v.a;\n } else {\n // should never be here\n val = 0.0;\n }\n\n return val;\n}\n\nvoid main(void) {\n\n // get the implied row and column from .y and .x of passed (output)\n // texture coordinate. 
These map directly to input texture space when\n // the relevant dimensions are the same.\n float row = outTex.y;\n float col = outTex.x;\n\n // return 0.0 if in padded region of output texture\n if(col * float(N + pad) > float(N) ) {\n gl_FragColor = vec4(0.,0.,0.,0.);\n return;\n }\n\n // direct usage of col requires output be padded exactly like input\n vec4 x = texture2D( X, vec2(col, row));\n vec4 val = clamp(x, a, b);\n\n // select and output channel (standalone version only)\n int channel = int(mod(col * float(N + pad), 4.0));\n float sum = select_index_1604150559(val, channel);\n\n if (sum == 0.) {\n gl_FragColor = vec4(0.,0.,0.,0.);\n return;\n }\n\n // output vec4 with bytes for an IEEE754 32-bit floating point number\n gl_FragColor = encode_float_1540259130(sum);\n}\n",o="#define GLSLIFY 1\nprecision highp float;\n\nvarying vec2 outTex; // texture coords of row/column to calculate\nuniform sampler2D X; // texture with data from padded A\nuniform int N; // number of columns\nuniform int pad; // additional columns to nearest multiple of four\nuniform float a; // lower bound\nuniform float b; // upper bound\n\n// set pad values to 0.0, if in padded region of output texture\nvoid fix_pad_1540259130(inout vec4 v, int pad){\n v.a = 0.0;\n if(pad == 2){\n v.b = 0.0;\n } else if(pad == 3){\n v.b = 0.0;\n v.g = 0.0;\n }\n}\n\nvoid main(void) {\n\n // get the implied row and column from .y and .x of passed (output)\n // texture coordinate. 
These map directly to input texture space when\n // the relevant dimensions are the same.\n float row_t = outTex.y;\n float col_t = outTex.x;\n float col = (col_t * float(N + pad) - 2.0); // index of first element in pixel (matrix space)\n\n // direct usage of col requires output be padded exactly like input\n vec4 x = texture2D( X, vec2(col_t, row_t));\n vec4 val_v = clamp(x, a, b);\n\n // is last element in pixel past row length?\n if(pad > 0 && (col + 4.0) > float(N) ) {\n // fix elements in padded region\n fix_pad_1540259130(val_v, pad);\n }\n\n gl_FragColor = val_v;\n}\n";this.standalone?this.program=this.webgl.createProgram(e):this.program=this.webgl.createProgram(o)}var WebGL=require("./webgl");module.exports=SCLMPCalculator,/* uniform names shared by both shaders */SCLMPCalculator.TEXTURE_UNIFORM_NAME_0="X",SCLMPCalculator.LENGTH_UNIFORM_NAME="N",SCLMPCalculator.LOWER_UNIFORM_NAME="a",SCLMPCalculator.UPPER_UNIFORM_NAME="b",/* calculate(M, N, lo, hi, X, out): clamp the M x N matrix texture X into
[lo, hi] and render into `out` (width N+pad in standalone mode, (N+pad)/4
texels in pipeline mode). */SCLMPCalculator.prototype.calculate=function(n,t,e,o,a,l){/* NOTE(review): Number.MIN_VALUE is the smallest POSITIVE double (~5e-324),
not the most negative number, so the default lower bound clamps negative
elements up to ~0 — presumably -Number.MAX_VALUE was intended; confirm
against upstream weblas before changing. */e=null!=e?e:Number.MIN_VALUE,o=null!=o?o:Number.MAX_VALUE;var i=this.webgl.context;this.webgl.selectProgram(this.program),this.bindInputTexture(a,i.TEXTURE0,SCLMPCalculator.TEXTURE_UNIFORM_NAME_0);var r=this.webgl.getPad(t);this.bindUniforms(t,r,e,o),this.standalone?this.webgl.bindOutputTexture(n,t+r,l):this.webgl.bindOutputTexture(n,(t+r)/4,l),i.drawElements(i.TRIANGLES,6,i.UNSIGNED_SHORT,0),this.webgl.unbindInputTexture(i.TEXTURE0)},/* bind `texture` to `textureUnit` and point sampler uniform `name` at it */SCLMPCalculator.prototype.bindInputTexture=function(n,t,e){var o=this.webgl.context,a=this.program;o.activeTexture(t),o.bindTexture(o.TEXTURE_2D,n);var l=o.getUniformLocation(a,e);o.uniform1i(l,t-o.TEXTURE0)},/* upload scalar uniforms: N (columns), pad, lower bound a, upper bound b */SCLMPCalculator.prototype.bindUniforms=function(n,t,e,o){var 
a=this.webgl.context,l=a.getUniformLocation(this.program,SCLMPCalculator.LENGTH_UNIFORM_NAME),i=a.getUniformLocation(this.program,SCLMPCalculator.UPPER_UNIFORM_NAME),r=a.getUniformLocation(this.program,SCLMPCalculator.LOWER_UNIFORM_NAME),u=a.getUniformLocation(this.program,"pad");a.uniform1i(l,n),a.uniform1i(u,t),a.uniform1f(r,e),a.uniform1f(i,o)};
11 | },{"./webgl":11}],6:[function(require,module,exports){
/* Module 6 (lib/sdwnscalculator): SDWNS — max-pool downsampling of a
 * row-major, channel-interleaved image texture.  Shader `t` (standalone)
 * byte-encodes each output element; the pipeline shader writes packed float
 * texels directly. */
12 | function DownsampleCalculator(n,o){this.webgl=n,this.standalone=null!=o?o:!0;var t="#define GLSLIFY 1\n// TODO: unroll loop for stride == factor and small values (2, 3)\nprecision highp float;\n\nvarying vec2 outTex; // texture coords of row/column to calculate\nuniform sampler2D X; // texture with data from padded A\nuniform int factor; // width of image patch\nuniform float stride; // width between image patches\nuniform float C; // number of channels\nuniform float M;\nuniform float N;\nuniform float N_out;\nuniform float M_out;\n\n// Render float to bytes according to IEEE 754 Floating Point\nvec4 encode_float_1540259130(float val) {\n\n // TODO: correctly handle denormal numbers\n // http://www.2ality.com/2012/04/number-encoding.html\n float a = abs(val); // encode absolute value + sign\n float exp = floor(log2(a)); // number of powers of 2\n float mant = pow(2.,log2(a)-exp) * pow(2.,23.); // multiply to fill 24 bits (implied leading 1)\n float mant1 = floor(mant / 256. / 256.); // first 8 bits of mantissa\n float mant2 = mod(floor(mant / 256.),256.); // second 8 bits\n float mant3 = mod(mant,256.); // third 8 bits\n\n highp float sign = 128.-128.*(a/val); // sign bit is 256 or 0\n highp float e = (sign+exp+127.)/510.; // exponent and sign\n highp float m1 = (mant1-(128.*(1.-mod(exp+127.,2.))))/255.; // handle leading bit\n highp float m2 = (mant2)/255.; // middle part\n highp float m3 = (mant3+.5)/255.; // scale to 0 - 255\n\n return vec4(m3,m2,m1,e);\n}\n\n// select an element from a vector based on index\nfloat select_index_1604150559(vec4 v, int index){\n float val;\n if (index == 0) {\n val = v.r;\n } else if(index == 1) {\n val = v.g;\n } else if(index == 2) {\n val = v.b;\n } else if(index == 3){\n val = v.a;\n } else {\n // should never be here\n val = 0.0;\n }\n\n return val;\n}\n\nvoid main(void) {\n\n // get the implied row and column from .y and .x of passed (output)\n // texture coordinate and translate to output pixel space.\n float row = 
floor(outTex.y * M_out); // row on output texture (matrix space)\n float col = floor(outTex.x * N_out); // column on output texture (matrix space)\n float vcol = floor(col / C); // virtual column on output texture (matrix space)\n float vchannel = floor(mod(col, C)); // virtual channel on output texture\n\n const float min = -1.0e+08;\n vec4 currentMax = vec4(min, min, min, min);\n\n float deltaY = 1.0/M;\n float deltaX = 1.0/N;\n float y = ((row * stride) + 0.5)*deltaY; // texture position of input row\n float x;\n float z = vchannel * deltaX;\n for (int i = 0; i < 100; i += 1) {\n if (i >= factor) {\n break;\n }\n x = ((vcol * stride * C) + 0.5) * deltaX; // texture position of input column\n\n for (int j = 0; j < 100; j += 1) {\n if (j >= factor) {\n break;\n }\n\n vec2 coords = vec2(x + z, y);\n vec4 x_v = texture2D(X, coords);\n currentMax = max(currentMax, x_v);\n\n x += (deltaX * C);\n }\n y += deltaY;\n }\n int chan = int(mod(outTex.x * N_out, 4.0 ));\n float val = select_index_1604150559(currentMax, int(chan));\n if (val == 0.) 
{\n gl_FragColor = vec4(0.,0.,0.,0.);\n return;\n }\n\n gl_FragColor = encode_float_1540259130(val);\n}\n";/* NOTE(review): `p` below is assigned without `var`, creating an implicit
global (the sibling calculators declare their pipeline shader locally).
Harmless here because it is consumed immediately, but it leaks the shader
source into the enclosing scope. */p="#define GLSLIFY 1\n// TODO: unroll loop for stride == factor and small values (2, 3)\nprecision highp float;\n\nvarying vec2 outTex; // texture coords of row/column to calculate\nuniform sampler2D X; // texture with data from padded A\nuniform int factor; // width of image patch\nuniform float stride; // width between image patches\nuniform float C; // number of channels\nuniform float M;\nuniform float N;\nuniform float N_out;\nuniform float M_out;\n\nvoid main(void) {\n\n // get the implied row and column from .y and .x of passed (output)\n // texture coordinate and translate to output pixel space.\n float row = floor(outTex.y * M_out); // row on output texture (pixel space)\n float col = floor(outTex.x * N_out); // column on output texture (matrix space)\n float vcol = floor(col / C); // virtual column on output texture (matrix space)\n float vchannel = floor(mod(col, C)); // virtual channel on output texture\n\n const float min = -1.0e+08;\n vec4 currentMax = vec4(min, min, min, min);\n\n float deltaY = 1.0/M;\n float deltaX = 1.0/N;\n float y = ((row * stride) + 0.5)*deltaY; // texture position of input row\n float x;\n float z = vchannel * deltaX;\n for (int i = 0; i < 100; i += 1) {\n if (i >= factor) {\n break;\n }\n x = ((vcol * stride * C) + 0.5) * deltaX; // texture position of input column\n\n for (int j = 0; j < 100; j += 1) {\n if (j >= factor) {\n break;\n }\n\n vec2 coords = vec2(x + z, y);\n vec4 x_v = texture2D(X, coords);\n currentMax = max(currentMax, x_v);\n\n x += (deltaX * C);\n }\n y += deltaY;\n }\n\n gl_FragColor = currentMax;\n}\n",this.standalone?this.program=this.webgl.createProgram(t):this.program=this.webgl.createProgram(p)}var 
WebGL=require("./webgl");module.exports=DownsampleCalculator,/* uniform names shared by both shaders */DownsampleCalculator.TEXTURE_UNIFORM_NAME_0="X",DownsampleCalculator.INPUT_ROW_COUNT_UNIFORM_NAME="M",DownsampleCalculator.INPUT_COLUMN_COUNT_UNIFORM_NAME="N",DownsampleCalculator.OUTPUT_ROW_COUNT_UNIFORM_NAME="M_out",DownsampleCalculator.OUTPUT_COLUMN_COUNT_UNIFORM_NAME="N_out",DownsampleCalculator.FACTOR_UNIFORM_NAME="factor",DownsampleCalculator.STRIDE_UNIFORM_NAME="stride",DownsampleCalculator.CHANNEL_COUNT_UNIFORM_NAME="C",/* calculate(M, N, channels, factor, stride, X, out): max-pool the M x N
image (channels interleaved per pixel) with a factor x factor window moved by
`stride`; channels must be a multiple of the texel width (4). */DownsampleCalculator.prototype.calculate=function(n,o,t,a,e,l,r){if(t%WebGL.COMPONENTS_PER_TEXEL!=0)throw new Error("Channel count must be a multiple of "+WebGL.COMPONENTS_PER_TEXEL);var i=this.webgl.context,u=(Math.floor((o-a)/e)+1)*t,f=Math.floor((n-a)/e)+1;this.webgl.selectProgram(this.program),this.bindInputTexture(l,i.TEXTURE0,DownsampleCalculator.TEXTURE_UNIFORM_NAME_0),this.bindUniforms(n,o*t,f,u,a,e,t),this.standalone?this.webgl.bindOutputTexture(f,u,r):this.webgl.bindOutputTexture(f,u/WebGL.COMPONENTS_PER_TEXEL,r),i.drawElements(i.TRIANGLES,6,i.UNSIGNED_SHORT,0),this.webgl.unbindInputTexture(i.TEXTURE0)},/* bind `texture` to `textureUnit` and point sampler uniform `name` at it */DownsampleCalculator.prototype.bindInputTexture=function(n,o,t){var a=this.webgl.context,e=this.program;a.activeTexture(o),a.bindTexture(a.TEXTURE_2D,n);var l=a.getUniformLocation(e,t);a.uniform1i(l,o-a.TEXTURE0)},/* upload scalar uniforms: M, N, M_out, N_out, factor, stride, C */DownsampleCalculator.prototype.bindUniforms=function(n,o,t,a,e,l,r){var 
i=this.webgl.context,u=i.getUniformLocation(this.program,DownsampleCalculator.INPUT_ROW_COUNT_UNIFORM_NAME),f=i.getUniformLocation(this.program,DownsampleCalculator.INPUT_COLUMN_COUNT_UNIFORM_NAME),m=i.getUniformLocation(this.program,DownsampleCalculator.OUTPUT_ROW_COUNT_UNIFORM_NAME),c=i.getUniformLocation(this.program,DownsampleCalculator.OUTPUT_COLUMN_COUNT_UNIFORM_NAME),s=i.getUniformLocation(this.program,DownsampleCalculator.FACTOR_UNIFORM_NAME),p=i.getUniformLocation(this.program,DownsampleCalculator.STRIDE_UNIFORM_NAME),d=i.getUniformLocation(this.program,DownsampleCalculator.CHANNEL_COUNT_UNIFORM_NAME);i.uniform1f(u,n),i.uniform1f(f,o),i.uniform1f(m,t),i.uniform1f(c,a),i.uniform1i(s,e),i.uniform1f(p,l),i.uniform1f(d,r)};
13 | },{"./webgl":11}],7:[function(require,module,exports){
/* Module 7 (lib/sgemmcalculator): SGEMM — alpha * A * B^T (+ beta * C) where
 * B is stored transposed so the shared dimension K can be walked four
 * elements per texel with `dot`.  Four shaders: `n`/`o` (standalone,
 * without/with the additive C term) byte-encode each element for readPixels;
 * `a`/`r` (pipeline, without/with C) write packed float texels. */
14 | function SGEMMCalculator(t,e){this.webgl=t,this.standalone=null!=e?e:!0;var n="#define GLSLIFY 1\n// fragment shader that calculates the matrix product and renders each\n// element to the bytes representing a 32-bit IEEE754 floating point in\n// the output RGBA canvas.\n// readPixel is used to read the bytes.\n\nprecision highp float;\n\nvarying vec2 outTex; // texture coords of row/column to calculate\nuniform sampler2D A; // texture with data from padded A\nuniform sampler2D B_t; // texture with data from padded transpose of B\nuniform int K; // number of elements in shared dimension\nuniform int N; // number of columns in output\nuniform int pad; //\nuniform float alpha; // coefficient to multiplication\n\n// sum of products between elements in row i (from A) x col j (from B)\n\n// Calculate the dot product between the row (from A) and column (from B)\n// identified by the passed indeces (output texture coordinate space).\n// We loop over elements in the row and column and sum the product\n// using the glsl `dot` function to process four elements at a time.\n// This four element optimization requires that the matrix B be\n// transposed before texel packing and that both matrices be padded\n// (with zeros) to a multiple of four (4) in their shared dimension.\nfloat dot_rowcol_1540259130(float y, float x, sampler2D A, sampler2D B_t, int K) {\n float delta_t = 1./float(K);// space (on texture) between elements\n float sum = 0.; // sum for this row/column pair\n float z = 0.5 * (4.0 * delta_t);// position for shared dimension on source textures\n\n for (int l=0 ; l<4096 ; ++l) {\n if(l >= K / 4) break; // stop when we finish the row/column\n // l is in pixel space, so we divide by four\n\n // retrieve next four elements from each texture\n vec4 a_ik = texture2D( A, vec2(z, y));\n vec4 b_kj = texture2D(B_t, vec2(z, x));\n\n // use `dot` to process four elements at a time\n sum += dot(a_ik, b_kj);\n z += (4.0 * delta_t); // (z + 0.5)*delta\n }\n return 
sum;\n}\n\n// Render float to bytes according to IEEE 754 Floating Point\nvec4 encode_float_1604150559(float val) {\n\n // TODO: correctly handle denormal numbers\n // http://www.2ality.com/2012/04/number-encoding.html\n float a = abs(val); // encode absolute value + sign\n float exp = floor(log2(a)); // number of powers of 2\n float mant = pow(2.,log2(a)-exp) * pow(2.,23.); // multiply to fill 24 bits (implied leading 1)\n float mant1 = floor(mant / 256. / 256.); // first 8 bits of mantissa\n float mant2 = mod(floor(mant / 256.),256.); // second 8 bits\n float mant3 = mod(mant,256.); // third 8 bits\n\n highp float sign = 128.-128.*(a/val); // sign bit is 256 or 0\n highp float e = (sign+exp+127.)/510.; // exponent and sign\n highp float m1 = (mant1-(128.*(1.-mod(exp+127.,2.))))/255.; // handle leading bit\n highp float m2 = (mant2)/255.; // middle part\n highp float m3 = (mant3+.5)/255.; // scale to 0 - 255\n\n return vec4(m3,m2,m1,e);\n}\n\nvoid main(void) {\n\n // get the implied row and column from .y and .x of passed (output)\n // texture coordinate. These map directly to input texture space when\n // the relevant dimensions are the same.\n float row_t = outTex.y;\n float col_t = outTex.x;\n\n // sum row x col for the passed pixel\n float sum = alpha * dot_rowcol_1540259130(row_t, col_t * float(N + pad)/float(N), A, B_t, K);\n\n if (sum == 0.) 
{\n gl_FragColor = vec4(0.,0.,0.,0.);\n return;\n }\n\n // output vec4 with bytes for an IEEE754 32-bit floating point number\n gl_FragColor = encode_float_1604150559(sum);\n}\n",o="#define GLSLIFY 1\n// fragment shader that calculates the matrix product (with additive 'C' term)\n// and renders each element to the bytes representing a 32-bit IEEE754 floating\n// point in the output RGBA canvas.\n// readPixel is used to read the bytes.\n\nprecision highp float;\n\nvarying vec2 outTex; // texture coords of row/column to calculate\nuniform sampler2D A; // texture with data from padded A\nuniform sampler2D B_t; // texture with data from padded transpose of B\nuniform sampler2D C; // texture with data from C\nuniform int K; // number of elements in shared dimension\nuniform int N; // number of columns in output\nuniform int pad; //\nuniform float alpha; // coefficient to multiplication\nuniform float beta; // coefficient to additive term\n\n// sum of products between elements in row i (from A) x col j (from B)\n\n// Calculate the dot product between the row (from A) and column (from B)\n// identified by the passed indeces (output texture coordinate space).\n// We loop over elements in the row and column and sum the product\n// using the glsl `dot` function to process four elements at a time.\n// This four element optimization requires that the matrix B be\n// transposed before texel packing and that both matrices be padded\n// (with zeros) to a multiple of four (4) in their shared dimension.\nfloat dot_rowcol_1540259130(float y, float x, sampler2D A, sampler2D B_t, int K) {\n float delta_t = 1./float(K);// space (on texture) between elements\n float sum = 0.; // sum for this row/column pair\n float z = 0.5 * (4.0 * delta_t);// position for shared dimension on source textures\n\n for (int l=0 ; l<4096 ; ++l) {\n if(l >= K / 4) break; // stop when we finish the row/column\n // l is in pixel space, so we divide by four\n\n // retrieve next four elements from each texture\n 
vec4 a_ik = texture2D( A, vec2(z, y));\n vec4 b_kj = texture2D(B_t, vec2(z, x));\n\n // use `dot` to process four elements at a time\n sum += dot(a_ik, b_kj);\n z += (4.0 * delta_t); // (z + 0.5)*delta\n }\n return sum;\n}\n\n// Render float to bytes according to IEEE 754 Floating Point\nvec4 encode_float_1604150559(float val) {\n\n // TODO: correctly handle denormal numbers\n // http://www.2ality.com/2012/04/number-encoding.html\n float a = abs(val); // encode absolute value + sign\n float exp = floor(log2(a)); // number of powers of 2\n float mant = pow(2.,log2(a)-exp) * pow(2.,23.); // multiply to fill 24 bits (implied leading 1)\n float mant1 = floor(mant / 256. / 256.); // first 8 bits of mantissa\n float mant2 = mod(floor(mant / 256.),256.); // second 8 bits\n float mant3 = mod(mant,256.); // third 8 bits\n\n highp float sign = 128.-128.*(a/val); // sign bit is 256 or 0\n highp float e = (sign+exp+127.)/510.; // exponent and sign\n highp float m1 = (mant1-(128.*(1.-mod(exp+127.,2.))))/255.; // handle leading bit\n highp float m2 = (mant2)/255.; // middle part\n highp float m3 = (mant3+.5)/255.; // scale to 0 - 255\n\n return vec4(m3,m2,m1,e);\n}\n\n// select an element from a vector based on index\nfloat select_index_1117569599(vec4 v, int index){\n float val;\n if (index == 0) {\n val = v.r;\n } else if(index == 1) {\n val = v.g;\n } else if(index == 2) {\n val = v.b;\n } else if(index == 3){\n val = v.a;\n } else {\n // should never be here\n val = 0.0;\n }\n\n return val;\n}\n\nvoid main(void) {\n\n // get the implied row and column from .y and .x of passed (output)\n // texture coordinate. 
These map directly to input texture space when\n // the relevant dimensions are the same.\n float row_t = outTex.y;\n float col_t = outTex.x;\n vec4 c_vec = texture2D(C, vec2(col_t, 0.5));\n\n // should be -0.5, but that subtly breaks at zero\n float col = col_t * float(N + pad); // index of first element in pixel (matrix space)\n int channel = int(mod(col, 4.0 ));\n float c = select_index_1117569599(c_vec, channel);\n\n // sum row x col for the passed pixel\n float sum = alpha * dot_rowcol_1540259130(row_t, col_t * float(N + pad)/float(N), A, B_t, K);\n sum += beta * c;\n\n if (sum == 0.) {\n gl_FragColor = vec4(0.,0.,0.,0.);\n return;\n }\n\n // output vec4 with bytes for an IEEE754 32-bit floating point number\n gl_FragColor = encode_float_1604150559(sum);\n}\n",a="#define GLSLIFY 1\n// fragment shader that calculates the matrix product and writes each\n// element to a pixel component in a floating point texture.\n// the output RGBA canvas.\n// readPixel is used to read the bytes.\n\nprecision highp float;\n\nvarying vec2 outTex; // texture coords of row/column to calculate\nuniform sampler2D A; // texture with data from padded A\nuniform sampler2D B_t; // texture with data from padded transpose of B\nuniform int K; // number of elements in shared dimension\nuniform int N; // number of columns in output\nuniform int pad; //\nuniform float alpha; // coefficient to multiplication\n\n// sum of products between elements in row i (from A) x col j (from B)\n\n// Calculate the dot product between the row (from A) and column (from B)\n// identified by the passed indeces (output texture coordinate space).\n// We loop over elements in the row and column and sum the product\n// using the glsl `dot` function to process four elements at a time.\n// This four element optimization requires that the matrix B be\n// transposed before texel packing and that both matrices be padded\n// (with zeros) to a multiple of four (4) in their shared dimension.\nfloat 
dot_rowcol_1540259130(float y, float x, sampler2D A, sampler2D B_t, int K) {\n float delta_t = 1./float(K);// space (on texture) between elements\n float sum = 0.; // sum for this row/column pair\n float z = 0.5 * (4.0 * delta_t);// position for shared dimension on source textures\n\n for (int l=0 ; l<4096 ; ++l) {\n if(l >= K / 4) break; // stop when we finish the row/column\n // l is in pixel space, so we divide by four\n\n // retrieve next four elements from each texture\n vec4 a_ik = texture2D( A, vec2(z, y));\n vec4 b_kj = texture2D(B_t, vec2(z, x));\n\n // use `dot` to process four elements at a time\n sum += dot(a_ik, b_kj);\n z += (4.0 * delta_t); // (z + 0.5)*delta\n }\n return sum;\n}\n\nvoid main(void) {\n\n // get the implied row and column from .y and .x of passed (output)\n // texture coordinate. These map directly to input texture space when\n // the relevant dimensions are the same.\n float row_t = outTex.y;\n float col_t = outTex.x;\n\n vec4 sum_v = vec4(0.0, 0.0, 0.0, 0.0);\n float col = (col_t * float(N + pad) - 2.0); // index of first element in pixel (matrix space)\n sum_v.r = alpha * dot_rowcol_1540259130(row_t, (col + 0.5)/float(N), A, B_t, K);\n // is last element in pixel past row length?\n if(pad > 0 && (col + 4.0) > float(N) ) {\n // compute elements in padded region\n if(pad < 3){\n sum_v.g = alpha * dot_rowcol_1540259130(row_t, (col + 1.5)/float(N), A, B_t, K);\n }\n if(pad < 2){\n sum_v.b = alpha * dot_rowcol_1540259130(row_t, (col + 2.5)/float(N), A, B_t, K);\n }\n } else {\n sum_v.g = alpha * dot_rowcol_1540259130(row_t, (col + 1.5)/float(N), A, B_t, K);\n sum_v.b = alpha * dot_rowcol_1540259130(row_t, (col + 2.5)/float(N), A, B_t, K);\n sum_v.a = alpha * dot_rowcol_1540259130(row_t, (col + 3.5)/float(N), A, B_t, K);\n }\n\n gl_FragColor = sum_v;\n}\n",r="#define GLSLIFY 1\n// fragment shader that calculates the matrix product and writes each\n// element to a pixel component in a floating point texture.\n// the output RGBA 
canvas.\n// readPixel is used to read the bytes.\n\nprecision highp float;\n\nvarying vec2 outTex; // texture coords of row/column to calculate\nuniform sampler2D A; // texture with data from padded A\nuniform sampler2D B_t; // texture with data from padded transpose of B\nuniform sampler2D C; // texture with data from C\nuniform int K; // number of elements in shared dimension\nuniform int N; // number of columns in output\nuniform int pad; //\nuniform float alpha; // coefficient to multiplication\nuniform float beta; // coefficient to addition\n\n// sum of products between elements in row i (from A) x col j (from B)\n\n// Calculate the dot product between the row (from A) and column (from B)\n// identified by the passed indeces (output texture coordinate space).\n// We loop over elements in the row and column and sum the product\n// using the glsl `dot` function to process four elements at a time.\n// This four element optimization requires that the matrix B be\n// transposed before texel packing and that both matrices be padded\n// (with zeros) to a multiple of four (4) in their shared dimension.\nfloat dot_rowcol_1540259130(float y, float x, sampler2D A, sampler2D B_t, int K) {\n float delta_t = 1./float(K);// space (on texture) between elements\n float sum = 0.; // sum for this row/column pair\n float z = 0.5 * (4.0 * delta_t);// position for shared dimension on source textures\n\n for (int l=0 ; l<4096 ; ++l) {\n if(l >= K / 4) break; // stop when we finish the row/column\n // l is in pixel space, so we divide by four\n\n // retrieve next four elements from each texture\n vec4 a_ik = texture2D( A, vec2(z, y));\n vec4 b_kj = texture2D(B_t, vec2(z, x));\n\n // use `dot` to process four elements at a time\n sum += dot(a_ik, b_kj);\n z += (4.0 * delta_t); // (z + 0.5)*delta\n }\n return sum;\n}\n\nvoid main(void) {\n\n // get the implied row and column from .y and .x of passed (output)\n // texture coordinate. 
These map directly to input texture space when\n // the relevant dimensions are the same.\n float row_t = outTex.y;\n float col_t = outTex.x;\n vec4 c_v = texture2D(C, vec2(col_t, 0.5));\n\n vec4 sum_v = vec4(0.0, 0.0, 0.0, 0.0);\n float col = (col_t * float(N + pad) - 2.0); // index of first element in pixel (matrix space)\n sum_v.r = alpha * dot_rowcol_1540259130(row_t, (col + 0.5)/float(N), A, B_t, K);\n // in the padding region?\n if(pad > 0 && (col + 4.0) > float(N) ) {\n // pad\n if(pad < 3){\n sum_v.g = alpha * dot_rowcol_1540259130(row_t, (col + 1.5)/float(N), A, B_t, K);\n }\n if(pad < 2){\n sum_v.b = alpha * dot_rowcol_1540259130(row_t, (col + 2.5)/float(N), A, B_t, K);\n }\n } else {\n sum_v.g = alpha * dot_rowcol_1540259130(row_t, (col + 1.5)/float(N), A, B_t, K);\n sum_v.b = alpha * dot_rowcol_1540259130(row_t, (col + 2.5)/float(N), A, B_t, K);\n sum_v.a = alpha * dot_rowcol_1540259130(row_t, (col + 3.5)/float(N), A, B_t, K);\n }\n\n gl_FragColor = sum_v + beta*c_v;\n}\n";this.standalone?(this.program_=this.webgl.createProgram(n),this.program_c=this.webgl.createProgram(o)):(this.program_=this.webgl.createProgram(a),this.program_c=this.webgl.createProgram(r))}var WebGL=require("./webgl");module.exports=SGEMMCalculator,/* uniform names shared by all four shaders */SGEMMCalculator.TEXTURE_UNIFORM_NAME_0="A",SGEMMCalculator.TEXTURE_UNIFORM_NAME_1="B_t",SGEMMCalculator.TEXTURE_UNIFORM_NAME_2="C",SGEMMCalculator.SHARED_LENGTH_UNIFORM_NAME="K",SGEMMCalculator.COLUMN_COUNT_UNIFORM_NAME="N",SGEMMCalculator.PAD_UNIFORM_NAME="pad",SGEMMCalculator.ALPHA_UNIFORM_NAME="alpha",SGEMMCalculator.BETA_UNIFORM_NAME="beta",/* calculate(M, N, K, alpha, A, B_t, beta, C, out): render alpha*A*B_t
(+ beta*C when the C texture is non-null) into `out`.  Selects program_c or
program_ per call by reassigning this.program, so instances are not safe for
concurrent reentrant use.  When C is null, beta is discarded (set to null,
which uploads as 0). */SGEMMCalculator.prototype.calculate=function(t,e,n,o,a,r,i,l,s){var 
u=this.webgl.context;null!=l?this.program=this.program_c:(i=null,this.program=this.program_),this.webgl.selectProgram(this.program),this.bindInputTexture(a,u.TEXTURE0,SGEMMCalculator.TEXTURE_UNIFORM_NAME_0),this.bindInputTexture(r,u.TEXTURE1,SGEMMCalculator.TEXTURE_UNIFORM_NAME_1),null!=l&&this.bindInputTexture(l,u.TEXTURE2,SGEMMCalculator.TEXTURE_UNIFORM_NAME_2);var m=this.webgl.getPad(n),d=this.webgl.getPad(e);this.bindUniforms(e,n+m,d,o,i),this.standalone?this.webgl.bindOutputTexture(t,e+d,s):this.webgl.bindOutputTexture(t,(e+d)/4,s),u.drawElements(u.TRIANGLES,6,u.UNSIGNED_SHORT,0),this.webgl.unbindInputTexture(u.TEXTURE0),this.webgl.unbindInputTexture(u.TEXTURE1),this.webgl.unbindInputTexture(u.TEXTURE2)},/* bind `texture` to `textureUnit` and point sampler uniform `name` at it */SGEMMCalculator.prototype.bindInputTexture=function(t,e,n){var o=this.webgl.context,a=this.program;o.activeTexture(e),o.bindTexture(o.TEXTURE_2D,t);var r=o.getUniformLocation(a,n);o.uniform1i(r,e-o.TEXTURE0)},/* upload scalar uniforms: N (output columns), K (padded shared dim),
pad, alpha, beta.  NOTE(review): the `m=m=` below is a harmless duplicated
assignment left by the minifier/author. */SGEMMCalculator.prototype.bindUniforms=function(t,e,n,o,a){var r=this.webgl.context,i=r.getUniformLocation(this.program,SGEMMCalculator.SHARED_LENGTH_UNIFORM_NAME),l=r.getUniformLocation(this.program,SGEMMCalculator.ALPHA_UNIFORM_NAME),s=r.getUniformLocation(this.program,SGEMMCalculator.BETA_UNIFORM_NAME),u=r.getUniformLocation(this.program,SGEMMCalculator.COLUMN_COUNT_UNIFORM_NAME),m=m=r.getUniformLocation(this.program,SGEMMCalculator.PAD_UNIFORM_NAME);r.uniform1f(s,a),r.uniform1i(u,t),r.uniform1i(m,n),r.uniform1i(i,e),r.uniform1f(l,o)};
15 | },{"./webgl":11}],8:[function(require,module,exports){
/*
 * NOTE(review): minified vendor code (weblas bundle, browserify module 8) —
 * kept byte-identical; comments only.
 *
 * SSCALCalculator runs the element-wise operation  out = a*X + b  over a
 * matrix packed into a WebGL RGBA float texture, via one of two fragment
 * shaders selected by `standalone`:
 *   e — standalone variant: encodes each float result into 4 RGBA bytes
 *       (IEEE 754, see encode_float_*) so results can be read back with
 *       readPixels on an UNSIGNED_BYTE framebuffer;
 *   o — pipeline variant: writes a float texture directly, zeroing the
 *       column-padding channels (fix_pad_*) for a downstream pass.
 * calculate(h, w, a, b, X, out): h rows, w columns; w is padded up to a
 * multiple of WebGL.COMPONENTS_PER_TEXEL (4) and a full-screen quad draw
 * produces `out`. bindUniforms maps (N, pad, a, b) onto the shader.
 * -- parameter roles inferred from the uniform bindings below; confirm
 *    against the unminified weblas source before relying on them.
 */
16 | function SSCALCalculator(n,t){this.webgl=n,this.standalone=null!=t?t:!0;var e="#define GLSLIFY 1\nprecision highp float;\n\nvarying vec2 outTex; // texture coords of row/column to calculate\nuniform sampler2D X; // texture with data from padded X\nuniform int N; // number of columns\nuniform int pad; // additional columns to nearest multiple of four\nuniform float b; // additive term\nuniform float a; // multiplicative term\n\n// Render float to bytes according to IEEE 754 Floating Point\nvec4 encode_float_1540259130(float val) {\n\n // TODO: correctly handle denormal numbers\n // http://www.2ality.com/2012/04/number-encoding.html\n float a = abs(val); // encode absolute value + sign\n float exp = floor(log2(a)); // number of powers of 2\n float mant = pow(2.,log2(a)-exp) * pow(2.,23.); // multiply to fill 24 bits (implied leading 1)\n float mant1 = floor(mant / 256. / 256.); // first 8 bits of mantissa\n float mant2 = mod(floor(mant / 256.),256.); // second 8 bits\n float mant3 = mod(mant,256.); // third 8 bits\n\n highp float sign = 128.-128.*(a/val); // sign bit is 256 or 0\n highp float e = (sign+exp+127.)/510.; // exponent and sign\n highp float m1 = (mant1-(128.*(1.-mod(exp+127.,2.))))/255.; // handle leading bit\n highp float m2 = (mant2)/255.; // middle part\n highp float m3 = (mant3+.5)/255.; // scale to 0 - 255\n\n return vec4(m3,m2,m1,e);\n}\n\n// select an element from a vector based on index\nfloat select_index_1604150559(vec4 v, int index){\n float val;\n if (index == 0) {\n val = v.r;\n } else if(index == 1) {\n val = v.g;\n } else if(index == 2) {\n val = v.b;\n } else if(index == 3){\n val = v.a;\n } else {\n // should never be here\n val = 0.0;\n }\n\n return val;\n}\n\nvoid main(void) {\n\n // get the implied row and column from .y and .x of passed (output)\n // texture coordinate. 
These map directly to input texture space when\n // the relevant dimensions are the same.\n float row = outTex.y;\n float col = outTex.x;\n\n // direct usage of col requires output be padded exactly like input\n vec4 x = texture2D( X, vec2(col, row));\n vec4 sum_v = (a * x) + b;\n int channel = int(mod(col * float(N + pad), 4.0 ));\n float sum = select_index_1604150559(sum_v, channel);\n\n if (sum == 0.) {\n gl_FragColor = vec4(0.,0.,0.,0.);\n return;\n }\n\n // output vec4 with bytes for an IEEE754 32-bit floating point number\n gl_FragColor = encode_float_1540259130(sum);\n}\n",o="#define GLSLIFY 1\nprecision highp float;\n\nvarying vec2 outTex; // texture coords of row/column to calculate\nuniform sampler2D X; // texture with data from padded X\nuniform int N; // number of columns\nuniform int pad; // additional columns to nearest multiple of four\nuniform float b; // additive term\nuniform float a; // multiplicative term\n\n// set pad values to 0.0, if in padded region of output texture\nvoid fix_pad_1540259130(inout vec4 v, int pad){\n v.a = 0.0;\n if(pad == 2){\n v.b = 0.0;\n } else if(pad == 3){\n v.b = 0.0;\n v.g = 0.0;\n }\n}\n\nvoid main(void) {\n\n // get the implied row and column from .y and .x of passed (output)\n // texture coordinate. 
These map directly to input texture space when\n // the relevant dimensions are the same.\n float row_t = outTex.y;\n float col_t = outTex.x;\n float col = (col_t * float(N + pad) - 2.0); // index of first element in pixel (matrix space)\n\n // direct usage of col requires output be padded exactly like input\n vec4 x = texture2D( X, vec2(col_t, row_t));\n vec4 sum_v = (a * x) + b;\n\n // fix padded region\n if(pad > 0 && col + 4.0 > float(N) ) {\n fix_pad_1540259130(sum_v, pad);\n }\n\n gl_FragColor = sum_v;\n}\n";this.standalone?this.program=this.webgl.createProgram(e):this.program=this.webgl.createProgram(o)}var WebGL=require("./webgl");module.exports=SSCALCalculator,SSCALCalculator.TEXTURE_UNIFORM_NAME_0="X",SSCALCalculator.LENGTH_UNIFORM_NAME="N",SSCALCalculator.ADD_UNIFORM_NAME="b",SSCALCalculator.MUL_UNIFORM_NAME="a",SSCALCalculator.prototype.calculate=function(n,t,e,o,a,i){var l=this.webgl.context,r=t%WebGL.COMPONENTS_PER_TEXEL,u=0==r?0:WebGL.COMPONENTS_PER_TEXEL-r;this.webgl.selectProgram(this.program),this.bindInputTexture(a,l.TEXTURE0,SSCALCalculator.TEXTURE_UNIFORM_NAME_0),this.bindUniforms(t,u,e,o),this.standalone?this.webgl.bindOutputTexture(n,t+u,i):this.webgl.bindOutputTexture(n,(t+u)/4,i),l.drawElements(l.TRIANGLES,6,l.UNSIGNED_SHORT,0),this.webgl.unbindInputTexture(l.TEXTURE0)},SSCALCalculator.prototype.bindInputTexture=function(n,t,e){var o=this.webgl.context,a=this.program;o.activeTexture(t),o.bindTexture(o.TEXTURE_2D,n);var i=o.getUniformLocation(a,e);o.uniform1i(i,t-o.TEXTURE0)},SSCALCalculator.prototype.bindUniforms=function(n,t,e,o){var a=this.webgl.context,i=a.getUniformLocation(this.program,SSCALCalculator.LENGTH_UNIFORM_NAME),l=a.getUniformLocation(this.program,SSCALCalculator.ADD_UNIFORM_NAME),r=a.getUniformLocation(this.program,SSCALCalculator.MUL_UNIFORM_NAME),u=a.getUniformLocation(this.program,"pad");a.uniform1i(i,n),a.uniform1i(u,t),a.uniform1f(r,e),a.uniform1f(l,o)};
17 | },{"./webgl":11}],9:[function(require,module,exports){
// A Tensor is a rank-2 matrix stored on the GPU as a WebGL texture.
// `shape` is [rows, cols]; `data` (may be null) is packed into the texture.
function Tensor(shape, data) {
    if (shape.length !== 2) {
        throw new Error("Only Tensor of order two (matrix) is supported right now.");
    }
    var rows = shape[0];
    var cols = shape[1];
    this.texture = gl.createDataTexture(rows, cols, data);
    this.shape = [rows, cols];
}

var globals = require("./globals");
var gl = globals.gl;

module.exports = Tensor;

// Release the backing texture; the Tensor must not be used afterwards.
// (Bracket access keeps the reserved word safe for older parsers.)
Tensor.prototype["delete"] = function () {
    gl.context.deleteTexture(this.texture);
    this.texture = null;
    this.shape = null;
};

// Read the tensor back to the CPU as a Float32Array.
// Pass a truthy `keep` to retain the GPU-side tensor; otherwise it is deleted.
Tensor.prototype.transfer = function (keep) {
    var rows = this.shape[0];
    var cols = this.shape[1];

    // Encode floats into bytes in a temporary output texture, then read it back.
    var encoded = gl.createOutputTexture(rows, cols);
    gl.encode(rows, cols, this.texture, encoded);
    var result = new Float32Array(gl.readData(rows, cols));

    gl.context.deleteTexture(encoded);
    if (!keep) {
        this["delete"]();
    }
    return result;
};

// Return a new Tensor holding this tensor's data in the given [rows, cols]
// shape. Pass a truthy `keep` to retain this tensor; otherwise it is deleted.
Tensor.prototype.reshape = function (shape, keep) {
    var rows = this.shape[0];
    var cols = this.shape[1];
    var newRows = shape[0];
    var newCols = shape[1];

    var out = new Tensor(shape, null);
    gl.reshape(rows, cols, newRows, newCols, this.texture, out.texture);

    if (!keep) {
        this["delete"]();
    }
    return out;
};

// Return a new Tensor that is the transpose of this one.
// Pass a truthy `keep` to retain this tensor; otherwise it is deleted.
Tensor.prototype.transpose = function (keep) {
    var rows = this.shape[0];
    var cols = this.shape[1];

    var out = new Tensor([cols, rows], null);
    gl.transpose(rows, cols, this.texture, out.texture);

    if (!keep) {
        this["delete"]();
    }
    return out;
};
19 | },{"./globals":2}],10:[function(require,module,exports){
20 | var async=require("async"),loader=require("floader");test={},test.allclose=function(e,t,r,a){if(r=r||1e-5,a=a||1e-8,e.length!=t.length)return console.log("lengths not equal: "+e.length+", "+t.length),{result:!1,index:null};for(var n,l=0;la;a++){for(var n=[],l=0;t>l;l++)n[l]=Math.random()/Math.sqrt(e);r.push(n)}return r},test.padData=function(e,t,r,a){for(var n=new Float32Array(e*(t+r)),l=0;e>l;l++)n.set(a.subarray(l*t,(l+1)*t),l*(t+r));return n},test.load=function(e,t,r){var a=t.map(function(t){return e+t});async.map(a,loader.load,function(e,t){if(e)return r(e);var a=t.map(JSON.parse);r(e,a)})},test.assert={},test.assert.allclose=function(e,t,r,a,n,l){var s=test.allclose(t,r,n,l),o="[",u="[";if(!s.result){s.index>1&&(o+="..., ",u+="..., "),s.index>0&&(o+=t[s.index-1]+", ",u+=r[s.index-1]+", "),o+="-->",u+="-->";for(var i=s.index;i 0 && (col + 4.0) > float(N) ) {\n // compute elements in padded region\n if(npad < 3){\n col_v.g = select_index_1540259130(row_2, channel);\n }\n if(npad < 2){\n col_v.b = select_index_1540259130(row_3, channel);\n }\n } else {\n col_v.g = select_index_1540259130(row_2, channel);\n col_v.b = select_index_1540259130(row_3, channel);\n col_v.a = select_index_1540259130(row_4, channel);\n }\n\n gl_FragColor = col_v;\n}\n",s="#define GLSLIFY 1\nprecision highp float;\n\nvarying vec2 outTex; // texture coords of row/column to calculate\nuniform sampler2D A; // texture with data from padded A\nuniform float M; // number of rows in output\nuniform float N; // number of columns in output\nuniform float pad; // column padding in output\nuniform float M_in; // number of rows in input\nuniform float N_in; // number of columns in input\nuniform float pad_in; // column padding in input\n\n/* number of input pixels\n origin index (channel) for each\n termination index (channel) for each\n destination origin index (channel) for each\n */\n// select an element from a vector based on index\nfloat select_index_1604150559(vec4 v, int index){\n float 
val;\n if (index == 0) {\n val = v.r;\n } else if(index == 1) {\n val = v.g;\n } else if(index == 2) {\n val = v.b;\n } else if(index == 3){\n val = v.a;\n } else {\n // should never be here\n val = 0.0;\n }\n\n return val;\n}\n\n// set pad values to 0.0, if in padded region of output texture\nvoid fix_pad_1540259130(inout vec4 v, int pad){\n v.a = 0.0;\n if(pad == 2){\n v.b = 0.0;\n } else if(pad == 3){\n v.b = 0.0;\n v.g = 0.0;\n }\n}\n\n// translate a linear index into x, y coordinates for a matrix\nvec2 linear_index_coords(float linear_index, float row_length){\n vec2 coords;\n\n coords.x = floor(mod(linear_index + 0.5, row_length)); // column\n coords.y = floor((linear_index + 0.5) / row_length); // row\n\n return coords;\n}\n\nvoid main(void) {\n\n // get the implied row and column from .y and .x of passed (output)\n // texture coordinate. These map directly to input texture space when\n // the relevant dimensions are the same.\n float row_t = outTex.y;\n float col_t = outTex.x;\n\n float row = floor(row_t * M);\n float col_0 = (col_t * (N + pad) - 2.0); // index of first element in pixel (matrix space)\n //float col_0 = floor(col_t * (N + pad)/4.0)*4.0; // index of first element in pixel (matrix space)\n float lin_index_0 = row * N + col_0; // linearized index of first element in pixel in output\n\n vec4 pixel_in = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 result = vec4(0.0, 0.0, 0.0, 0.0);\n vec2 coords = linear_index_coords(lin_index_0, N_in);\n vec2 ncoords;\n int current_pixel_index = int(mod(coords.x, 4.0));\n\n pixel_in = texture2D(A, vec2((coords.x + 0.5)/(N_in + pad_in), (coords.y + 0.5)/M_in));\n\n // go through channels for current output pixel\n for(int i = 0; i < 4; i++){\n\n // are we on a new input pixel?\n ncoords = linear_index_coords(lin_index_0 + float(i), N_in);\n if(floor(coords.x/4.0) != floor(ncoords.x/4.0) || coords.y != ncoords.y){\n coords = ncoords;\n pixel_in = texture2D(A, vec2((coords.x + 0.5)/(N_in + pad_in), (coords.y + 0.5)/M_in));\n 
current_pixel_index = 0;\n }\n\n if(i == 0){\n result.r = select_index_1604150559(pixel_in, current_pixel_index);\n } else if(i == 1){\n result.g = select_index_1604150559(pixel_in, current_pixel_index);\n } else if(i == 2){\n result.b = select_index_1604150559(pixel_in, current_pixel_index);\n } else {\n result.a = select_index_1604150559(pixel_in, current_pixel_index);\n }\n\n current_pixel_index++;\n }\n\n // are we in the padded (output) region?\n if(pad > 0.0 && col_0 + 3.5 > N ) {\n fix_pad_1540259130(result, int(pad));\n }\n\n gl_FragColor = result;\n}\n";this.encode_program=this.createProgram(a),this.transpose_program=this.createProgram(l),this.reshape_program=this.createProgram(s)}module.exports=WebGL,WebGL.COMPONENTS_PER_TEXEL=4,WebGL.POSITION_UNIFORM_NAME="pos",WebGL.TEXTURE_UNIFORM_NAME="tex",WebGL.prototype.encode=function(e,t,n,o){this.program=this.encode_program,this.selectProgram(this.program);var r=this.getPad(t),i=this.context.getUniformLocation(this.program,"N"),a=this.context.getUniformLocation(this.program,"pad");this.context.uniform1i(i,t),this.context.uniform1i(a,r),this.bindInputTexture(n,this.context.TEXTURE0,"A"),this.bindOutputTexture(e,t,o),this.context.drawElements(this.context.TRIANGLES,6,this.context.UNSIGNED_SHORT,0),this.unbindInputTexture(this.context.TEXTURE0)},WebGL.prototype.transpose=function(e,t,n,o){this.program=this.transpose_program,this.selectProgram(this.program);var 
r=this.getPad(t),i=this.getPad(e),a=this.context.getUniformLocation(this.program,"N"),l=this.context.getUniformLocation(this.program,"npad"),s=this.context.getUniformLocation(this.program,"M"),c=this.context.getUniformLocation(this.program,"mpad");this.context.uniform1i(a,e),this.context.uniform1i(l,i),this.context.uniform1i(s,t),this.context.uniform1i(c,r),this.bindInputTexture(n,this.context.TEXTURE0,"A"),this.bindOutputTexture(t,(e+i)/4,o),this.context.drawElements(this.context.TRIANGLES,6,this.context.UNSIGNED_SHORT,0),this.unbindInputTexture(this.context.TEXTURE0)},WebGL.prototype.reshape=function(e,t,n,o,r,i){this.program=this.reshape_program,this.selectProgram(this.program);var a=this.getPad(t),l=this.getPad(o),s=this.context.getUniformLocation(this.program,"M"),c=this.context.getUniformLocation(this.program,"N"),f=this.context.getUniformLocation(this.program,"pad"),u=this.context.getUniformLocation(this.program,"M_in"),d=this.context.getUniformLocation(this.program,"N_in"),x=this.context.getUniformLocation(this.program,"pad_in");this.context.uniform1f(s,n),this.context.uniform1f(c,o),this.context.uniform1f(f,l),this.context.uniform1f(u,e),this.context.uniform1f(d,t),this.context.uniform1f(x,a),this.bindInputTexture(r,this.context.TEXTURE0,"A"),this.bindOutputTexture(n,(o+l)/4,i),this.context.drawElements(this.context.TRIANGLES,6,this.context.UNSIGNED_SHORT,0),this.unbindInputTexture(this.context.TEXTURE0)},WebGL.prototype.bindInputTexture=function(e,t,n){var o=this.context,r=this.program;o.activeTexture(t),o.bindTexture(o.TEXTURE_2D,e);var i=o.getUniformLocation(r,n);o.uniform1i(i,t-o.TEXTURE0)},WebGL.prototype.createProgram=function(e){var t,n=this.context;if(t=n.createShader(n.FRAGMENT_SHADER),n.shaderSource(t,e),n.compileShader(t),0==n.getShaderParameter(t,n.COMPILE_STATUS))throw new Error(n.getShaderInfoLog(t));var o=n.createProgram();return 
n.attachShader(o,this.vertexShader),n.attachShader(o,t),n.linkProgram(o),o},WebGL.prototype.selectProgram=function(e){var t=this.context;t.useProgram(e),this.bindVertices(e)},WebGL.prototype.bindVertices=function(e){var t=this.context,n=e,o=t.getAttribLocation(n,WebGL.POSITION_UNIFORM_NAME),r=t.createBuffer();t.bindBuffer(t.ARRAY_BUFFER,r);var i=[-1,-1,0,1,-1,0,1,1,0,-1,1,0];t.bufferData(t.ARRAY_BUFFER,new Float32Array(i),t.STATIC_DRAW),t.vertexAttribPointer(o,3,t.FLOAT,!1,0,0),t.enableVertexAttribArray(o);var a=t.getAttribLocation(n,WebGL.TEXTURE_UNIFORM_NAME),l=t.createBuffer();t.bindBuffer(t.ARRAY_BUFFER,l);var s=[0,0,1,0,1,1,0,1];t.bufferData(t.ARRAY_BUFFER,new Float32Array(s),t.STATIC_DRAW),t.vertexAttribPointer(a,2,t.FLOAT,!1,0,0),t.enableVertexAttribArray(a);var c=t.createBuffer();t.bindBuffer(t.ELEMENT_ARRAY_BUFFER,c);var f=[0,1,2,0,2,3];t.bufferData(t.ELEMENT_ARRAY_BUFFER,new Uint16Array(f),t.STATIC_DRAW)},WebGL.prototype.createDataTexture=function(e,t,n){var o=this.context,r=[0,0,0,0],i=t%WebGL.COMPONENTS_PER_TEXEL,a=0==i?0:WebGL.COMPONENTS_PER_TEXEL-i,l=o.createTexture();if(o.bindTexture(o.TEXTURE_2D,l),0==a||null==n||"undefined"==typeof n)o.texImage2D(o.TEXTURE_2D,0,o.RGBA,(t+a)/WebGL.COMPONENTS_PER_TEXEL,e,0,o.RGBA,o.FLOAT,n);else{o.texImage2D(o.TEXTURE_2D,0,o.RGBA,(t+a)/WebGL.COMPONENTS_PER_TEXEL,e,0,o.RGBA,o.FLOAT,null);for(var s,c,f=t-i,u=f/WebGL.COMPONENTS_PER_TEXEL,d=0,x=new Float32Array(r),_=0;e>_;_++)d=_*t,full_texel_row_end=d+f,s=new Float32Array(n.buffer,d*n.BYTES_PER_ELEMENT,f),f>0&&o.texSubImage2D(o.TEXTURE_2D,0,0,_,u,1,o.RGBA,o.FLOAT,s),c=new Float32Array(n.buffer,full_texel_row_end*n.BYTES_PER_ELEMENT,i),x.set(c),o.texSubImage2D(o.TEXTURE_2D,0,u,_,1,1,o.RGBA,o.FLOAT,x)}return 
o.texParameteri(o.TEXTURE_2D,o.TEXTURE_WRAP_S,o.CLAMP_TO_EDGE),o.texParameteri(o.TEXTURE_2D,o.TEXTURE_WRAP_T,o.CLAMP_TO_EDGE),o.texParameteri(o.TEXTURE_2D,o.TEXTURE_MAG_FILTER,o.NEAREST),o.texParameteri(o.TEXTURE_2D,o.TEXTURE_MIN_FILTER,o.NEAREST),o.bindTexture(o.TEXTURE_2D,null),l},WebGL.prototype.createOutputTexture=function(e,t){var n=this.context,o=this.getPad(t),r=n.createTexture();return n.bindTexture(n.TEXTURE_2D,r),n.texImage2D(n.TEXTURE_2D,0,n.RGBA,t+o,e,0,n.RGBA,n.UNSIGNED_BYTE,null),n.texParameteri(n.TEXTURE_2D,n.TEXTURE_WRAP_S,n.CLAMP_TO_EDGE),n.texParameteri(n.TEXTURE_2D,n.TEXTURE_WRAP_T,n.CLAMP_TO_EDGE),n.texParameteri(n.TEXTURE_2D,n.TEXTURE_MAG_FILTER,n.NEAREST),n.texParameteri(n.TEXTURE_2D,n.TEXTURE_MIN_FILTER,n.NEAREST),n.bindTexture(n.TEXTURE_2D,null),r},WebGL.prototype.bindOutputTexture=function(e,t,n){var o=this.context;if(this.canvas.height=e,this.canvas.width=t,o.viewport(0,0,t,e),this.framebuffer=this.framebuffer||o.createFramebuffer(),o.bindFramebuffer(o.FRAMEBUFFER,this.framebuffer),o.framebufferTexture2D(o.FRAMEBUFFER,o.COLOR_ATTACHMENT0,o.TEXTURE_2D,n,0),o.checkFramebufferStatus(o.FRAMEBUFFER)!=o.FRAMEBUFFER_COMPLETE)throw new Error("Bound framebuffer is not complete.");return this.framebuffer},WebGL.prototype.unbindInputTexture=function(e){var t=this.context;t.activeTexture(e),t.bindTexture(t.TEXTURE_2D,null)},WebGL.prototype.readData=function(e,t){var n=this.context;return rawbuffer=new ArrayBuffer(e*t*Float32Array.BYTES_PER_ELEMENT),prod=new Uint8Array(rawbuffer),n.readPixels(0,0,t,e,n.RGBA,n.UNSIGNED_BYTE,prod),rawbuffer},WebGL.prototype.getPad=function(e){var t=e%WebGL.COMPONENTS_PER_TEXEL,n=0==t?0:WebGL.COMPONENTS_PER_TEXEL-t;return n};
23 | },{}],12:[function(require,module,exports){
24 | (function (process,global){
25 | !function(){function n(){}function t(n){return n}function e(n){return!!n}function r(n){return!n}function u(n){return function(){if(null===n)throw new Error("Callback was already called.");n.apply(this,arguments),n=null}}function i(n){return function(){null!==n&&(n.apply(this,arguments),n=null)}}function o(n){return M(n)||"number"==typeof n.length&&n.length>=0&&n.length%1===0}function c(n,t){for(var e=-1,r=n.length;++er?r:null}):(e=W(n),t=e.length,function(){return r++,t>r?e[r]:null})}function m(n,t){return t=null==t?n.length-1:+t,function(){for(var e=Math.max(arguments.length-t,0),r=Array(e),u=0;e>u;u++)r[u]=arguments[u+t];switch(t){case 0:return n.call(this,r);case 1:return n.call(this,arguments[0],r)}}}function y(n){return function(t,e,r){return n(t,r)}}function v(t){return function(e,r,o){o=i(o||n),e=e||[];var c=h(e);if(0>=t)return o(null);var a=!1,f=0,l=!1;!function s(){if(a&&0>=f)return o(null);for(;t>f&&!l;){var n=c();if(null===n)return a=!0,void(0>=f&&o(null));f+=1,r(e[n],n,u(function(n){f-=1,n?(o(n),l=!0):s()}))}}()}}function d(n){return function(t,e,r){return n(C.eachOf,t,e,r)}}function g(n){return function(t,e,r,u){return n(v(e),t,r,u)}}function k(n){return function(t,e,r){return n(C.eachOfSeries,t,e,r)}}function b(t,e,r,u){u=i(u||n),e=e||[];var c=o(e)?[]:{};t(e,function(n,t,e){r(n,function(n,r){c[t]=r,e(n)})},function(n){u(n,c)})}function w(n,t,e,r){var u=[];n(t,function(n,t,r){e(n,function(e){e&&u.push({index:t,value:n}),r()})},function(){r(a(u.sort(function(n,t){return n.index-t.index}),function(n){return n.value}))})}function O(n,t,e,r){w(n,t,function(n,t){e(n,function(n){t(!n)})},r)}function S(n,t,e){return function(r,u,i,o){function c(){o&&o(e(!1,void 0))}function a(n,r,u){return o?void i(n,function(r){o&&t(r)&&(o(e(!0,n)),o=i=!1),u()}):u()}arguments.length>3?n(r,u,a,c):(o=i,i=u,n(r,a,c))}}function E(n,t){return t}function L(t,e,r){r=r||n;var 
u=o(e)?[]:{};t(e,function(n,t,e){n(m(function(n,r){r.length<=1&&(r=r[0]),u[t]=r,e(n)}))},function(n){r(n,u)})}function I(n,t,e,r){var u=[];n(t,function(n,t,r){e(n,function(n,t){u=u.concat(t||[]),r(n)})},function(n){r(n,u)})}function x(t,e,r){function i(t,e,r,u){if(null!=u&&"function"!=typeof u)throw new Error("task callback must be a function");return t.started=!0,M(e)||(e=[e]),0===e.length&&t.idle()?C.setImmediate(function(){t.drain()}):(c(e,function(e){var i={data:e,callback:u||n};r?t.tasks.unshift(i):t.tasks.push(i),t.tasks.length===t.concurrency&&t.saturated()}),void C.setImmediate(t.process))}function o(n,t){return function(){f-=1;var e=!1,r=arguments;c(t,function(n){c(l,function(t,r){t!==n||e||(l.splice(r,1),e=!0)}),n.callback.apply(n,r)}),n.tasks.length+f===0&&n.drain(),n.process()}}if(null==e)e=1;else if(0===e)throw new Error("Concurrency must not be zero");var f=0,l=[],s={tasks:[],concurrency:e,payload:r,saturated:n,empty:n,drain:n,started:!1,paused:!1,push:function(n,t){i(s,n,!1,t)},kill:function(){s.drain=n,s.tasks=[]},unshift:function(n,t){i(s,n,!0,t)},process:function(){if(!s.paused&&f=t;t++)C.setImmediate(s.process)}}};return s}function j(n){return m(function(t,e){t.apply(null,e.concat([m(function(t,e){"object"==typeof console&&(t?console.error&&console.error(t):console[n]&&c(e,function(t){console[n](t)}))})]))})}function A(n){return function(t,e,r){n(f(t),e,r)}}function T(n){return m(function(t,e){var r=m(function(e){var r=this,u=e.pop();return n(t,function(n,t,u){n.apply(r,e.concat([u]))},u)});return e.length?r.apply(this,e):r})}function z(n){return m(function(t){var e=t.pop();t.push(function(){var n=arguments;r?C.setImmediate(function(){e.apply(null,n)}):e.apply(null,n)});var r=!0;n.apply(this,t),r=!1})}var q,C={},P="object"==typeof self&&self.self===self&&self||"object"==typeof global&&global.global===global&&global||this;null!=P&&(q=P.async),C.noConflict=function(){return P.async=q,C};var 
H=Object.prototype.toString,M=Array.isArray||function(n){return"[object Array]"===H.call(n)},U=function(n){var t=typeof n;return"function"===t||"object"===t&&!!n},W=Object.keys||function(n){var t=[];for(var e in n)n.hasOwnProperty(e)&&t.push(e);return t},B="function"==typeof setImmediate&&setImmediate,D=B?function(n){B(n)}:function(n){setTimeout(n,0)};"object"==typeof process&&"function"==typeof process.nextTick?C.nextTick=process.nextTick:C.nextTick=D,C.setImmediate=B?D:C.nextTick,C.forEach=C.each=function(n,t,e){return C.eachOf(n,y(t),e)},C.forEachSeries=C.eachSeries=function(n,t,e){return C.eachOfSeries(n,y(t),e)},C.forEachLimit=C.eachLimit=function(n,t,e,r){return v(t)(n,y(e),r)},C.forEachOf=C.eachOf=function(t,e,r){function o(n){f--,n?r(n):null===c&&0>=f&&r(null)}r=i(r||n),t=t||[];for(var c,a=h(t),f=0;null!=(c=a());)f+=1,e(t[c],c,u(o));0===f&&r(null)},C.forEachOfSeries=C.eachOfSeries=function(t,e,r){function o(){var n=!0;return null===a?r(null):(e(t[a],a,u(function(t){if(t)r(t);else{if(a=c(),null===a)return r(null);n?C.setImmediate(o):o()}})),void(n=!1))}r=i(r||n),t=t||[];var c=h(t),a=c();o()},C.forEachOfLimit=C.eachOfLimit=function(n,t,e,r){v(t)(n,e,r)},C.map=d(b),C.mapSeries=k(b),C.mapLimit=g(b),C.inject=C.foldl=C.reduce=function(n,t,e,r){C.eachOfSeries(n,function(n,r,u){e(t,n,function(n,e){t=e,u(n)})},function(n){r(n,t)})},C.foldr=C.reduceRight=function(n,e,r,u){var i=a(n,t).reverse();C.reduce(i,e,r,u)},C.transform=function(n,t,e,r){3===arguments.length&&(r=e,e=t,t=M(n)?[]:{}),C.eachOf(n,function(n,r,u){e(t,n,r,u)},function(n){r(n,t)})},C.select=C.filter=d(w),C.selectLimit=C.filterLimit=g(w),C.selectSeries=C.filterSeries=k(w),C.reject=d(O),C.rejectLimit=g(O),C.rejectSeries=k(O),C.any=C.some=S(C.eachOf,e,t),C.someLimit=S(C.eachOfLimit,e,t),C.all=C.every=S(C.eachOf,r,r),C.everyLimit=S(C.eachOfLimit,r,r),C.detect=S(C.eachOf,t,E),C.detectSeries=S(C.eachOfSeries,t,E),C.detectLimit=S(C.eachOfLimit,t,E),C.sortBy=function(n,t,e){function r(n,t){var 
e=n.criteria,r=t.criteria;return r>e?-1:e>r?1:0}C.map(n,function(n,e){t(n,function(t,r){t?e(t):e(null,{value:n,criteria:r})})},function(n,t){return n?e(n):void e(null,a(t.sort(r),function(n){return n.value}))})},C.auto=function(t,e,r){function u(n){d.unshift(n)}function o(n){var t=p(d,n);t>=0&&d.splice(t,1)}function a(){h--,c(d.slice(0),function(n){n()})}r||(r=e,e=null),r=i(r||n);var f=W(t),h=f.length;if(!h)return r(null);e||(e=h);var y={},v=0,d=[];u(function(){h||r(null,y)}),c(f,function(n){function i(){return e>v&&l(g,function(n,t){return n&&y.hasOwnProperty(t)},!0)&&!y.hasOwnProperty(n)}function c(){i()&&(v++,o(c),h[h.length-1](d,y))}for(var f,h=M(t[n])?t[n]:[t[n]],d=m(function(t,e){if(v--,e.length<=1&&(e=e[0]),t){var u={};s(y,function(n,t){u[t]=n}),u[n]=e,r(t,u)}else y[n]=e,C.setImmediate(a)}),g=h.slice(0,h.length-1),k=g.length;k--;){if(!(f=t[g[k]]))throw new Error("Has inexistant dependency");if(M(f)&&p(f,n)>=0)throw new Error("Has cyclic dependencies")}i()?(v++,h[h.length-1](d,y)):u(c)})},C.retry=function(n,t,e){function r(n,t){if("number"==typeof t)n.times=parseInt(t,10)||i;else{if("object"!=typeof t)throw new Error("Unsupported argument type for 'times': "+typeof t);n.times=parseInt(t.times,10)||i,n.interval=parseInt(t.interval,10)||o}}function u(n,t){function e(n,e){return function(r){n(function(n,t){r(!n||e,{err:n,result:t})},t)}}function r(n){return function(t){setTimeout(function(){t(null)},n)}}for(;a.times;){var u=!(a.times-=1);c.push(e(a.task,u)),!u&&a.interval>0&&c.push(r(a.interval))}C.series(c,function(t,e){e=e[e.length-1],(n||a.callback)(e.err,e.result)})}var i=5,o=0,c=[],a={times:i,interval:o},f=arguments.length;if(1>f||f>3)throw new Error("Invalid arguments - must be either (task), (task, callback), (times, task) or (times, task, callback)");return 2>=f&&"function"==typeof n&&(e=t,t=n),"function"!=typeof n&&r(a,n),a.callback=e,a.task=t,a.callback?u():u},C.waterfall=function(t,e){function r(n){return 
m(function(t,u){if(t)e.apply(null,[t].concat(u));else{var i=n.next();i?u.push(r(i)):u.push(e),z(n).apply(null,u)}})}if(e=i(e||n),!M(t)){var u=new Error("First argument to waterfall must be an array of functions");return e(u)}return t.length?void r(C.iterator(t))():e()},C.parallel=function(n,t){L(C.eachOf,n,t)},C.parallelLimit=function(n,t,e){L(v(t),n,e)},C.series=function(n,t){L(C.eachOfSeries,n,t)},C.iterator=function(n){function t(e){function r(){return n.length&&n[e].apply(null,arguments),r.next()}return r.next=function(){return er;){var i=r+(u-r+1>>>1);e(t,n[i])>=0?r=i:u=i-1}return r}function i(t,e,i,o){if(null!=o&&"function"!=typeof o)throw new Error("task callback must be a function");return t.started=!0,M(e)||(e=[e]),0===e.length?C.setImmediate(function(){t.drain()}):void c(e,function(e){var c={data:e,priority:i,callback:"function"==typeof o?o:n};t.tasks.splice(u(t.tasks,c,r)+1,0,c),t.tasks.length===t.concurrency&&t.saturated(),C.setImmediate(t.process)})}var o=C.queue(t,e);return o.push=function(n,t,e){i(o,n,t,e)},delete o.unshift,o},C.cargo=function(n,t){return x(n,1,t)},C.log=j("log"),C.dir=j("dir"),C.memoize=function(n,e){var r={},u={};e=e||t;var i=m(function(t){var i=t.pop(),o=e.apply(null,t);o in r?C.setImmediate(function(){i.apply(null,r[o])}):o in u?u[o].push(i):(u[o]=[i],n.apply(null,t.concat([m(function(n){r[o]=n;var t=u[o];delete u[o];for(var e=0,i=t.length;i>e;e++)t[e].apply(null,n)})])))});return i.memo=r,i.unmemoized=n,i},C.unmemoize=function(n){return function(){return(n.unmemoized||n).apply(null,arguments)}},C.times=A(C.map),C.timesSeries=A(C.mapSeries),C.timesLimit=function(n,t,e,r){return C.mapLimit(f(n),t,e,r)},C.seq=function(){var t=arguments;return m(function(e){var r=this,u=e[e.length-1];"function"==typeof u?e.pop():u=n,C.reduce(t,e,function(n,t,e){t.apply(r,n.concat([m(function(n,t){e(n,t)})]))},function(n,t){u.apply(r,[n].concat(t))})})},C.compose=function(){return 
C.seq.apply(null,Array.prototype.reverse.call(arguments))},C.applyEach=T(C.eachOf),C.applyEachSeries=T(C.eachOfSeries),C.forever=function(t,e){function r(n){return n?i(n):void o(r)}var i=u(e||n),o=z(t);r()},C.ensureAsync=z,C.constant=m(function(n){var t=[null].concat(n);return function(n){return n.apply(this,t)}}),C.wrapSync=C.asyncify=function(n){return m(function(t){var e,r=t.pop();try{e=n.apply(this,t)}catch(u){return r(u)}U(e)&&"function"==typeof e.then?e.then(function(n){r(null,n)})["catch"](function(n){r(n.message?n:new Error(n))}):r(null,e)})},"object"==typeof module&&module.exports?module.exports=C:"function"==typeof define&&define.amd?define([],function(){return C}):P.async=C}();
26 | }).call(this,require('_process'),typeof global !== "undefined" ? global : typeof self !== "undefined" ? self : typeof window !== "undefined" ? window : {})
27 |
28 | },{"_process":14}],13:[function(require,module,exports){
29 | exports.load=function(e,t){var n=new XMLHttpRequest;n.onreadystatechange=function(){if(4===n.readyState)if(n.status>=200&&n.status<300)t(null,n.responseText);else{var r=new Error("failed to request file '"+e+"'");r.errno=34,t(r)}};try{n.open("GET",e,!0),n.send(null)}catch(r){t(r)}};
30 | },{}],14:[function(require,module,exports){
31 | function cleanUpNextTick(){draining=!1,currentQueue.length?queue=currentQueue.concat(queue):queueIndex=-1,queue.length&&drainQueue()}function drainQueue(){if(!draining){var e=setTimeout(cleanUpNextTick);draining=!0;for(var n=queue.length;n;){for(currentQueue=queue,queue=[];++queueIndex1)for(var r=1;r