├── .circleci
│   └── config.yml
├── .gitignore
├── .gitmodules
├── Changelog.md
├── LICENSE
├── MANIFEST.in
├── Makefile
├── README.md
├── binder
│   ├── apt.txt
│   ├── index.ipynb
│   ├── postBuild
│   └── requirements.txt
├── conx
│   ├── __init__.py
│   ├── _version.py
│   ├── activations.py
│   ├── dataset.py
│   ├── datasets
│   │   ├── __init__.py
│   │   ├── _cifar10.py
│   │   ├── _cifar100.py
│   │   ├── _colors.py
│   │   ├── _fingers.py
│   │   ├── _gridfonts.py
│   │   ├── _mnist.py
│   │   └── cmu_faces.py
│   ├── layers.py
│   ├── network.py
│   ├── networks
│   │   ├── __init__.py
│   │   └── _keras.py
│   ├── tests
│   │   └── test_network.py
│   ├── utils.py
│   └── widgets.py
├── data
│   ├── cmu_faces_full_size.npz
│   ├── cmu_faces_half_size.npz
│   ├── cmu_faces_quarter_size.npz
│   ├── colors.csv
│   ├── figure_ground_a.dat
│   ├── figure_ground_a.npy
│   ├── fingers.npz
│   ├── grid.png
│   ├── gridfonts.dat
│   ├── gridfonts.npy
│   ├── gridfonts.py
│   ├── mnist.h5
│   ├── mnist.py
│   └── mnist_images.png
├── docker
│   ├── Dockerfile
│   ├── Makefile
│   └── README.md
├── docs
│   ├── Makefile
│   ├── requirements.txt
│   └── source
│       ├── _static
│       │   └── css
│       │       └── custom.css
│       ├── conf.py
│       ├── conx.rst
│       ├── examples.rst
│       ├── img
│       │   └── logo.gif
│       ├── index.rst
│       └── modules.rst
├── readthedocs.yaml
├── requirements.txt
├── setup.cfg
└── setup.py
/.circleci/config.yml:
--------------------------------------------------------------------------------
1 | # Python CircleCI 2.0 configuration file
2 | #
3 | # Check https://circleci.com/docs/2.0/language-python/ for more details
4 | #
5 | version: 2
6 | jobs:
7 | build:
8 | docker:
9 | # specify the version you desire here
10 | # use `-browsers` prefix for selenium tests, e.g. `3.6.1-browsers`
11 | - image: circleci/python:3.6.1
12 |
13 | # Specify service dependencies here if necessary
14 | # CircleCI maintains a library of pre-built images
15 | # documented at https://circleci.com/docs/2.0/circleci-images/
16 | # - image: circleci/postgres:9.4
17 |
18 | working_directory: ~/conx
19 |
20 | steps:
21 | - checkout
22 |
23 | # Download and cache dependencies
24 | - restore_cache:
25 | keys:
26 | - v1-dependencies-{{ checksum "requirements.txt" }}
27 | # fallback to using the latest cache if no exact match is found
28 | - v1-dependencies-
29 |
30 | - run:
31 | name: install dependencies
32 | command: |
33 | python3 -m venv venv
34 | . venv/bin/activate
35 | pip install -r requirements.txt
36 | pip install nose
37 | pip install tensorflow
38 | pip install codecov
39 |
40 | - save_cache:
41 | paths:
42 | - ./venv
43 | key: v1-dependencies-{{ checksum "requirements.txt" }}
44 |
45 | # run tests!
46 | - run:
47 | name: run tests
48 | command: |
49 | . venv/bin/activate
50 | nosetests --with-coverage --nologcapture --with-doc conx
51 | codecov
52 |
53 | - store_artifacts:
54 | path: test-reports
55 | destination: test-reports
56 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | env/
12 | build/
13 | develop-eggs/
14 | dist/
15 | downloads/
16 | eggs/
17 | .eggs/
18 | lib/
19 | lib64/
20 | parts/
21 | sdist/
22 | var/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 |
27 | # PyInstaller
28 | # Usually these files are written by a python script from a template
29 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
30 | *.manifest
31 | *.spec
32 |
33 | # Installer logs
34 | pip-log.txt
35 | pip-delete-this-directory.txt
36 |
37 | # Unit test / coverage reports
38 | htmlcov/
39 | .tox/
40 | .coverage
41 | .coverage.*
42 | .cache
43 | nosetests.xml
44 | coverage.xml
45 | *,cover
46 | .hypothesis/
47 |
48 | # Translations
49 | *.mo
50 | *.pot
51 |
52 | # Django stuff:
53 | *.log
54 | local_settings.py
55 |
56 | # Flask stuff:
57 | instance/
58 | .webassets-cache
59 |
60 | # Scrapy stuff:
61 | .scrapy
62 |
63 | # Sphinx documentation
64 | docs/_build/
65 |
66 | # PyBuilder
67 | target/
68 |
69 | # IPython Notebook
70 | .ipynb_checkpoints
71 |
72 | # pyenv
73 | .python-version
74 |
75 | # celery beat schedule file
76 | celerybeat-schedule
77 |
78 | # dotenv
79 | .env
80 |
81 | # virtualenv
82 | venv/
83 | ENV/
84 |
85 | # Spyder project settings
86 | .spyderproject
87 |
88 | # Rope project settings
89 | .ropeproject
90 |
91 | ## conx specific:
92 |
93 | docs/source/*.ipynb
94 | docs/source/*.jpg
95 | docs/source/*.gif
96 | docs/source/*.png
97 | docs/source/*.md
98 | docs/source/_static/*.mp4
99 | tmp/
100 | Makefile
101 | notebooks/*.conx/*
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "notebooks"]
2 | path = notebooks
3 | url = https://github.com/calysto/conx-notebooks
4 |
--------------------------------------------------------------------------------
/Changelog.md:
--------------------------------------------------------------------------------
1 | # Changelog
2 |
3 | ## 3.7.5
4 |
5 | Released Wed September 12, 2018
6 |
7 | * Re-wrote reset_weights; just recompiles model
8 | * Fixed error in gridfont loader
9 | * All widgets/pictures are JupyterLab compatible
10 | * Added support for dynamic_pictures on/off; default is off
11 | * SVG arrows are now curves
12 | * New algorithm for bank layout in SVG
13 | * moved dataset.get() to Dataset.get() and net.get_dataset()
14 | * new virtual datasets API, including vmnist, H5Dataset (remote and local)
15 | * better cache in virtual datasets
16 | * Allow dropout to operate on 0, 1, 2, or 3 whole dims
17 | * Added cx.Layer(bidirectional=mode)
18 | * Show network banks as red until compiled
19 | * Rewrote and renamed net.test() to net.evaluate() and net.evaluate_and_label()
20 | * net.evaluate() for showing results
21 | * net.evaluate_and_label() for use in plots
22 |
23 | ## 3.7.4
24 |
25 | Released Sun August 19, 2018
26 |
27 | * net.pp() gives standard formatting for ints and floats
28 | * Allow negative position in virtual dataset vectors
29 | * Fixed error in colors dataset that truncated the target integer to 8 bits
30 | * Add internal error function to net.compile(error={...})
31 | * New spelling: ConX
32 | * cx.image_to_array() removes alpha
33 | * vshape can be three dimensions (for color images)
34 | * some new image functions: image_resize(), image_remove_alpha()
35 | * renamed "sequence" to "raw" in utils
36 | * Added cx.shape(summary=False), cx.get_ranges(array, form), and get_dim(array, DIMS)
37 | * Use kverbose in train() for all keras activity
38 |
39 | ## 3.7.3
40 |
41 | Released Mon August 13, 2018
42 |
43 | * Allow bool values with onehot
44 | * Unfix fixed crossentropy warning
45 | * Allow datasets to be composed of bools
46 | * added temperature to choice()
47 | * Added net.dataset.inputs.test(tolerance=0.2, index=True)
48 |
49 | ## 3.7.1
50 |
51 | Released Fri August 10, 2018
52 |
53 | * Separate build/compile --- compile() no longer resets weights;
54 | * added net.add_loss()
55 | * Remove additional_output_banks
56 | * refactor build/compile
57 | * add LambdaLayer with size
58 | * add prop_from_dict[(input, output)] = model
59 |
60 | ## 3.7.0
61 |
62 | Released Tue Aug 7, 2018
63 |
64 | * Allow additional output layers for network
65 | * Fix: crossentropy check
66 | * added identity layer for propagating to input layers
67 | * Include LICENSE.txt file in wheels
68 |
69 | ## 3.6.10
70 |
71 | Released Thu May 17, 2018
72 |
73 | * delete layers, InceptionV3, combine networks
74 | * ability to delete layers
75 | * ability to connect two networks together
76 | * rewrote SVG embedded images to use standard cairosvg
77 | * added inceptionv3 network
78 | * cx.download has new verbose flag
79 | * fixes for minimum and maximum
80 | * import_keras_model now forms proper connections
81 | * Warn when displaying an uncompiled network that activations won't be visible
82 | * array_to_image(colormap=) now returns RGBA image
83 |
84 | ## 3.6.9
85 |
86 | Released Fri May 4, 2018
87 |
88 | * propagate_to_features() scales layer[0]
89 | * added cx.array
90 | * fixed (somewhat) array_to_image(colormap=)
91 | * added VGG16 and ImageNet notebook
92 | * New Network.info()
93 | * Updated Network.propagate_to_features(), util.image()
94 | * Network.info() describes predefined networks
95 | * new utility image(filename)
96 | * rewrote Network.propagate_to_features() to be faster
97 | * added VGG preprocessing and postprocessing
98 | * Picture autoscales inputs by default
99 | * Add net.picture(minmax=)
100 | * Rebuild models on import_keras
101 | * Added VGG19
102 | * Added vgg16 and idea of applications as Network.networks
103 | * Bug in building intermediate hidden -> output models
104 |
105 | ## 3.6.7
106 |
107 | Released Tue April 17, 2018
108 |
109 | * Fixed bug in building hidden -> output intermediate models
110 |
111 | ## 3.6.6
112 |
113 | Released Fri April 13, 2018
114 |
115 | * Added cx.view_image_list(pivot) - rotates list and layout
116 | * Added colors dataset
117 | * Added Dataset.delete_bank(), Dataset.append_bank()
118 | * Added Dataset.ITEM[V] = value
119 |
120 | ## 3.6.5
121 |
122 | Released Fri April 6, 2018
123 |
124 | * Removed examples; use notebooks or help instead
125 | * cx.view_image_list() can have layout=None, (int, None), or (None, int)
126 | * Added cx.scale(vector, range, dtype, truncate)
127 | * Added cx.scatter_images(images, xys) - creates scatter plot of images
128 | * Fixed pca.translate(scale=SCALE) bug
129 | * downgrade tensorflow on readthedocs because memory hog kills build
130 |
131 | ## 3.6.4
132 |
133 | Released Thu April 5, 2018
134 |
135 | * changed "not allowed" warning on multi-dim outputs to
136 | "are you sure?"
137 | * fix colormap on array_to_image; added tests
138 | * fix cx.view(array)
139 | * Allow dataset to load generators, zips, etc.
140 |
141 | ## 3.6.3
142 |
143 | Released Tue April 3, 2018
144 |
145 | * Two fixes for array_to_image: div by float; move cmap conversion to end
146 | * Protection for list/array for range and shape
147 | * from kmader: Adding jyro to binder requirements
148 |
149 | ## 3.6.2
150 |
151 | Released Tue March 6, 2018
152 |
153 | * added raw=False to conx.utils image_to_array(), frange(), and reshape()
154 |
155 | ## 3.6.1
156 |
157 | Released Mon March 5, 2018
158 |
159 | * SVG Network enhancements
160 | * vertical and horizontal space
161 | * fixed network drawing connection paths
162 | * save keras functions
163 | * don't crash when attempting to build propagate_from pathways
164 | * added binary_to_int
165 | * download() can rename file
166 | * fixed mislabeled MNIST image
167 | * better memory management when loading cifar
168 | * Network.train(verbose=0) returns proper values
169 | * labels for finger dataset are now strings
170 | * added labels for cx.view_image_list()
171 | * fixed bug in len(dataset.labels)
172 | * added layout to net.plot_layer_weights()
173 | * added ImageLayer(keep_aspect_ratio)
174 | * fixed bugs in datavector.shape
175 | * added dataset.load(generator, count)
176 | * fixed bugs in net.get_weights()/set_weights()
177 | * added network.propagate_to_image(feature=NUM)
178 |
179 | ## 3.6.0
180 |
181 | Released Mon Feb 12, 2018. Initial release recommended for daily use.
182 |
183 | * fixed blurry activation network pictures
184 | * show "[layer(s) not shown]" when Layer(visible=False)
185 | * added fingers dataset
186 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | COPYRIGHT
2 |
3 | All contributions by Calysto Conx Project members:
4 | Copyright (c) 2017, Calysto Conx Project members.
5 | All rights reserved.
6 |
7 | All contributions by François Chollet:
8 | Copyright (c) 2015, François Chollet.
9 | All rights reserved.
10 |
11 | All contributions by Google:
12 | Copyright (c) 2015, Google, Inc.
13 | All rights reserved.
14 |
15 | All contributions by Microsoft:
16 | Copyright (c) 2017, Microsoft, Inc.
17 | All rights reserved.
18 |
19 | All other contributions:
20 | Copyright (c) 2015 - 2017, the respective contributors.
21 | All rights reserved.
22 |
23 | Each contributor holds copyright over their respective contributions.
24 | The project versioning (Git) records all such contribution source information.
25 |
26 | LICENSE
27 |
28 | The MIT License (MIT)
29 |
30 | Permission is hereby granted, free of charge, to any person obtaining a copy
31 | of this software and associated documentation files (the "Software"), to deal
32 | in the Software without restriction, including without limitation the rights
33 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
34 | copies of the Software, and to permit persons to whom the Software is
35 | furnished to do so, subject to the following conditions:
36 |
37 | The above copyright notice and this permission notice shall be included in all
38 | copies or substantial portions of the Software.
39 |
40 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
41 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
42 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
43 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
44 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
45 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
46 | SOFTWARE.
47 |
48 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include *.md
2 | include LICENSE
3 | prune .git
4 | prune dist
5 | prune build
6 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | export VERSION=`python3 setup.py --version 2>/dev/null`
2 |
3 | release:
4 | pip3 install wheel twine setuptools --user
5 | rm -rf dist
6 | python3 setup.py register
7 | python3 setup.py bdist_wheel --universal
8 | python3 setup.py sdist
9 | git commit -a -m "Release $(VERSION)"; true
10 | git tag v$(VERSION)
11 | git push origin --all
12 | git push origin --tags
13 | twine upload dist/*
14 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # ConX Neural Networks
2 |
3 | ## The On-Ramp to Deep Learning
4 |
5 | Built in Python 3 on Keras 2.
6 |
7 | [](https://mybinder.org/v2/gh/Calysto/conx/master?filepath=binder%2Findex.ipynb)
8 | [](https://circleci.com/gh/Calysto/conx/tree/master)
9 | [](https://codecov.io/gh/Calysto/conx)
10 | [](http://conx.readthedocs.io/en/latest/?badge=latest)
11 | [](https://badge.fury.io/py/conx)
12 | [](https://pypistats.org/packages/conx)
13 |
14 | Read the documentation at [conx.readthedocs.io](http://conx.readthedocs.io/)
15 |
16 | Ask questions on the mailing list: [conx-users](https://groups.google.com/forum/#!forum/conx-users)
17 |
18 | ConX implements deep learning neural network algorithms using a simple interface, with easy visualizations and useful analytics. It is built on top of Keras, which can use [TensorFlow](https://www.tensorflow.org/), [Theano](http://www.deeplearning.net/software/theano/), or [CNTK](https://www.cntk.ai/pythondocs/) as its backend.
19 |
20 | A network can be specified to the constructor by providing layer sizes. For example, Network("XOR", 2, 5, 1) specifies a network named "XOR" with a 2-unit input layer, a 5-unit hidden layer, and a 1-unit output layer. More complex networks can be constructed by adding layers explicitly and wiring them together with the `net.connect()` method, as sketched below.
21 |
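A layer-by-layer construction of the same shape looks like this; a minimal sketch, assuming the `cx.Layer`/`net.add()`/`net.connect()` API (see the documentation for exact arguments):

```python
import conx as cx

# build the same XOR network explicitly, one bank at a time
net = cx.Network("XOR")
net.add(cx.Layer("input", 2))
net.add(cx.Layer("hidden", 5, activation="sigmoid"))
net.add(cx.Layer("output", 1, activation="sigmoid"))

# connect() wires banks together by name, which is what
# allows arbitrary topologies beyond a simple chain
net.connect("input", "hidden")
net.connect("hidden", "output")
```
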
22 | Computing XOR via a target function:
23 |
24 | ```python
25 | import conx as cx
26 |
27 | dataset = [[[0, 0], [0]],
28 | [[0, 1], [1]],
29 | [[1, 0], [1]],
30 | [[1, 1], [0]]]
31 |
32 | net = cx.Network("XOR", 2, 5, 1, activation="sigmoid")
33 | net.dataset.load(dataset)
34 | net.compile(error='mean_squared_error',
35 | optimizer="sgd", lr=0.3, momentum=0.9)
36 | net.train(2000, report_rate=10, accuracy=1.0)
37 | net.test(show=True)
38 | ```
39 |
40 | Creates dynamic, rendered visualizations like this:
41 |
42 |
43 |
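In a Jupyter notebook, such a picture can be generated directly from the network object. A minimal sketch, assuming the `picture()` and `dashboard()` methods described in the documentation:

```python
net.picture()    # render an SVG picture of the network with current activations
net.dashboard()  # or open the full interactive dashboard widget
```
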
44 | ## Examples
45 |
46 | See [conx-notebooks](https://github.com/Calysto/conx-notebooks/blob/master/00_Index.ipynb) and the [documentation](http://conx.readthedocs.io/en/latest/) for additional examples.
47 |
48 | ## Installation
49 |
50 | See [How To Run Conx](https://github.com/Calysto/conx-notebooks/tree/master/HowToRun#how-to-run-conx)
51 | to see options on running virtual machines, in the cloud, and personal
52 | installation.
53 |
--------------------------------------------------------------------------------
/binder/apt.txt:
--------------------------------------------------------------------------------
1 | libffi-dev
2 | libffi6
3 | ffmpeg
4 | texlive-latex-base
5 | texlive-latex-recommended
6 | texlive-science
7 | texlive-latex-extra
8 | texlive-fonts-recommended
9 | dvipng
10 | ghostscript
11 | graphviz
12 |
--------------------------------------------------------------------------------
/binder/index.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Welcome to Deep Learning with conx!\n",
8 | "\n",
9 | "This is a live Jupyter notebook running in the cloud via mybinder.\n",
10 | "\n",
11 | "To test Conx, and get your own copy of the conx-notebooks:"
12 | ]
13 | },
14 | {
15 | "cell_type": "code",
16 | "execution_count": 1,
17 | "metadata": {},
18 | "outputs": [
19 | {
20 | "name": "stderr",
21 | "output_type": "stream",
22 | "text": [
23 | "Using Theano backend.\n",
24 | "Conx, version 3.6.0\n"
25 | ]
26 | }
27 | ],
28 | "source": [
29 | "import conx as cx"
30 | ]
31 | },
32 | {
33 | "cell_type": "code",
34 | "execution_count": null,
35 | "metadata": {},
36 | "outputs": [],
37 | "source": [
38 | "cx.download(\"https://github.com/Calysto/conx-notebooks/archive/master.zip\")"
39 | ]
40 | },
41 | {
42 | "cell_type": "markdown",
43 | "metadata": {},
44 | "source": [
45 | "To \"trust\" these notebooks:"
46 | ]
47 | },
48 | {
49 | "cell_type": "code",
50 | "execution_count": null,
51 | "metadata": {},
52 | "outputs": [],
53 | "source": [
54 | "! jupyter trust conx-notebooks-master/*.ipynb"
55 | ]
56 | },
57 | {
58 | "cell_type": "markdown",
59 | "metadata": {},
60 | "source": [
61 | "You can then open \n",
62 | "\n",
63 | "* [conx-notebooks-master/00_Index.ipynb](conx-notebooks-master/00_Index.ipynb) \n",
64 | "* [conx-notebooks-master/collections/sigcse-2018/](conx-notebooks-master/collections/sigcse-2018/) \n",
65 | "\n",
66 | "and explore the notebooks.\n",
67 | "\n",
68 | "Welcome to Conx!\n",
69 | "\n",
70 | "For more help or information see:\n",
71 | "\n",
72 | "* http://conx.readthedocs.io/en/latest/ - online help\n",
73 | "* https://groups.google.com/forum/#!forum/conx-users - conx-users mailing list"
74 | ]
75 | }
76 | ],
77 | "metadata": {
78 | "kernelspec": {
79 | "display_name": "Python 3",
80 | "language": "python",
81 | "name": "python3"
82 | },
83 | "language_info": {
84 | "codemirror_mode": {
85 | "name": "ipython",
86 | "version": 3
87 | },
88 | "file_extension": ".py",
89 | "mimetype": "text/x-python",
90 | "name": "python",
91 | "nbconvert_exporter": "python",
92 | "pygments_lexer": "ipython3",
93 | "version": "3.6.3"
94 | },
95 | "widgets": {
96 | "application/vnd.jupyter.widget-state+json": {
97 | "state": {
98 | "0c01554263f740678e052dddc7baa232": {
99 | "model_module": "@jupyter-widgets/controls",
100 | "model_module_version": "1.1.0",
101 | "model_name": "SelectModel",
102 | "state": {
103 | "_options_labels": [
104 | "Test",
105 | "Train"
106 | ],
107 | "description": "Dataset:",
108 | "index": 1,
109 | "layout": "IPY_MODEL_b6d05976b5cb40ff8a4ddee3c5a861d9",
110 | "rows": 1,
111 | "style": "IPY_MODEL_51461b41bbd4492583f7fbd5d631835d"
112 | }
113 | },
114 | "136345188d7d4eeb97ad63b4e39206c0": {
115 | "model_module": "@jupyter-widgets/base",
116 | "model_module_version": "1.0.0",
117 | "model_name": "LayoutModel",
118 | "state": {}
119 | },
120 | "137cc55f71044b329686552e4d5ed5fe": {
121 | "model_module": "@jupyter-widgets/controls",
122 | "model_module_version": "1.1.0",
123 | "model_name": "HBoxModel",
124 | "state": {
125 | "children": [
126 | "IPY_MODEL_9bfc7d1321df402ebac286b2c8b7a47d",
127 | "IPY_MODEL_f3801cae672644048cd4eb49c89f6fb5",
128 | "IPY_MODEL_dd4b1f529c7941569816da9769084b90",
129 | "IPY_MODEL_5ef72263c5e247cbbb9916bffccf7485",
130 | "IPY_MODEL_c491fff0bf9f47339ab116e4a2b728c9",
131 | "IPY_MODEL_4cb4b63e98944567b0959241151670e8",
132 | "IPY_MODEL_44d99e7e5001431abc7161a689dd6ea5"
133 | ],
134 | "layout": "IPY_MODEL_f6b3e8fc467742ac88c9cf3bf03cfa03"
135 | }
136 | },
137 | "15dbc9bb00744ca8a4613fd572a49fc0": {
138 | "model_module": "@jupyter-widgets/controls",
139 | "model_module_version": "1.1.0",
140 | "model_name": "ButtonStyleModel",
141 | "state": {}
142 | },
143 | "15efb7a6a85b429c857fdc8e18c154c7": {
144 | "model_module": "@jupyter-widgets/output",
145 | "model_module_version": "1.0.0",
146 | "model_name": "OutputModel",
147 | "state": {
148 | "layout": "IPY_MODEL_e4910a4128764aad9d412609da7c9a53"
149 | }
150 | },
151 | "16832b10a05e46b19183f1726ed80faf": {
152 | "model_module": "@jupyter-widgets/base",
153 | "model_module_version": "1.0.0",
154 | "model_name": "LayoutModel",
155 | "state": {
156 | "justify_content": "center",
157 | "overflow_x": "auto",
158 | "overflow_y": "auto",
159 | "width": "95%"
160 | }
161 | },
162 | "17b44abf16704be4971f53e8160fce47": {
163 | "model_module": "@jupyter-widgets/base",
164 | "model_module_version": "1.0.0",
165 | "model_name": "LayoutModel",
166 | "state": {}
167 | },
168 | "18294f98de474bd794eb2b003add21f8": {
169 | "model_module": "@jupyter-widgets/controls",
170 | "model_module_version": "1.1.0",
171 | "model_name": "FloatTextModel",
172 | "state": {
173 | "description": "Leftmost color maps to:",
174 | "layout": "IPY_MODEL_17b44abf16704be4971f53e8160fce47",
175 | "step": null,
176 | "style": "IPY_MODEL_733f45e0a1c44fe1aa0aca4ee87c5217",
177 | "value": -1
178 | }
179 | },
180 | "21191f7705374908a284ccf55829951c": {
181 | "model_module": "@jupyter-widgets/base",
182 | "model_module_version": "1.0.0",
183 | "model_name": "LayoutModel",
184 | "state": {}
185 | },
186 | "24381fd59a954b2980c8347250859849": {
187 | "model_module": "@jupyter-widgets/controls",
188 | "model_module_version": "1.1.0",
189 | "model_name": "DescriptionStyleModel",
190 | "state": {
191 | "description_width": "initial"
192 | }
193 | },
194 | "26908fd67f194c21a968e035de73aa1a": {
195 | "model_module": "@jupyter-widgets/controls",
196 | "model_module_version": "1.1.0",
197 | "model_name": "SelectModel",
198 | "state": {
199 | "_options_labels": [
200 | "input",
201 | "hidden",
202 | "output"
203 | ],
204 | "description": "Layer:",
205 | "index": 2,
206 | "layout": "IPY_MODEL_da640a5ac3bc4a59ab8a82bc8fcbf746",
207 | "rows": 1,
208 | "style": "IPY_MODEL_5348b0f185bd4fa5a642e5121eb0feed"
209 | }
210 | },
211 | "2982cc6bc30940ff96f2f9131a4a6241": {
212 | "model_module": "@jupyter-widgets/base",
213 | "model_module_version": "1.0.0",
214 | "model_name": "LayoutModel",
215 | "state": {}
216 | },
217 | "29eb7d46cc034ca5abe9c2d759997fb5": {
218 | "model_module": "@jupyter-widgets/controls",
219 | "model_module_version": "1.1.0",
220 | "model_name": "FloatSliderModel",
221 | "state": {
222 | "continuous_update": false,
223 | "description": "Zoom",
224 | "layout": "IPY_MODEL_55ec8f78c85a4eb8af0a7bbd80cf4ce3",
225 | "max": 3,
226 | "min": 0.5,
227 | "step": 0.1,
228 | "style": "IPY_MODEL_bea9ec4c51724a9797030debbd44b595",
229 | "value": 1
230 | }
231 | },
232 | "2aa5894453424c4aacf351dd8c8ab922": {
233 | "model_module": "@jupyter-widgets/base",
234 | "model_module_version": "1.0.0",
235 | "model_name": "LayoutModel",
236 | "state": {}
237 | },
238 | "3156b5f6d28642bd9633e8e5518c9097": {
239 | "model_module": "@jupyter-widgets/controls",
240 | "model_module_version": "1.1.0",
241 | "model_name": "VBoxModel",
242 | "state": {
243 | "children": [
244 | "IPY_MODEL_0c01554263f740678e052dddc7baa232",
245 | "IPY_MODEL_29eb7d46cc034ca5abe9c2d759997fb5",
246 | "IPY_MODEL_dd45f7f9734f48fba70aaf8ae65dbb97",
247 | "IPY_MODEL_421ac3c41d5b46e6af897887dae323e3",
248 | "IPY_MODEL_b9be5483405141b88cf18e7d4f4963de",
249 | "IPY_MODEL_6c4dcc259efa4621aac46f4703595e4d",
250 | "IPY_MODEL_c712050936334e1d859ad0bdbba33a50",
251 | "IPY_MODEL_5a5d74e4200043f6ab928822b4346aa4"
252 | ],
253 | "layout": "IPY_MODEL_34cf92591c7545d3b47483fc9b50c798"
254 | }
255 | },
256 | "326c868bba2744109476ec1a641314b3": {
257 | "model_module": "@jupyter-widgets/controls",
258 | "model_module_version": "1.1.0",
259 | "model_name": "VBoxModel",
260 | "state": {
261 | "children": [
262 | "IPY_MODEL_26908fd67f194c21a968e035de73aa1a",
263 | "IPY_MODEL_7db6e0d0198d45f98e8c5a6f08d0791b",
264 | "IPY_MODEL_8193e107e49f49dda781c456011d9a78",
265 | "IPY_MODEL_b0b819e6de95419a9eeb8a0002353ee1",
266 | "IPY_MODEL_18294f98de474bd794eb2b003add21f8",
267 | "IPY_MODEL_a836973f20574c71b33aa3cef64e7f1e",
268 | "IPY_MODEL_71588c1ad11e42df844dff42b5ab041d"
269 | ],
270 | "layout": "IPY_MODEL_4b5e4ed95f04466e8685437a1f5593e7"
271 | }
272 | },
273 | "328d40ca9797496b91ee0685f8f59d05": {
274 | "model_module": "@jupyter-widgets/base",
275 | "model_module_version": "1.0.0",
276 | "model_name": "LayoutModel",
277 | "state": {
278 | "width": "100%"
279 | }
280 | },
281 | "32eb1f056b8a4311a1448f7fd4b7459e": {
282 | "model_module": "@jupyter-widgets/controls",
283 | "model_module_version": "1.1.0",
284 | "model_name": "ButtonStyleModel",
285 | "state": {}
286 | },
287 | "34cf92591c7545d3b47483fc9b50c798": {
288 | "model_module": "@jupyter-widgets/base",
289 | "model_module_version": "1.0.0",
290 | "model_name": "LayoutModel",
291 | "state": {
292 | "width": "100%"
293 | }
294 | },
295 | "3501c6717de1410aba4274dec484dde0": {
296 | "model_module": "@jupyter-widgets/controls",
297 | "model_module_version": "1.1.0",
298 | "model_name": "DescriptionStyleModel",
299 | "state": {
300 | "description_width": "initial"
301 | }
302 | },
303 | "364a4cc16b974d61b20102f7cbd4e9fe": {
304 | "model_module": "@jupyter-widgets/controls",
305 | "model_module_version": "1.1.0",
306 | "model_name": "DescriptionStyleModel",
307 | "state": {
308 | "description_width": "initial"
309 | }
310 | },
311 | "379c8a4fc2c04ec1aea72f9a38509632": {
312 | "model_module": "@jupyter-widgets/controls",
313 | "model_module_version": "1.1.0",
314 | "model_name": "DescriptionStyleModel",
315 | "state": {
316 | "description_width": "initial"
317 | }
318 | },
319 | "3b7e0bdd891743bda0dd4a9d15dd0a42": {
320 | "model_module": "@jupyter-widgets/controls",
321 | "model_module_version": "1.1.0",
322 | "model_name": "DescriptionStyleModel",
323 | "state": {
324 | "description_width": ""
325 | }
326 | },
327 | "402fa77b803048b2990b273187598d95": {
328 | "model_module": "@jupyter-widgets/controls",
329 | "model_module_version": "1.1.0",
330 | "model_name": "SliderStyleModel",
331 | "state": {
332 | "description_width": ""
333 | }
334 | },
335 | "421ac3c41d5b46e6af897887dae323e3": {
336 | "model_module": "@jupyter-widgets/controls",
337 | "model_module_version": "1.1.0",
338 | "model_name": "IntTextModel",
339 | "state": {
340 | "description": "Vertical space between layers:",
341 | "layout": "IPY_MODEL_9fca8922d6a84010a73ddb46f2713c40",
342 | "step": 1,
343 | "style": "IPY_MODEL_379c8a4fc2c04ec1aea72f9a38509632",
344 | "value": 30
345 | }
346 | },
347 | "4479c1d8a9d74da1800e20711176fe1a": {
348 | "model_module": "@jupyter-widgets/base",
349 | "model_module_version": "1.0.0",
350 | "model_name": "LayoutModel",
351 | "state": {}
352 | },
353 | "44d99e7e5001431abc7161a689dd6ea5": {
354 | "model_module": "@jupyter-widgets/controls",
355 | "model_module_version": "1.1.0",
356 | "model_name": "ButtonModel",
357 | "state": {
358 | "icon": "refresh",
359 | "layout": "IPY_MODEL_861d8540f11944d8995a9f5c1385c829",
360 | "style": "IPY_MODEL_eba95634dd3241f2911fc17342ddf924"
361 | }
362 | },
363 | "486e1a6b578b44b397b02f1dbafc907b": {
364 | "model_module": "@jupyter-widgets/controls",
365 | "model_module_version": "1.1.0",
366 | "model_name": "DescriptionStyleModel",
367 | "state": {
368 | "description_width": ""
369 | }
370 | },
371 | "4b5e4ed95f04466e8685437a1f5593e7": {
372 | "model_module": "@jupyter-widgets/base",
373 | "model_module_version": "1.0.0",
374 | "model_name": "LayoutModel",
375 | "state": {
376 | "width": "100%"
377 | }
378 | },
379 | "4cb4b63e98944567b0959241151670e8": {
380 | "model_module": "@jupyter-widgets/controls",
381 | "model_module_version": "1.1.0",
382 | "model_name": "ButtonModel",
383 | "state": {
384 | "description": "Play",
385 | "icon": "play",
386 | "layout": "IPY_MODEL_328d40ca9797496b91ee0685f8f59d05",
387 | "style": "IPY_MODEL_79f332bd05a2497eb63b8f6cc6acf8a3"
388 | }
389 | },
390 | "4e5807bdbf3d4dc2b611e26fa56b6101": {
391 | "model_module": "@jupyter-widgets/base",
392 | "model_module_version": "1.0.0",
393 | "model_name": "LayoutModel",
394 | "state": {
395 | "width": "100px"
396 | }
397 | },
398 | "51461b41bbd4492583f7fbd5d631835d": {
399 | "model_module": "@jupyter-widgets/controls",
400 | "model_module_version": "1.1.0",
401 | "model_name": "DescriptionStyleModel",
402 | "state": {
403 | "description_width": ""
404 | }
405 | },
406 | "5348b0f185bd4fa5a642e5121eb0feed": {
407 | "model_module": "@jupyter-widgets/controls",
408 | "model_module_version": "1.1.0",
409 | "model_name": "DescriptionStyleModel",
410 | "state": {
411 | "description_width": ""
412 | }
413 | },
414 | "55ec8f78c85a4eb8af0a7bbd80cf4ce3": {
415 | "model_module": "@jupyter-widgets/base",
416 | "model_module_version": "1.0.0",
417 | "model_name": "LayoutModel",
418 | "state": {}
419 | },
420 | "5a5d74e4200043f6ab928822b4346aa4": {
421 | "model_module": "@jupyter-widgets/controls",
422 | "model_module_version": "1.1.0",
423 | "model_name": "FloatTextModel",
424 | "state": {
425 | "description": "Feature scale:",
426 | "layout": "IPY_MODEL_6ee890b9a11c4ce0868fc8da9b510720",
427 | "step": null,
428 | "style": "IPY_MODEL_24381fd59a954b2980c8347250859849",
429 | "value": 2
430 | }
431 | },
432 | "5ef72263c5e247cbbb9916bffccf7485": {
433 | "model_module": "@jupyter-widgets/controls",
434 | "model_module_version": "1.1.0",
435 | "model_name": "ButtonModel",
436 | "state": {
437 | "icon": "forward",
438 | "layout": "IPY_MODEL_f5e9b479caa3491496610a1bca70f6f6",
439 | "style": "IPY_MODEL_32eb1f056b8a4311a1448f7fd4b7459e"
440 | }
441 | },
442 | "63880f7d52ea4c7da360d2269be5cbd2": {
443 | "model_module": "@jupyter-widgets/base",
444 | "model_module_version": "1.0.0",
445 | "model_name": "LayoutModel",
446 | "state": {
447 | "width": "100%"
448 | }
449 | },
450 | "66e29c1eb7fd494babefa05037841259": {
451 | "model_module": "@jupyter-widgets/controls",
452 | "model_module_version": "1.1.0",
453 | "model_name": "CheckboxModel",
454 | "state": {
455 | "description": "Errors",
456 | "disabled": false,
457 | "layout": "IPY_MODEL_9fca8922d6a84010a73ddb46f2713c40",
458 | "style": "IPY_MODEL_3501c6717de1410aba4274dec484dde0",
459 | "value": false
460 | }
461 | },
462 | "6b1a08b14f2647c3aace0739e77581de": {
463 | "model_module": "@jupyter-widgets/base",
464 | "model_module_version": "1.0.0",
465 | "model_name": "LayoutModel",
466 | "state": {}
467 | },
468 | "6c4dcc259efa4621aac46f4703595e4d": {
469 | "model_module": "@jupyter-widgets/controls",
470 | "model_module_version": "1.1.0",
471 | "model_name": "SelectModel",
472 | "state": {
473 | "_options_labels": [
474 | ""
475 | ],
476 | "description": "Features:",
477 | "index": 0,
478 | "layout": "IPY_MODEL_2982cc6bc30940ff96f2f9131a4a6241",
479 | "rows": 1,
480 | "style": "IPY_MODEL_c0a2f4ee45914dcaa1a8bdacac5046ed"
481 | }
482 | },
483 | "6ee890b9a11c4ce0868fc8da9b510720": {
484 | "model_module": "@jupyter-widgets/base",
485 | "model_module_version": "1.0.0",
486 | "model_name": "LayoutModel",
487 | "state": {}
488 | },
489 | "6f384b6b080b4a72bfaae3920f9b7163": {
490 | "model_module": "@jupyter-widgets/controls",
491 | "model_module_version": "1.1.0",
492 | "model_name": "HBoxModel",
493 | "state": {
494 | "children": [
495 | "IPY_MODEL_88d0c8c574214d258d52e6cf10c90587",
496 | "IPY_MODEL_8c242c24558644d68a3fa12cc2d805ba"
497 | ],
498 | "layout": "IPY_MODEL_a05f399be93e4461a73ff1e852749db5"
499 | }
500 | },
501 | "71588c1ad11e42df844dff42b5ab041d": {
502 | "model_module": "@jupyter-widgets/controls",
503 | "model_module_version": "1.1.0",
504 | "model_name": "IntTextModel",
505 | "state": {
506 | "description": "Feature to show:",
507 | "layout": "IPY_MODEL_136345188d7d4eeb97ad63b4e39206c0",
508 | "step": 1,
509 | "style": "IPY_MODEL_364a4cc16b974d61b20102f7cbd4e9fe"
510 | }
511 | },
512 | "733f45e0a1c44fe1aa0aca4ee87c5217": {
513 | "model_module": "@jupyter-widgets/controls",
514 | "model_module_version": "1.1.0",
515 | "model_name": "DescriptionStyleModel",
516 | "state": {
517 | "description_width": "initial"
518 | }
519 | },
520 | "788f81e5b21a4638b040e17ac78b8ce6": {
521 | "model_module": "@jupyter-widgets/controls",
522 | "model_module_version": "1.1.0",
523 | "model_name": "DescriptionStyleModel",
524 | "state": {
525 | "description_width": "initial"
526 | }
527 | },
528 | "79f332bd05a2497eb63b8f6cc6acf8a3": {
529 | "model_module": "@jupyter-widgets/controls",
530 | "model_module_version": "1.1.0",
531 | "model_name": "ButtonStyleModel",
532 | "state": {}
533 | },
534 | "7db6e0d0198d45f98e8c5a6f08d0791b": {
535 | "model_module": "@jupyter-widgets/controls",
536 | "model_module_version": "1.1.0",
537 | "model_name": "CheckboxModel",
538 | "state": {
539 | "description": "Visible",
540 | "disabled": false,
541 | "layout": "IPY_MODEL_9fca8922d6a84010a73ddb46f2713c40",
542 | "style": "IPY_MODEL_df5e3f91eea7415a888271f8fc68f9a5",
543 | "value": true
544 | }
545 | },
546 | "8193e107e49f49dda781c456011d9a78": {
547 | "model_module": "@jupyter-widgets/controls",
548 | "model_module_version": "1.1.0",
549 | "model_name": "SelectModel",
550 | "state": {
551 | "_options_labels": [
552 | "",
553 | "Accent",
554 | "Accent_r",
555 | "Blues",
556 | "Blues_r",
557 | "BrBG",
558 | "BrBG_r",
559 | "BuGn",
560 | "BuGn_r",
561 | "BuPu",
562 | "BuPu_r",
563 | "CMRmap",
564 | "CMRmap_r",
565 | "Dark2",
566 | "Dark2_r",
567 | "GnBu",
568 | "GnBu_r",
569 | "Greens",
570 | "Greens_r",
571 | "Greys",
572 | "Greys_r",
573 | "OrRd",
574 | "OrRd_r",
575 | "Oranges",
576 | "Oranges_r",
577 | "PRGn",
578 | "PRGn_r",
579 | "Paired",
580 | "Paired_r",
581 | "Pastel1",
582 | "Pastel1_r",
583 | "Pastel2",
584 | "Pastel2_r",
585 | "PiYG",
586 | "PiYG_r",
587 | "PuBu",
588 | "PuBuGn",
589 | "PuBuGn_r",
590 | "PuBu_r",
591 | "PuOr",
592 | "PuOr_r",
593 | "PuRd",
594 | "PuRd_r",
595 | "Purples",
596 | "Purples_r",
597 | "RdBu",
598 | "RdBu_r",
599 | "RdGy",
600 | "RdGy_r",
601 | "RdPu",
602 | "RdPu_r",
603 | "RdYlBu",
604 | "RdYlBu_r",
605 | "RdYlGn",
606 | "RdYlGn_r",
607 | "Reds",
608 | "Reds_r",
609 | "Set1",
610 | "Set1_r",
611 | "Set2",
612 | "Set2_r",
613 | "Set3",
614 | "Set3_r",
615 | "Spectral",
616 | "Spectral_r",
617 | "Vega10",
618 | "Vega10_r",
619 | "Vega20",
620 | "Vega20_r",
621 | "Vega20b",
622 | "Vega20b_r",
623 | "Vega20c",
624 | "Vega20c_r",
625 | "Wistia",
626 | "Wistia_r",
627 | "YlGn",
628 | "YlGnBu",
629 | "YlGnBu_r",
630 | "YlGn_r",
631 | "YlOrBr",
632 | "YlOrBr_r",
633 | "YlOrRd",
634 | "YlOrRd_r",
635 | "afmhot",
636 | "afmhot_r",
637 | "autumn",
638 | "autumn_r",
639 | "binary",
640 | "binary_r",
641 | "bone",
642 | "bone_r",
643 | "brg",
644 | "brg_r",
645 | "bwr",
646 | "bwr_r",
647 | "cool",
648 | "cool_r",
649 | "coolwarm",
650 | "coolwarm_r",
651 | "copper",
652 | "copper_r",
653 | "cubehelix",
654 | "cubehelix_r",
655 | "flag",
656 | "flag_r",
657 | "gist_earth",
658 | "gist_earth_r",
659 | "gist_gray",
660 | "gist_gray_r",
661 | "gist_heat",
662 | "gist_heat_r",
663 | "gist_ncar",
664 | "gist_ncar_r",
665 | "gist_rainbow",
666 | "gist_rainbow_r",
667 | "gist_stern",
668 | "gist_stern_r",
669 | "gist_yarg",
670 | "gist_yarg_r",
671 | "gnuplot",
672 | "gnuplot2",
673 | "gnuplot2_r",
674 | "gnuplot_r",
675 | "gray",
676 | "gray_r",
677 | "hot",
678 | "hot_r",
679 | "hsv",
680 | "hsv_r",
681 | "inferno",
682 | "inferno_r",
683 | "jet",
684 | "jet_r",
685 | "magma",
686 | "magma_r",
687 | "nipy_spectral",
688 | "nipy_spectral_r",
689 | "ocean",
690 | "ocean_r",
691 | "pink",
692 | "pink_r",
693 | "plasma",
694 | "plasma_r",
695 | "prism",
696 | "prism_r",
697 | "rainbow",
698 | "rainbow_r",
699 | "seismic",
700 | "seismic_r",
701 | "spectral",
702 | "spectral_r",
703 | "spring",
704 | "spring_r",
705 | "summer",
706 | "summer_r",
707 | "tab10",
708 | "tab10_r",
709 | "tab20",
710 | "tab20_r",
711 | "tab20b",
712 | "tab20b_r",
713 | "tab20c",
714 | "tab20c_r",
715 | "terrain",
716 | "terrain_r",
717 | "viridis",
718 | "viridis_r",
719 | "winter",
720 | "winter_r"
721 | ],
722 | "description": "Colormap:",
723 | "index": 0,
724 | "layout": "IPY_MODEL_9fca8922d6a84010a73ddb46f2713c40",
725 | "rows": 1,
726 | "style": "IPY_MODEL_a19b7c9088874ddaaf2efd2bce456ef7"
727 | }
728 | },
729 | "838f5f2263084d2eafad4d9a12fc3e7f": {
730 | "model_module": "@jupyter-widgets/controls",
731 | "model_module_version": "1.1.0",
732 | "model_name": "DescriptionStyleModel",
733 | "state": {
734 | "description_width": "initial"
735 | }
736 | },
737 | "861d8540f11944d8995a9f5c1385c829": {
738 | "model_module": "@jupyter-widgets/base",
739 | "model_module_version": "1.0.0",
740 | "model_name": "LayoutModel",
741 | "state": {
742 | "width": "25%"
743 | }
744 | },
745 | "86f142406e04427da6205fa66bac9620": {
746 | "model_module": "@jupyter-widgets/base",
747 | "model_module_version": "1.0.0",
748 | "model_name": "LayoutModel",
749 | "state": {
750 | "width": "100%"
751 | }
752 | },
753 | "88d0c8c574214d258d52e6cf10c90587": {
754 | "model_module": "@jupyter-widgets/controls",
755 | "model_module_version": "1.1.0",
756 | "model_name": "IntSliderModel",
757 | "state": {
758 | "continuous_update": false,
759 | "description": "Dataset index",
760 | "layout": "IPY_MODEL_b14873fb0b8347d2a1083d59fba8ad54",
761 | "max": 3,
762 | "style": "IPY_MODEL_402fa77b803048b2990b273187598d95",
763 | "value": 3
764 | }
765 | },
766 | "89b2034ca3124ff6848b20fb84a1a342": {
767 | "model_module": "@jupyter-widgets/controls",
768 | "model_module_version": "1.1.0",
769 | "model_name": "DescriptionStyleModel",
770 | "state": {
771 | "description_width": ""
772 | }
773 | },
774 | "8bdd74f89a3043e792eabd6a6226a6ab": {
775 | "model_module": "@jupyter-widgets/base",
776 | "model_module_version": "1.0.0",
777 | "model_name": "LayoutModel",
778 | "state": {
779 | "width": "100%"
780 | }
781 | },
782 | "8c242c24558644d68a3fa12cc2d805ba": {
783 | "model_module": "@jupyter-widgets/controls",
784 | "model_module_version": "1.1.0",
785 | "model_name": "LabelModel",
786 | "state": {
787 | "layout": "IPY_MODEL_4e5807bdbf3d4dc2b611e26fa56b6101",
788 | "style": "IPY_MODEL_486e1a6b578b44b397b02f1dbafc907b",
789 | "value": "of 4"
790 | }
791 | },
792 | "9b5de2cdaeba4cc2be73572dc68dd1e9": {
793 | "model_module": "@jupyter-widgets/controls",
794 | "model_module_version": "1.1.0",
795 | "model_name": "VBoxModel",
796 | "state": {
797 | "children": [
798 | "IPY_MODEL_b985873afa7a4297b6a3f47c5d6cdb89",
799 | "IPY_MODEL_d5b406961ccc42458002f37052e3d0a9",
800 | "IPY_MODEL_dea0a485bad246ce9e6f5273e581c7cf",
801 | "IPY_MODEL_15efb7a6a85b429c857fdc8e18c154c7"
802 | ],
803 | "layout": "IPY_MODEL_21191f7705374908a284ccf55829951c"
804 | }
805 | },
806 | "9bfc7d1321df402ebac286b2c8b7a47d": {
807 | "model_module": "@jupyter-widgets/controls",
808 | "model_module_version": "1.1.0",
809 | "model_name": "ButtonModel",
810 | "state": {
811 | "icon": "fast-backward",
812 | "layout": "IPY_MODEL_cc692e9fd5a3487ba19794423c815e29",
813 | "style": "IPY_MODEL_e7e084ac53694252b14094ad1bc1affd"
814 | }
815 | },
816 | "9fca8922d6a84010a73ddb46f2713c40": {
817 | "model_module": "@jupyter-widgets/base",
818 | "model_module_version": "1.0.0",
819 | "model_name": "LayoutModel",
820 | "state": {}
821 | },
822 | "a05f399be93e4461a73ff1e852749db5": {
823 | "model_module": "@jupyter-widgets/base",
824 | "model_module_version": "1.0.0",
825 | "model_name": "LayoutModel",
826 | "state": {
827 | "height": "40px"
828 | }
829 | },
830 | "a19b7c9088874ddaaf2efd2bce456ef7": {
831 | "model_module": "@jupyter-widgets/controls",
832 | "model_module_version": "1.1.0",
833 | "model_name": "DescriptionStyleModel",
834 | "state": {
835 | "description_width": ""
836 | }
837 | },
838 | "a836973f20574c71b33aa3cef64e7f1e": {
839 | "model_module": "@jupyter-widgets/controls",
840 | "model_module_version": "1.1.0",
841 | "model_name": "FloatTextModel",
842 | "state": {
843 | "description": "Rightmost color maps to:",
844 | "layout": "IPY_MODEL_d32a5abd74134a4686d75e191a1533cb",
845 | "step": null,
846 | "style": "IPY_MODEL_ac548405c8af4ee7a8ab8c38abae38c9",
847 | "value": 1
848 | }
849 | },
850 | "aa04171bbaa9441db93e9a7e35d43065": {
851 | "model_module": "@jupyter-widgets/controls",
852 | "model_module_version": "1.1.0",
853 | "model_name": "CheckboxModel",
854 | "state": {
855 | "description": "Show Targets",
856 | "disabled": false,
857 | "layout": "IPY_MODEL_9fca8922d6a84010a73ddb46f2713c40",
858 | "style": "IPY_MODEL_838f5f2263084d2eafad4d9a12fc3e7f",
859 | "value": false
860 | }
861 | },
862 | "ac548405c8af4ee7a8ab8c38abae38c9": {
863 | "model_module": "@jupyter-widgets/controls",
864 | "model_module_version": "1.1.0",
865 | "model_name": "DescriptionStyleModel",
866 | "state": {
867 | "description_width": "initial"
868 | }
869 | },
870 | "b0b819e6de95419a9eeb8a0002353ee1": {
871 | "model_module": "@jupyter-widgets/controls",
872 | "model_module_version": "1.1.0",
873 | "model_name": "HTMLModel",
874 | "state": {
875 | "layout": "IPY_MODEL_cc030e35783e49339e05ba715ee14e62",
876 | "style": "IPY_MODEL_de9d6e453fa14e79b965a4501ff69845",
877 | "value": "
"
878 | }
879 | },
880 | "b14873fb0b8347d2a1083d59fba8ad54": {
881 | "model_module": "@jupyter-widgets/base",
882 | "model_module_version": "1.0.0",
883 | "model_name": "LayoutModel",
884 | "state": {
885 | "width": "100%"
886 | }
887 | },
888 | "b6d05976b5cb40ff8a4ddee3c5a861d9": {
889 | "model_module": "@jupyter-widgets/base",
890 | "model_module_version": "1.0.0",
891 | "model_name": "LayoutModel",
892 | "state": {}
893 | },
894 | "b80cb0ae61994a978597829f7b6aa2c0": {
895 | "model_module": "@jupyter-widgets/controls",
896 | "model_module_version": "1.1.0",
897 | "model_name": "ButtonStyleModel",
898 | "state": {}
899 | },
900 | "b985873afa7a4297b6a3f47c5d6cdb89": {
901 | "model_module": "@jupyter-widgets/controls",
902 | "model_module_version": "1.1.0",
903 | "model_name": "AccordionModel",
904 | "state": {
905 | "_titles": {
906 | "0": "XOR"
907 | },
908 | "children": [
909 | "IPY_MODEL_ba1ed22eb0b542c0a6826001b434a399"
910 | ],
911 | "layout": "IPY_MODEL_2aa5894453424c4aacf351dd8c8ab922",
912 | "selected_index": null
913 | }
914 | },
915 | "b9be5483405141b88cf18e7d4f4963de": {
916 | "model_module": "@jupyter-widgets/controls",
917 | "model_module_version": "1.1.0",
918 | "model_name": "HBoxModel",
919 | "state": {
920 | "children": [
921 | "IPY_MODEL_aa04171bbaa9441db93e9a7e35d43065",
922 | "IPY_MODEL_66e29c1eb7fd494babefa05037841259"
923 | ],
924 | "layout": "IPY_MODEL_6b1a08b14f2647c3aace0739e77581de"
925 | }
926 | },
927 | "ba1ed22eb0b542c0a6826001b434a399": {
928 | "model_module": "@jupyter-widgets/controls",
929 | "model_module_version": "1.1.0",
930 | "model_name": "HBoxModel",
931 | "state": {
932 | "children": [
933 | "IPY_MODEL_3156b5f6d28642bd9633e8e5518c9097",
934 | "IPY_MODEL_326c868bba2744109476ec1a641314b3"
935 | ],
936 | "layout": "IPY_MODEL_4479c1d8a9d74da1800e20711176fe1a"
937 | }
938 | },
939 | "be9a58a938b645908d3b2fdf7158906c": {
940 | "model_module": "@jupyter-widgets/base",
941 | "model_module_version": "1.0.0",
942 | "model_name": "LayoutModel",
943 | "state": {}
944 | },
945 | "bea9ec4c51724a9797030debbd44b595": {
946 | "model_module": "@jupyter-widgets/controls",
947 | "model_module_version": "1.1.0",
948 | "model_name": "SliderStyleModel",
949 | "state": {
950 | "description_width": ""
951 | }
952 | },
953 | "bf5310d62e034c2582830a2037a8b002": {
954 | "model_module": "@jupyter-widgets/controls",
955 | "model_module_version": "1.1.0",
956 | "model_name": "DescriptionStyleModel",
957 | "state": {
958 | "description_width": "initial"
959 | }
960 | },
961 | "c0a2f4ee45914dcaa1a8bdacac5046ed": {
962 | "model_module": "@jupyter-widgets/controls",
963 | "model_module_version": "1.1.0",
964 | "model_name": "DescriptionStyleModel",
965 | "state": {
966 | "description_width": ""
967 | }
968 | },
969 | "c22302df51a94a2280910415876239df": {
970 | "model_module": "@jupyter-widgets/base",
971 | "model_module_version": "1.0.0",
972 | "model_name": "LayoutModel",
973 | "state": {
974 | "width": "100%"
975 | }
976 | },
977 | "c491fff0bf9f47339ab116e4a2b728c9": {
978 | "model_module": "@jupyter-widgets/controls",
979 | "model_module_version": "1.1.0",
980 | "model_name": "ButtonModel",
981 | "state": {
982 | "icon": "fast-forward",
983 | "layout": "IPY_MODEL_63880f7d52ea4c7da360d2269be5cbd2",
984 | "style": "IPY_MODEL_b80cb0ae61994a978597829f7b6aa2c0"
985 | }
986 | },
987 | "c712050936334e1d859ad0bdbba33a50": {
988 | "model_module": "@jupyter-widgets/controls",
989 | "model_module_version": "1.1.0",
990 | "model_name": "IntTextModel",
991 | "state": {
992 | "description": "Feature columns:",
993 | "layout": "IPY_MODEL_be9a58a938b645908d3b2fdf7158906c",
994 | "step": 1,
995 | "style": "IPY_MODEL_788f81e5b21a4638b040e17ac78b8ce6",
996 | "value": 3
997 | }
998 | },
999 | "cc030e35783e49339e05ba715ee14e62": {
1000 | "model_module": "@jupyter-widgets/base",
1001 | "model_module_version": "1.0.0",
1002 | "model_name": "LayoutModel",
1003 | "state": {}
1004 | },
1005 | "cc692e9fd5a3487ba19794423c815e29": {
1006 | "model_module": "@jupyter-widgets/base",
1007 | "model_module_version": "1.0.0",
1008 | "model_name": "LayoutModel",
1009 | "state": {
1010 | "width": "100%"
1011 | }
1012 | },
1013 | "d32a5abd74134a4686d75e191a1533cb": {
1014 | "model_module": "@jupyter-widgets/base",
1015 | "model_module_version": "1.0.0",
1016 | "model_name": "LayoutModel",
1017 | "state": {}
1018 | },
1019 | "d5b406961ccc42458002f37052e3d0a9": {
1020 | "model_module": "@jupyter-widgets/controls",
1021 | "model_module_version": "1.1.0",
1022 | "model_name": "VBoxModel",
1023 | "state": {
1024 | "children": [
1025 | "IPY_MODEL_6f384b6b080b4a72bfaae3920f9b7163",
1026 | "IPY_MODEL_137cc55f71044b329686552e4d5ed5fe"
1027 | ],
1028 | "layout": "IPY_MODEL_c22302df51a94a2280910415876239df"
1029 | }
1030 | },
1031 | "da640a5ac3bc4a59ab8a82bc8fcbf746": {
1032 | "model_module": "@jupyter-widgets/base",
1033 | "model_module_version": "1.0.0",
1034 | "model_name": "LayoutModel",
1035 | "state": {}
1036 | },
1037 | "dd45f7f9734f48fba70aaf8ae65dbb97": {
1038 | "model_module": "@jupyter-widgets/controls",
1039 | "model_module_version": "1.1.0",
1040 | "model_name": "IntTextModel",
1041 | "state": {
1042 | "description": "Horizontal space between banks:",
1043 | "layout": "IPY_MODEL_9fca8922d6a84010a73ddb46f2713c40",
1044 | "step": 1,
1045 | "style": "IPY_MODEL_bf5310d62e034c2582830a2037a8b002",
1046 | "value": 150
1047 | }
1048 | },
1049 | "dd4b1f529c7941569816da9769084b90": {
1050 | "model_module": "@jupyter-widgets/controls",
1051 | "model_module_version": "1.1.0",
1052 | "model_name": "IntTextModel",
1053 | "state": {
1054 | "layout": "IPY_MODEL_86f142406e04427da6205fa66bac9620",
1055 | "step": 1,
1056 | "style": "IPY_MODEL_89b2034ca3124ff6848b20fb84a1a342",
1057 | "value": 3
1058 | }
1059 | },
1060 | "de9d6e453fa14e79b965a4501ff69845": {
1061 | "model_module": "@jupyter-widgets/controls",
1062 | "model_module_version": "1.1.0",
1063 | "model_name": "DescriptionStyleModel",
1064 | "state": {
1065 | "description_width": ""
1066 | }
1067 | },
1068 | "dea0a485bad246ce9e6f5273e581c7cf": {
1069 | "model_module": "@jupyter-widgets/controls",
1070 | "model_module_version": "1.1.0",
1071 | "model_name": "HTMLModel",
1072 | "state": {
1073 | "layout": "IPY_MODEL_16832b10a05e46b19183f1726ed80faf",
1074 | "style": "IPY_MODEL_3b7e0bdd891743bda0dd4a9d15dd0a42",
1075 | "value": "
"
1076 | }
1077 | },
1078 | "df5e3f91eea7415a888271f8fc68f9a5": {
1079 | "model_module": "@jupyter-widgets/controls",
1080 | "model_module_version": "1.1.0",
1081 | "model_name": "DescriptionStyleModel",
1082 | "state": {
1083 | "description_width": ""
1084 | }
1085 | },
1086 | "e4910a4128764aad9d412609da7c9a53": {
1087 | "model_module": "@jupyter-widgets/base",
1088 | "model_module_version": "1.0.0",
1089 | "model_name": "LayoutModel",
1090 | "state": {}
1091 | },
1092 | "e7e084ac53694252b14094ad1bc1affd": {
1093 | "model_module": "@jupyter-widgets/controls",
1094 | "model_module_version": "1.1.0",
1095 | "model_name": "ButtonStyleModel",
1096 | "state": {}
1097 | },
1098 | "eba95634dd3241f2911fc17342ddf924": {
1099 | "model_module": "@jupyter-widgets/controls",
1100 | "model_module_version": "1.1.0",
1101 | "model_name": "ButtonStyleModel",
1102 | "state": {}
1103 | },
1104 | "f3801cae672644048cd4eb49c89f6fb5": {
1105 | "model_module": "@jupyter-widgets/controls",
1106 | "model_module_version": "1.1.0",
1107 | "model_name": "ButtonModel",
1108 | "state": {
1109 | "icon": "backward",
1110 | "layout": "IPY_MODEL_8bdd74f89a3043e792eabd6a6226a6ab",
1111 | "style": "IPY_MODEL_15dbc9bb00744ca8a4613fd572a49fc0"
1112 | }
1113 | },
1114 | "f5e9b479caa3491496610a1bca70f6f6": {
1115 | "model_module": "@jupyter-widgets/base",
1116 | "model_module_version": "1.0.0",
1117 | "model_name": "LayoutModel",
1118 | "state": {
1119 | "width": "100%"
1120 | }
1121 | },
1122 | "f6b3e8fc467742ac88c9cf3bf03cfa03": {
1123 | "model_module": "@jupyter-widgets/base",
1124 | "model_module_version": "1.0.0",
1125 | "model_name": "LayoutModel",
1126 | "state": {
1127 | "height": "50px",
1128 | "width": "100%"
1129 | }
1130 | }
1131 | },
1132 | "version_major": 2,
1133 | "version_minor": 0
1134 | }
1135 | }
1136 | },
1137 | "nbformat": 4,
1138 | "nbformat_minor": 2
1139 | }
1140 |
--------------------------------------------------------------------------------
/binder/postBuild:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | jupyter nbextension enable --py widgetsnbextension
4 |
5 | # run matplotlib once to generate the font cache
6 | python -c "import matplotlib as mpl; mpl.use('Agg'); import pylab as plt; fig, ax = plt.subplots(); fig.savefig('test.png')"
7 |
8 | test -e test.png && rm test.png
9 |
10 | python -c "import conx as cx; cx.Dataset.get('mnist')"
11 | python -c "import conx as cx; cx.Dataset.get('cifar10')"
12 | python -c "import conx as cx; cx.Dataset.get('cmu_faces_full_size')"
13 | python -c "import conx as cx; cx.Dataset.get('cmu_faces_half_size')"
14 | python -c "import conx as cx; cx.Dataset.get('cmu_faces_quarter_size')"
15 |
--------------------------------------------------------------------------------
/binder/requirements.txt:
--------------------------------------------------------------------------------
1 | conx
2 | tensorflow
3 | numpy
4 | keras
5 | matplotlib
6 | ipywidgets>=7.0
7 | Pillow
8 | IPython
9 | h5py
10 | sklearn
11 | svgwrite
12 | tqdm
13 | requests
14 | pypandoc
15 | sphinx_rtd_theme
16 | nbsphinx
17 | jupyter_sphinx
18 | sphinxcontrib-napoleon
19 | recommonmark
20 | metakernel
21 | pydot
22 | jyro
23 | comet_ml
24 |
--------------------------------------------------------------------------------
/conx/__init__.py:
--------------------------------------------------------------------------------
1 | # conx - a neural network library
2 | #
3 | # Copyright (c) 2016-2017 Douglas S. Blank
4 | #
5 | # This program is free software; you can redistribute it and/or modify
6 | # it under the terms of the GNU General Public License as published by
7 | # the Free Software Foundation; either version 3 of the License, or
8 | # (at your option) any later version.
9 | #
10 | # This program is distributed in the hope that it will be useful,
11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | # GNU General Public License for more details.
14 | #
15 | # You should have received a copy of the GNU General Public License
16 | # along with this program; if not, write to the Free Software
17 | # Foundation, Inc., 51 Franklin Street, Fifth Floor,
18 | # Boston, MA 02110-1301 USA
19 |
20 | import warnings
21 | ## Useless numpy warnings:
22 | warnings.simplefilter(action='ignore', category=FutureWarning)
23 | ## When a model has not yet been compiled:
24 | warnings.filterwarnings("ignore", "No training configuration found in save file.*")
25 |
26 | import sys
27 | import os
28 | import matplotlib
29 | ## If no DISPLAY, then set the matplotlib backend
30 | ## to an alternate to work if in console (Tk, Qt, etc).
31 | if False: # sys.platform == "darwin":
32 | pass # let's not mess with OSX
33 | else:
34 | if (("DISPLAY" not in os.environ) or
35 | (os.environ["DISPLAY"] == "")):
36 | if (matplotlib.get_backend() in [
37 | 'module://ipykernel.pylab.backend_inline',
38 | 'NbAgg',
39 | ]):
40 | pass ## Don't change if server has no DISPLAY but is connected to notebook
41 | else:
42 | matplotlib.use('Agg') # something that will work
43 | from ._version import __version__, VERSION
44 | from .network import *
45 | from .layers import *
46 | from .dataset import *
47 |
48 | print("ConX, version %s" % __version__, file=sys.stderr)
49 | sys.stderr.flush()
50 |
--------------------------------------------------------------------------------
/conx/_version.py:
--------------------------------------------------------------------------------
1 | # conx - a neural network library
2 | #
3 | # Copyright (c) 2016-2017 Douglas S. Blank
4 | #
5 | # This program is free software; you can redistribute it and/or modify
6 | # it under the terms of the GNU General Public License as published by
7 | # the Free Software Foundation; either version 3 of the License, or
8 | # (at your option) any later version.
9 | #
10 | # This program is distributed in the hope that it will be useful,
11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | # GNU General Public License for more details.
14 | #
15 | # You should have received a copy of the GNU General Public License
16 | # along with this program; if not, write to the Free Software
17 | # Foundation, Inc., 51 Franklin Street, Fifth Floor,
18 | # Boston, MA 02110-1301 USA
19 |
20 | __version__ = "3.7.10"
21 | VERSION = tuple([(int(v) if v.isdigit() else v)
22 | for v in __version__.split(".")])
23 |
--------------------------------------------------------------------------------
/conx/activations.py:
--------------------------------------------------------------------------------
1 |
2 | import keras.backend as K
3 | from keras.activations import (softmax as k_softmax,
4 | selu as k_selu)
5 |
6 | def softmax(tensor, axis=-1):
7 | """
8 | Softmax activation function.
9 |
10 | >>> len(softmax([0.1, 0.1, 0.7, 0.0]))
11 | 4
12 | """
13 | return K.eval(k_softmax(K.variable([tensor]), axis))[0].tolist()
14 |
15 | def elu(x, alpha=1.0):
16 | """
17 | Exponential Linear Unit activation function.
18 |
19 | See: https://arxiv.org/abs/1511.07289v1
20 |
21 | def elu(x):
22 | if x >= 0:
23 | return x
24 | else:
25 | return alpha * (math.exp(x) - 1.0)
26 |
27 | >>> elu(0.0)
28 | 0.0
29 | >>> elu(1.0)
30 | 1.0
31 | >>> elu(0.5, alpha=0.3)
32 | 0.5
33 | >>> round(elu(-1), 1)
34 | -0.6
35 | """
36 | return K.eval(K.elu(K.variable(x), alpha)).tolist()
37 |
38 | def selu(x):
39 | """
40 | Scaled Exponential Linear Unit activation function.
41 |
42 | >>> selu(0)
43 | 0.0
44 | """
45 | return K.eval(k_selu(K.variable(x))).tolist()
46 |
47 | def softplus(x):
48 | """
49 | Softplus activation function.
50 |
51 | >>> round(softplus(0), 1)
52 | 0.7
53 | """
54 | return K.eval(K.softplus(K.variable(x))).tolist()
55 |
56 | def softsign(x):
57 | """
58 | Softsign activation function.
59 |
60 | >>> softsign(1)
61 | 0.5
62 | >>> softsign(-1)
63 | -0.5
64 | """
65 | return K.eval(K.softsign(K.variable(x))).tolist()
66 |
67 | def relu(x, alpha=0., max_value=None):
68 | """
69 | Rectified Linear Unit activation function.
70 |
71 | >>> relu(1)
72 | 1.0
73 | >>> relu(-1)
74 | 0.0
75 | """
76 | return K.eval(K.relu(K.variable(x), alpha, max_value)).tolist()
77 |
78 | def tanh(x):
79 | """
80 | Tanh activation function.
81 |
82 | >>> tanh(0)
83 | 0.0
84 | """
85 | return K.eval(K.tanh(K.variable(x))).tolist()
86 |
87 | def sigmoid(x):
88 | """
89 | Sigmoid activation function.
90 |
91 | >>> sigmoid(0)
92 | 0.5
93 | """
94 | return K.eval(K.sigmoid(K.variable(x))).tolist()
95 |
96 | def hard_sigmoid(x):
97 | """
98 | Hard Sigmoid activation function.
99 |
100 | >>> round(hard_sigmoid(-1), 1)
101 | 0.3
102 | """
103 | return K.eval(K.hard_sigmoid(K.variable(x))).tolist()
104 |
105 | def linear(x):
106 | """
107 | Linear activation function.
108 |
109 | >>> linear(1) == 1
110 | True
111 | >>> linear(-1) == -1
112 | True
113 | """
114 | return x
115 |
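Because each of these wrappers evaluates the Keras backend op eagerly and calls `.tolist()`, they accept plain Python numbers or lists and return plain Python values, which is what makes the doctests above possible. A short usage sketch consistent with those doctests:

```
>>> from conx.activations import relu, sigmoid, softmax
>>> relu(-0.5), relu(0.5)
(0.0, 0.5)
>>> sigmoid(0)
0.5
>>> round(sum(softmax([0.1, 0.1, 0.7, 0.0])), 1)   # components sum to ~1
1.0
```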
--------------------------------------------------------------------------------
/conx/datasets/__init__.py:
--------------------------------------------------------------------------------
1 |
2 | ## All functions here must be loadable datasets
3 | ## All modules must be named differently from their functions!
4 | ## Otherwise, confuses tools like nose, inspect, etc.
5 |
6 | from ._mnist import mnist, mnist_h5
7 | from ._cifar10 import cifar10
8 | from ._cifar100 import cifar100
9 | from .cmu_faces import cmu_faces_full_size
10 | from .cmu_faces import cmu_faces_half_size
11 | from .cmu_faces import cmu_faces_quarter_size
12 | from ._gridfonts import gridfonts, figure_ground_a
13 | from ._fingers import fingers
14 | from ._colors import colors
15 |
--------------------------------------------------------------------------------
/conx/datasets/_cifar10.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import conx as cx
3 | from keras.utils import to_categorical
4 |
5 | def cifar10(*args, **kwargs):
6 | dataset = cx.Dataset()
7 | from keras.datasets import cifar10
8 | (x_train, y_train), (x_test, y_test) = cifar10.load_data()
9 | inputs = np.concatenate((x_train, x_test))
10 | x_train, x_test = None, None
11 | inputs = inputs.astype('float32')
12 | inputs /= 255
13 | labels = np.concatenate((y_train, y_test))
14 | y_train, y_test = None, None
15 | targets = to_categorical(labels, 10)
16 | labels = np.array([str(label[0]) for label in labels], dtype=str)
17 | dataset.name = "CIFAR-10"
18 | dataset.description = """
19 | Original source: https://www.cs.toronto.edu/~kriz/cifar.html
20 |
21 | The CIFAR-10 dataset consists of 60000 32x32 colour images in 10
22 | classes, with 6000 images per class.
23 |
24 | The classes are completely mutually exclusive. There is no overlap
25 | between automobiles and trucks. "Automobile" includes sedans, SUVs,
26 | things of that sort. "Truck" includes only big trucks. Neither
27 | includes pickup trucks.
28 | """
29 | dataset.load_direct([inputs], [targets], [labels])
30 | return dataset
31 |
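In practice this loader is usually reached by name, mirroring the `cx.Dataset.get("colors")` example in the colors dataset and the `net.get_dataset("cifar10")` calls in the tests. A minimal sketch (the length values follow from the description above, not from verified output):

```
>>> import conx as cx
>>> ds = cx.Dataset.get("cifar10")   # downloads via keras.datasets on first use
>>> len(ds.inputs)                   # 50,000 train + 10,000 test images
60000
>>> len(ds.targets[0])               # one-hot over the 10 classes
10
```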
--------------------------------------------------------------------------------
/conx/datasets/_cifar100.py:
--------------------------------------------------------------------------------
1 | import conx as cx
2 | import numpy as np
3 | from keras.utils import to_categorical
4 |
5 | def cifar100(*args, **kwargs):
6 | dataset = cx.Dataset()
7 | from keras.datasets import cifar100
8 | (x_train, y_train), (x_test, y_test) = cifar100.load_data()
9 | inputs = np.concatenate((x_train, x_test))
10 | labels = np.concatenate((y_train, y_test))
11 | targets = to_categorical(labels, 100)
12 | labels = np.array([str(label[0]) for label in labels], dtype=str)
13 | inputs = inputs.astype('float32')
14 | inputs /= 255
15 | dataset.name = "CIFAR-100"
16 | dataset.description = """
17 | Original source: https://www.cs.toronto.edu/~kriz/cifar.html
18 |
19 | This dataset is just like the CIFAR-10, except it has 100 classes
20 | containing 600 images each. The 100 classes in the CIFAR-100 are grouped
21 | into 20 superclasses. Each image comes with a "fine" label (the class
22 | to which it belongs) and a "coarse" label (the superclass to which it
23 | belongs). Here is the list of classes in the CIFAR-100:
24 |
25 | Superclass | Classes
26 | -------------------------------|-----------------------------------------------------
27 | aquatic mammals | beaver, dolphin, otter, seal, whale
28 | fish | aquarium fish, flatfish, ray, shark, trout
29 | flowers | orchids, poppies, roses, sunflowers, tulips
30 | food containers | bottles, bowls, cans, cups, plates
31 | fruit and vegetables | apples, mushrooms, oranges, pears, sweet peppers
32 | household electrical devices | clock, computer keyboard, lamp, telephone, television
33 | household furniture | bed, chair, couch, table, wardrobe
34 | insects | bee, beetle, butterfly, caterpillar, cockroach
35 | large carnivores | bear, leopard, lion, tiger, wolf
36 | large man-made outdoor things | bridge, castle, house, road, skyscraper
37 | large natural outdoor scenes | cloud, forest, mountain, plain, sea
38 | large omnivores and herbivores | camel, cattle, chimpanzee, elephant, kangaroo
39 | medium-sized mammals | fox, porcupine, possum, raccoon, skunk
40 | non-insect invertebrates | crab, lobster, snail, spider, worm
41 | people | baby, boy, girl, man, woman
42 | reptiles | crocodile, dinosaur, lizard, snake, turtle
43 | small mammals | hamster, mouse, rabbit, shrew, squirrel
44 | trees | maple, oak, palm, pine, willow
45 | vehicles 1 | bicycle, bus, motorcycle, pickup truck, train
46 | vehicles 2 | lawn-mower, rocket, streetcar, tank, tractor
47 |
48 | """
49 | dataset.load_direct([inputs], [targets], [labels])
50 | return dataset
51 |
--------------------------------------------------------------------------------
/conx/datasets/_colors.py:
--------------------------------------------------------------------------------
1 | import conx as cx
2 | import numpy as np
3 | import csv
4 |
5 | def colors(*args, path='colors.csv',
6 | url="https://raw.githubusercontent.com/Calysto/conx-data/master/colors/colors.csv",
7 | **kwargs):
8 | dataset = cx.Dataset()
9 | from keras.utils import get_file
10 | path = get_file(path, origin=url)
11 | fp = open(path, "r")
12 | reader = csv.reader(fp)
13 | inputs = []
14 | labels = []
15 | targets = []
16 | count = 1
17 | for line in reader:
18 | name, r, g, b = line
19 | if name == "name": continue # first line is header
20 | inputs.append([float(int(r)/255), float(int(g)/255), float(int(b)/255)])
21 | targets.append([count])
22 | labels.append(name)
23 | count += 1
24 | inputs = np.array(inputs, dtype='float32')
25 | targets = np.array(targets, dtype='uint16')
26 | dataset.name = "Colors"
27 | dataset.description = """
28 | Original source: https://github.com/andrewortman/colorbot
29 |
30 | This dataset also includes some entries that were ignored in the original data.
31 |
32 | Inspired by:
33 |
34 | * http://aiweirdness.com/tagged/paint-colors
35 |
36 | When initially loaded, this database has the following format:
37 |
38 | * labels: [color_name_string, ...] # order matches target
39 | * inputs: [[red, green, blue], ...] # scaled between 0 and 1
40 | * targets: [[int], ...] # number of label
41 |
42 | For example:
43 |
44 | ```
45 | >>> import conx as cx
46 | >>> ds = cx.Dataset.get("colors")
47 | >>> ds.labels[0], ds.inputs[0], ds.targets[0]
48 | ('tidewater',
49 | [0.7686274647712708, 0.843137264251709, 0.8352941274642944],
50 | [1])
51 | ```
52 | """
53 | dataset.load_direct([inputs], [targets], [labels])
54 | return dataset
55 |
--------------------------------------------------------------------------------
/conx/datasets/_fingers.py:
--------------------------------------------------------------------------------
1 | import conx as cx
2 | import numpy as np
3 |
4 | def fingers(*args, path='fingers.npz', **kwargs):
5 | dataset = cx.Dataset()
6 | inputs, labels = load_dataset_npz(
7 | path,
8 | "https://raw.githubusercontent.com/Calysto/conx-data/master/fingers/fingers.npz")
9 | inputs = inputs.astype('float32')
10 | inputs /= 255
11 | make_target_vector = lambda label: [int(label == n) for n in range(6)]
12 | targets = np.array([make_target_vector(l) for l in labels]).astype('uint8')
13 | dataset.name = "Fingers"
14 | dataset.description = """
15 | This dataset contains 12,000 RGB images of human hands showing different
16 | numbers of fingers, from zero to five. The same fingers are always used
17 | to represent each number category (e.g., all images of "two" have raised
18 | index and middle fingers). Each image is a 30 x 40 x 3 array of
19 | floating-point numbers in the range 0 to 1. The target data consists of
20 | one-hot binary vectors of size 6 corresponding to the classification
21 | categories "zero" through "five". There are 2000 images for each category.
22 |
23 | Created by Shreeda Segan and Albert Yu at Sarah Lawrence College.
24 | """
25 | dataset.load_direct([inputs], [targets], [labels])
26 | return dataset
27 |
28 |
29 | def load_dataset_npz(path, url):
30 | """loads an .npz file of saved image data, and returns the images and their
31 | associated labels as numpy arrays
32 | """
33 | from keras.utils import get_file
34 | path = get_file(path, origin=url)
35 | f = np.load(path, allow_pickle=True)
36 | images, labels = f['data'], f['labels']
37 | return images, labels
38 |
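The `make_target_vector` lambda above is the whole one-hot encoding; a standalone sketch of what it produces for the six finger-count classes:

```
>>> make_target_vector = lambda label: [int(label == n) for n in range(6)]
>>> make_target_vector(0)
[1, 0, 0, 0, 0, 0]
>>> make_target_vector(5)
[0, 0, 0, 0, 0, 1]
```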
--------------------------------------------------------------------------------
/conx/datasets/_gridfonts.py:
--------------------------------------------------------------------------------
1 | import conx as cx
2 | import numpy as np
3 | from keras.utils import get_file
4 |
5 | def gridfonts(*args, **kwargs):
6 | dataset = cx.Dataset()
7 | url = "https://raw.githubusercontent.com/Calysto/conx-data/master/gridfonts/gridfonts.npy"
8 | path = get_file("gridfonts.npy", origin=url)
9 | ds = np.load(path, allow_pickle=True)
10 | ## [letters, labels]
11 | letters = np.array([matrix for matrix in ds[0]])
12 | targets = np.array([matrix for matrix in ds[0]]) # targets are identical to the inputs
13 | labels = np.array([char for char in ds[1]], dtype=str)
14 | dataset.name = "Gridfonts"
15 | dataset.description = """
16 | This dataset originates from Douglas Hofstadter's research
17 | group:
18 |
19 | http://goosie.cogsci.indiana.edu/pub/gridfonts.data
20 |
21 | 
22 |
23 | These data have been processed to make them neural
24 | network friendly:
25 |
26 | https://github.com/Calysto/conx-data/blob/master/gridfonts/gridfonts.py
27 |
28 | The dataset is composed of letters on a 25 row x 9 column
29 | grid. The inputs and targets are identical, and the labels
30 | contain a string identifying the letter.
31 |
32 | You can read a thesis using part of this dataset here:
33 | https://repository.brynmawr.edu/compsci_pubs/78/
34 | """
35 | dataset.load_direct([letters], [targets], [labels])
36 | return dataset
37 |
38 | def figure_ground_a(*args, **kwargs):
39 | dataset = cx.Dataset()
40 | url = "https://raw.githubusercontent.com/Calysto/conx-data/master/gridfonts/figure_ground_a.npy"
41 | path = get_file("figure_ground_a.npy", origin=url)
42 | ds = np.load(path, allow_pickle=True)
43 | ## [[[letter], [brim, body]], ...]
44 | letters = np.array([pair[0] for pair in ds])
45 | brims = np.array([pair[1][0] for pair in ds])
46 | bodies = np.array([pair[1][1] for pair in ds])
47 | dataset.name = "Figure-Ground A"
48 | dataset.description = """
49 | This dataset (the so-called a-tabase) originates from Douglas
50 | Hofstadter's research group:
51 |
52 | http://goosie.cogsci.indiana.edu/pub/gridfonts.data
53 |
54 | 
55 |
56 | These data (all the letter A) have been processed to make them neural
57 | network friendly:
58 |
59 | https://github.com/Calysto/conx-data/blob/master/gridfonts/gridfonts.py
60 |
61 | The brim and body parts have been identified manually. The dataset is
62 | composed of letters on a 17 row x 9 column grid (4 lines not used on
63 | top and another 4 not used on the bottom of each letter were removed
64 | from the original 25x9 letter images). The inputs are composed of the
65 | full letter. The targets are composed of a picture of the body and
66 | the brim.
67 |
68 | You can read a thesis using part of this dataset here:
69 | https://repository.brynmawr.edu/compsci_pubs/78/
70 | """
71 | dataset.load_direct([letters], [brims, bodies])
72 | return dataset
73 |
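Each record in the a-tabase file is a nested pair, [[letter], [brim, body]], and the comprehensions above just split those into three parallel arrays. A toy sketch of that unpacking with made-up two-element vectors:

```
>>> ds = [[[1, 0], [[1], [0]]],
...       [[0, 1], [[0], [1]]]]      # two hypothetical records
>>> [pair[0] for pair in ds]         # letters
[[1, 0], [0, 1]]
>>> [pair[1][0] for pair in ds]      # brims
[[1], [0]]
>>> [pair[1][1] for pair in ds]      # bodies
[[0], [1]]
```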
--------------------------------------------------------------------------------
/conx/datasets/_mnist.py:
--------------------------------------------------------------------------------
1 | import conx as cx
2 | import numpy as np
3 | from keras.datasets import mnist
4 | from keras.utils import (to_categorical, get_file)
5 |
6 | description = """
7 | Original source: http://yann.lecun.com/exdb/mnist/
8 |
9 | The MNIST dataset contains 70,000 images of handwritten digits (zero
10 | to nine) that have been size-normalized and centered in a square grid
11 | of pixels. Each image is a 28 × 28 × 1 array of floating-point numbers
12 | representing grayscale intensities ranging from 0 (black) to 1
13 | (white). The target data consists of one-hot binary vectors of size
14 | 10, corresponding to the digit classification categories zero through
15 | nine. Some example MNIST images are shown below:
16 |
17 | 
18 | """
19 |
20 | def mnist_h5(*args, **kwargs):
21 | """
22 | Load the Keras MNIST dataset from an H5 file.
23 | """
24 | import h5py
25 |
26 | path = "mnist.h5"
27 | url = "https://raw.githubusercontent.com/Calysto/conx-data/master/mnist/mnist.h5"
28 | path = get_file(path, origin=url)
29 | h5 = h5py.File(path, "r")
30 | dataset = cx.Dataset()
31 | dataset._inputs = h5["inputs"]
32 | dataset._targets = h5["targets"]
33 | dataset._labels = h5["labels"]
34 | dataset.h5 = h5
35 | dataset.name = "MNIST-H5"
36 | dataset.description = description
37 | dataset._cache_values()
38 | return dataset
39 |
40 | def mnist(*args, **kwargs):
41 | from keras.datasets import mnist
42 | import keras.backend as K
43 |
44 | # input image dimensions
45 | img_rows, img_cols = 28, 28
46 | # the data, shuffled and split between train and test sets
47 | (x_train, y_train), (x_test, y_test) = mnist.load_data()
48 | x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
49 | x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
50 | input_shape = (img_rows, img_cols, 1)
51 | x_train = x_train.astype('float16')
52 | x_test = x_test.astype('float16')
53 | inputs = np.concatenate((x_train,x_test)) / 255
54 | labels = np.concatenate((y_train,y_test)) # ints, 0 to 9
55 | ###########################################
56 | # fix mis-labeled image(s) in Keras dataset
57 | labels[10994] = 9
58 | ###########################################
59 | targets = to_categorical(labels).astype("uint8")
60 | labels = np.array([str(label) for label in labels], dtype=str)
61 | dataset = cx.Dataset()
62 | dataset.load_direct([inputs], [targets], [labels])
63 | return dataset
64 |
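The one-hot targets here come straight from `to_categorical`; a tiny sketch of the transformation being applied (including the `uint8` cast used above):

```
>>> from keras.utils import to_categorical
>>> to_categorical([0, 3], 10).astype("uint8").tolist()
[[1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0]]
```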
--------------------------------------------------------------------------------
/conx/datasets/cmu_faces.py:
--------------------------------------------------------------------------------
1 | import conx as cx
2 | import numpy as np
3 |
4 | def cmu_faces_full_size(*args, path="cmu_faces_full_size.npz", **kwargs):
5 | dataset = cx.Dataset()
6 | inputs, labels = load_dataset_npz(
7 | path,
8 | "https://raw.githubusercontent.com/Calysto/conx-data/master/cmu_faces/cmu_faces_full_size.npz")
9 | dataset.name = "CMU Faces, full-size"
10 | dataset.description = """
11 | Original source: http://archive.ics.uci.edu/ml/datasets/cmu+face+images
12 | """
13 | return process_face_data(dataset, inputs, labels)
14 |
15 | def cmu_faces_quarter_size(*args, path="cmu_faces_quarter_size.npz", **kwargs):
16 | dataset = cx.Dataset()
17 | inputs, labels = load_dataset_npz(
18 | path,
19 | "https://raw.githubusercontent.com/Calysto/conx-data/master/cmu_faces/cmu_faces_quarter_size.npz")
20 | dataset.name = "CMU Faces, quarter-size"
21 | dataset.description = """
22 | Original source: http://archive.ics.uci.edu/ml/datasets/cmu+face+images
23 | """
24 | return process_face_data(dataset, inputs, labels)
25 |
26 | def cmu_faces_half_size(*args, path="cmu_faces_half_size.npz", **kwargs):
27 | dataset = cx.Dataset()
28 | inputs, labels = load_dataset_npz(
29 | path,
30 | "https://raw.githubusercontent.com/Calysto/conx-data/master/cmu_faces/cmu_faces_half_size.npz")
31 | dataset.name = "CMU Faces, half-size"
32 | dataset.description = """
33 | Original source: http://archive.ics.uci.edu/ml/datasets/cmu+face+images
34 | """
35 | return process_face_data(dataset, inputs, labels)
36 |
37 | def load_dataset_npz(path, url):
38 | """loads an .npz file of saved image data, and returns the images and their
39 | associated labels as numpy arrays
40 | """
41 | from keras.utils import get_file
42 | path = get_file(path, origin=url)
43 | f = np.load(path, allow_pickle=True)
44 | images, labels = f['data'], f['labels']
45 | return images, labels
46 |
47 | def create_pose_targets(labels):
48 | """converts a list of label strings to one-hot pose target vectors"""
49 | pose_names = ['left', 'forward', 'up', 'right']
50 | make_target_vector = lambda x: [int(x == name) for name in pose_names]
51 | poses = [s.split('_')[1] for s in labels]
52 | return np.array([make_target_vector(p) for p in poses]).astype('uint8')
53 |
54 | def process_face_data(dataset, inputs, labels):
55 | targets = create_pose_targets(labels)
56 | dataset.load_direct([inputs], [targets], [labels])
57 | return dataset
58 |
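The pose is the second underscore-separated field of each label, which `create_pose_targets` one-hot encodes against the four pose names. A sketch with a hypothetical label string (real labels follow the userid_pose_expression_eyes naming of the UCI source):

```
>>> pose_names = ['left', 'forward', 'up', 'right']
>>> label = "an2i_up_happy_open"     # hypothetical example label
>>> pose = label.split('_')[1]
>>> [int(pose == name) for name in pose_names]
[0, 0, 1, 0]
```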
--------------------------------------------------------------------------------
/conx/layers.py:
--------------------------------------------------------------------------------
1 | # conx - a neural network library
2 | #
3 | # Copyright (c) Douglas S. Blank
4 | #
5 | # This program is free software; you can redistribute it and/or modify
6 | # it under the terms of the GNU General Public License as published by
7 | # the Free Software Foundation; either version 3 of the License, or
8 | # (at your option) any later version.
9 | #
10 | # This program is distributed in the hope that it will be useful,
11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | # GNU General Public License for more details.
14 | #
15 | # You should have received a copy of the GNU General Public License
16 | # along with this program; if not, write to the Free Software
17 | # Foundation, Inc., 51 Franklin Street, Fifth Floor,
18 | # Boston, MA 02110-1301 USA
19 |
20 | """
21 | The conx.layers module contains the code for all of the layers.
22 | In addition, it dynamically loads all of the Keras layers and
23 | wraps them as a conx layer.
24 | """
25 |
26 | #------------------------------------------------------------------------
27 |
28 | import numbers
29 | import operator
30 | from functools import reduce
31 | import sys
32 | import inspect
33 | import string
34 | import html
35 | import copy
36 | import sys
37 | import re
38 | import os
39 |
40 | import numpy as np
41 | import keras
42 | import keras.backend as K
43 | from keras.optimizers import (SGD, RMSprop, Adagrad, Adadelta, Adam, Adamax, Nadam,
44 | TFOptimizer)
45 |
46 | from .utils import *
47 |
48 | #------------------------------------------------------------------------
49 | ON_RTD = os.environ.get('READTHEDOCS', None) == 'True'
50 | #------------------------------------------------------------------------
51 |
52 | pypandoc = None
53 | if ON_RTD: ## takes too long to load, unless really needed
54 | try:
55 | import pypandoc
56 | except:
57 | pass # won't turn Keras comments into rft for documentation
58 |
59 | def make_layer(config):
60 | import conx.layers
61 | layer = getattr(conx.layers, config["class"])
62 | return layer(config["name"], *config["args"], **config["params"])
63 |
64 | class _BaseLayer():
65 | """
66 | The base class for all conx layers.
67 |
68 | See :any:`Layer` for more details.
69 | """
70 | ACTIVATION_FUNCTIONS = ('relu', 'sigmoid', 'linear', 'softmax', 'tanh',
71 | 'elu', 'selu', 'softplus', 'softsign', 'hard_sigmoid')
72 | CLASS = None
73 |
74 | def __init__(self, name, *args, **params):
75 | self.config = {
76 | "class": self.__class__.__name__,
77 | "name": name,
78 | "args": args,
79 | "params": copy.copy(params),
80 | }
81 | if not (isinstance(name, str) and len(name) > 0):
82 | raise Exception('bad layer name: %s' % (name,))
83 | self._check_layer_name(name)
84 | self.name = name
85 | self.params = params
86 | self.args = args
87 | self.handle_merge = False
88 | self.network = None
89 | params["name"] = name
90 | self.shape = None
91 | self.vshape = None
92 | self.keep_aspect_ratio = False
93 | self.image_maxdim = None
94 | self.image_pixels_per_unit = None
95 | self.visible = True
96 | self.colormap = None
97 | self.minmax = None
98 | self.model = None
99 | self.decode_model = None
100 | self.input_names = []
101 | self.feature = 0
102 | self.keras_layer = None
103 | self.max_draw_units = 20
104 | # used to determine image ranges:
105 | self.activation = params.get("activation", None) # make a copy, if one, and str
106 | if not isinstance(self.activation, str):
107 | self.activation = None
108 | # set visual shape for display purposes
109 | if 'vshape' in params:
110 | vs = params['vshape']
111 | del params["vshape"] # drop those that are not Keras parameters
112 | if not valid_vshape(vs):
113 | raise Exception('bad vshape: %s' % (vs,))
114 | else:
115 | self.vshape = vs
116 |
117 | if 'keep_aspect_ratio' in params:
118 | ar = params['keep_aspect_ratio']
119 | del params["keep_aspect_ratio"] # drop those that are not Keras parameters
120 | self.keep_aspect_ratio = ar
121 |
122 | if 'image_maxdim' in params:
123 | imd = params['image_maxdim']
124 | del params["image_maxdim"] # drop those that are not Keras parameters
125 | if not isinstance(imd, numbers.Integral):
126 | raise Exception('bad image_maxdim: %s' % (imd,))
127 | else:
128 | self.image_maxdim = imd
129 |
130 | if 'image_pixels_per_unit' in params:
131 | imd = params['image_pixels_per_unit']
132 | del params["image_pixels_per_image"] # drop those that are not Keras parameters
133 | if not isinstance(imd, numbers.Integral):
134 | raise Exception('bad image_pixels_per_unit: %s' % (imd,))
135 | else:
136 | self.image_pixels_per_unit = imd
137 |
138 | if 'visible' in params:
139 | visible = params['visible']
140 | del params["visible"] # drop those that are not Keras parameters
141 | self.visible = visible
142 |
143 | if 'colormap' in params:
144 | colormap = params["colormap"]
145 | if isinstance(colormap, (tuple, list)):
146 | if len(colormap) != 3:
147 | raise Exception("Invalid colormap format: requires (colormap_name, vmin, vmax)")
148 | else:
149 | self.colormap = colormap[0]
150 | self.minmax = colormap[1:]
151 | else:
152 | self.colormap = colormap
153 | del params["colormap"] # drop those that are not Keras parameters
154 |
155 | if 'minmax' in params:
156 | self.minmax = params['minmax']
157 | del params["minmax"] # drop those that are not Keras parameters
158 |
159 | if 'dropout' in params:
160 | dropout = params['dropout']
161 | del params["dropout"] # we handle dropout layers
162 | if dropout == None:
163 | dropout = 0
164 | dropout_dim = 0
165 | elif isinstance(dropout, numbers.Real):
166 | dropout_dim = 0
167 | elif isinstance(dropout, (list, tuple)):
168 | dropout_dim = dropout[1]
169 | dropout = dropout[0]
170 | else:
171 | raise Exception('bad dropout option: %s' % (dropout,))
172 | if not (0 <= dropout <= 1):
173 | raise Exception('bad dropout rate: %s' % (dropout,))
174 | self.dropout = dropout
175 | self.dropout_dim = dropout_dim
176 | else:
177 | self.dropout = 0
178 | self.dropout_dim = 0
179 |
180 | if 'bidirectional' in params:
181 | bidirectional = params['bidirectional']
182 | del params["bidirectional"] # we handle it
183 | if bidirectional not in ['sum', 'mul', 'concat', 'ave', True, None]:
184 | raise Exception('bad bidirectional value: %s' % (bidirectional,))
185 | self.bidirectional = bidirectional
186 | else:
187 | self.bidirectional = None
188 |
189 | if 'time_distributed' in params:
190 | time_distributed = params['time_distributed']
191 | del params["time_distributed"] # we handle time distributed wrappers
192 | self.time_distributed = time_distributed
193 | else:
194 | self.time_distributed = False
195 |
196 | if 'activation' in params: # let's keep a copy of it
197 | self.activation = params["activation"]
198 | if not isinstance(self.activation, str):
199 | self.activation = None
200 |
201 | self.incoming_connections = []
202 | self.outgoing_connections = []
203 |
204 | def _check_layer_name(self, layer_name):
205 | """
206 | Check to see if a layer name is appropriate.
207 | Raises exception if invalid name.
208 | """
209 | valid_chars = string.ascii_letters + string.digits + "_-%"
210 | if len(layer_name) == 0:
211 | raise Exception("layer name must not be length 0: '%s'" % layer_name)
212 | if not all(char in valid_chars for char in layer_name):
213 | raise Exception("layer name must only contain letters, numbers, '-', and '_': '%s'" % layer_name)
214 | if layer_name.count("%") != layer_name.count("%d"):
215 | raise Exception("layer name must only contain '%%d'; no other formatting allowed: '%s'" % layer_name)
216 | if layer_name.count("%d") not in [0, 1]:
217 | raise Exception("layer name must contain at most one %%d: '%s'" % layer_name)
218 |
219 | def on_connect(self, relation, other_layer):
220 | """
221 | relation is "to"/"from" indicating which layer self is.
222 | """
223 | pass
224 |
225 | def __repr__(self):
226 | return "<%s name='%s'>" % (self.CLASS.__name__, self.name)
227 |
228 | def kind(self):
229 | """
230 | Determines whether a layer is an "input", "hidden", or "output" layer based on
231 | its connections. If no connections, then it is "unconnected".
232 | """
233 | if len(self.incoming_connections) == 0 and len(self.outgoing_connections) == 0:
234 | return 'unconnected'
235 | elif len(self.incoming_connections) > 0 and len(self.outgoing_connections) > 0:
236 | return 'hidden'
237 | elif len(self.incoming_connections) > 0:
238 | return 'output'
239 | else:
240 | return 'input'
241 |
242 | def make_input_layer_k(self):
243 | """
244 | Make an input layer for this type of layer. This allows Layers to have
245 | special kinds of input layers. Would need to be overridden in a subclass.
246 | """
247 | return keras.layers.Input(self.shape, *self.args, **self.params)
248 |
249 | def make_input_layer_k_text(self):
250 | """
251 | Make an input layer for this type of layer. This allows Layers to have
252 | special kinds of input layers. Would need to be overridden in a subclass.
253 | """
254 | ## FIXME: WIP, don't include args, params if empty
255 | return "keras.layers.Input(%s, *%s, **%s)" % (self.shape, self.args, self.params)
256 |
257 | def make_keras_function(self):
258 | """
259 | This makes the Keras function for the functional interface.
260 | """
261 | ## This is for all Keras layers:
262 | return self.CLASS(*self.args, **self.params)
263 |
264 | def make_keras_function_text(self):
265 | """
266 | This makes the Keras function for the functional interface.
267 | """
268 | ## This is for all Keras layers:
269 | ## FIXME: WIP, don't include args, params if empty
270 | return "keras.layers.%s(*%s, **%s)" % (self.CLASS.__name__, self.args, self.params)
271 |
272 | def make_keras_functions(self):
273 | """
274 | Make all Keras functions for this layer, including its own,
275 | dropout, etc.
276 | """
277 | from keras.layers import (TimeDistributed, Bidirectional, Dropout,
278 | SpatialDropout1D, SpatialDropout2D, SpatialDropout3D)
279 |
280 | k = self.make_keras_function() # can override
281 | ### wrap layer:
282 | if self.bidirectional:
283 | if self.bidirectional is True:
284 | k = Bidirectional(k, name=self.name)
285 | else:
286 | k = Bidirectional(k, merge_mode=self.bidirectional, name=self.name)
287 | if self.time_distributed:
288 | k = TimeDistributed(k, name=self.name)
289 | ### sequence:
290 | k = [k]
291 | if self.dropout > 0:
292 | if self.dropout_dim == 0:
293 | k += [Dropout(self.dropout)]
294 | elif self.dropout_dim == 1:
295 | k += [SpatialDropout1D(self.dropout)]
296 | elif self.dropout_dim == 2:
297 | k += [SpatialDropout2D(self.dropout)]
298 | elif self.dropout_dim == 3:
299 | k += [SpatialDropout3D(self.dropout)]
300 | return k
301 |
302 | def make_keras_functions_text(self):
303 | """
304 | Make all Keras functions for this layer, including its own,
305 | dropout, etc.
306 | """
307 | def bidir_mode(name):
308 | if name in [True, None]:
309 | return "concat"
310 | else:
311 | return name
312 | program = self.make_keras_function_text()
313 | if self.time_distributed:
314 | program = "keras.layers.TimeDistributed(%s, name='%s')" % (program, self.name)
315 | if self.bidirectional:
316 | program = "keras.layers.Bidirectional(%s, name='%s', mode='%s')" % (
317 | program, self.name, bidir_mode(self.bidirectional))
318 | retval = [program]
319 | if self.dropout > 0:
320 | if self.dropout_dim == 0:
321 | retval += ["keras.layers.Dropout(self.dropout)"]
322 | elif self.dropout_dim == 1:
323 | retval += ["keras.layers.SpatialDropout1D(self.dropout)"]
324 | elif self.dropout_dim == 2:
325 | retval += ["keras.layers.SpatialDropout2D(self.dropout)"]
326 | elif self.dropout_dim == 3:
327 | retval += ["keras.layers.SpatialDropout3D(self.dropout)"]
328 | return "[" + (", ".join(retval)) + "]"
329 |
330 | def get_colormap(self):
331 | if self.__class__.__name__ == "FlattenLayer":
332 | if self.colormap is None:
333 | return self.incoming_connections[0].get_colormap()
334 | else:
335 | return self.colormap
336 | elif self.kind() == "input":
337 | return "gray" if self.colormap is None else self.colormap
338 | else:
339 | return get_colormap() if self.colormap is None else self.colormap
340 |
341 | # class: _BaseLayer
342 | def make_image(self, vector, colormap=None, config={}):
343 | """
344 | Given an output (activation) vector, make and
345 | return an image widget.
346 | """
347 | import keras.backend as K
348 | from matplotlib import cm
349 | import PIL
350 | import PIL.ImageDraw
351 | if self.vshape and self.vshape != self.shape:
352 | vector = vector.reshape(self.vshape)
353 | if len(vector.shape) > 2:
354 | ## Drop dimensions of vector:
355 | s = slice(None, None)
356 | args = []
357 | # The data is in the same format as Keras
358 | # so we can ask Keras what that format is:
359 | # ASSUMES: that the network that loaded the
360 | # dataset has the same image_data_format as
361 | # now:
362 | if K.image_data_format() == 'channels_last':
363 | for d in range(len(vector.shape)):
364 | if d in [0, 1]:
365 | args.append(s) # keep the first two
366 | else:
367 | args.append(self.feature) # pick which to use
368 | else: # 'channels_first'
369 | count = 0
370 | for d in range(len(vector.shape)):
371 | if d in [0]:
372 | args.append(self.feature) # pick which to use
373 | else:
374 | if count < 2:
375 | args.append(s)
376 | count += 1
377 | vector = vector[args]
378 | vector = scale_output_for_image(vector, self.get_act_minmax(), truncate=True)
379 | if len(vector.shape) == 1:
380 | vector = vector.reshape((1, vector.shape[0]))
381 | size = config.get("pixels_per_unit",1)
382 | new_width = vector.shape[0] * size # in pixels
383 | new_height = vector.shape[1] * size # in pixels
384 | if colormap is None:
385 | colormap = self.get_colormap()
386 | if colormap is not None:
387 | try:
388 | cm_hot = cm.get_cmap(colormap)
389 | except:
390 | cm_hot = cm.get_cmap("RdGy")
391 | vector = cm_hot(vector)
392 | vector = np.uint8(vector * 255)
393 | if max(vector.shape) <= self.max_draw_units:
394 | # Need to make it bigger, to draw circles:
395 | ## Make this value too small, and borders are blocky;
396 | ## too big and borders are too thin
397 | scale = int(250 / max(vector.shape))
398 | size = size * scale
399 | image = PIL.Image.new('RGBA', (new_height * scale, new_width * scale), color="white")
400 | draw = PIL.ImageDraw.Draw(image)
401 | for row in range(vector.shape[1]):
402 | for col in range(vector.shape[0]):
403 | ## upper-left, lower-right:
404 | draw.rectangle((row * size, col * size,
405 | (row + 1) * size - 1, (col + 1) * size - 1),
406 | fill=tuple(vector[col][row]),
407 | outline='black')
408 | else:
409 | image = PIL.Image.fromarray(vector)
410 | image = image.resize((new_height, new_width))
411 | ## If rotated, and has features, rotate it:
412 | if config.get("svg_rotate", False):
413 | output_shape = self.get_output_shape()
414 | if ((isinstance(output_shape, tuple) and len(output_shape) >= 3) or
415 | (self.vshape is not None and len(self.vshape) == 2)):
416 | image = image.rotate(90, expand=1)
417 | return image
418 |
419 | def make_dummy_vector(self, default_value=0.0):
420 | """
421 | This is in the easy-to-use human format (list of lists ...)
422 | """
423 | ## FIXME: for pictures give a vector
424 | if (self.shape is None or
425 | (isinstance(self.shape, (list, tuple)) and None in self.shape)):
426 | v = np.ones(100) * default_value
427 | else:
428 | v = np.ones(self.shape) * default_value
429 | lo, hi = self.get_act_minmax()
430 | v *= (lo + hi) / 2.0
431 | return v.tolist()
432 |
433 | def get_act_minmax(self):
434 | """
435 | Get the activation (output) min/max for a layer.
436 |
437 | Note: +/- 2 represents infinity
438 | """
439 | if self.minmax is not None: ## allow override
440 | return self.minmax
441 | else:
442 | if self.__class__.__name__ == "FlattenLayer":
443 | in_layer = self.incoming_connections[0]
444 | return in_layer.get_act_minmax()
445 | elif self.kind() == "input":
446 | ## try to get from dataset
447 | if self.network and len(self.network.dataset) > 0:
448 | bank_idx = self.network.input_bank_order.index(self.name)
449 | return self.network.dataset._inputs_range[bank_idx]
450 | else:
451 | return (-2,+2)
452 | else: ## try to get from activation function
453 | if self.activation in ["tanh", 'softsign']:
454 | return (-1,+1)
455 | elif self.activation in ["sigmoid",
456 | "softmax",
457 | 'hard_sigmoid']:
458 | return (0,+1)
459 | elif self.activation in ["relu", 'elu', 'softplus']:
460 | return (0,+2)
461 | elif self.activation in ["selu", "linear"]:
462 | return (-2,+2)
463 | else: # default, or unknown activation function
464 | ## Enhancement:
465 | ## Someday could sample the unknown activation function
466 | ## and provide reasonable values
467 | return (-2,+2)
468 |
469 | def get_output_shape(self):
470 | ## FIXME: verify this:
471 | if self.keras_layer is not None:
472 | if hasattr(self.keras_layer, "output_shape"):
473 | return self.keras_layer.output_shape
474 | ## Tensors don't have output_shape; is this right:
475 | elif hasattr(self.keras_layer, "_keras_shape"):
476 | return self.keras_layer._keras_shape
477 |
478 | def tooltip(self):
479 | """
480 | String (with newlines) for describing a layer.
481 | """
482 | def format_range(minmax):
483 | minv, maxv = minmax
484 | if minv <= -2:
485 | minv = "-Infinity"
486 | if maxv >= +2:
487 | maxv = "+Infinity"
488 | return "(%s, %s)" % (minv, maxv)
489 |
490 | kind = self.kind()
491 | retval = "Layer: %s (%s)" % (html.escape(self.name), kind)
492 | retval += "\n output range: %s" % (format_range(self.get_act_minmax(),))
493 | if self.shape:
494 | retval += "\n shape = %s" % (self.shape, )
495 | if self.dropout:
496 | retval += "\n dropout = %s" % self.dropout
497 | if self.dropout_dim > 0:
498 | retval += "\n dropout dimension = %s" % self.dropout_dim
499 | if self.bidirectional:
500 | retval += "\n bidirectional = %s" % self.bidirectional
501 | if kind == "input":
502 | retval += "\n Keras class = Input"
503 | else:
504 | retval += "\n Keras class = %s" % self.CLASS.__name__
505 | for key in self.params:
506 | if key in ["name"] or self.params[key] is None:
507 | continue
508 | retval += "\n %s = %s" % (key, html.escape(str(self.params[key])))
509 | return retval
510 |
511 | class Layer(_BaseLayer):
512 | """
513 | The default layer type. Will create either an InputLayer, or DenseLayer,
514 | depending on its context after :any:`Network.connect`.
515 |
516 | Arguments:
517 | name: The name of the layer. Must be unique in this network. Should
518 | not contain special HTML characters.
519 |
520 | Examples:
521 | >>> layer = Layer("input", 10)
522 | >>> layer.name
523 | 'input'
524 |
525 | >>> from conx import Network
526 | >>> net = Network("XOR2")
527 | >>> net.add(Layer("input", 2))
528 | 'input'
529 | >>> net.add(Layer("hidden", 5))
530 | 'hidden'
531 | >>> net.add(Layer("output", 2))
532 | 'output'
533 | >>> net.connect()
534 | >>> net["input"].kind()
535 | 'input'
536 | >>> net["output"].kind()
537 | 'output'
538 |
539 | Note:
540 | See also: :any:`Network`, :any:`Network.add`, and :any:`Network.connect`
541 | for more information. See https://keras.io/ for more information on
542 | Keras layers.
543 | """
544 | CLASS = keras.layers.Dense
545 | def __init__(self, name: str, shape, **params):
546 | super().__init__(name, **params)
547 | self.config.update({
548 | "class": self.__class__.__name__,
549 | "name": name,
550 | "args": [shape],
551 | "params": copy.copy(params),
552 | })
553 | if not valid_shape(shape):
554 | raise Exception('bad shape: %s' % (shape,))
555 | # set layer topology (shape) and number of units (size)
556 | if isinstance(shape, numbers.Integral) or shape is None:
557 | self.shape = (shape,)
558 | self.size = shape
559 | else:
560 | # multi-dimensional layer
561 | self.shape = shape
562 | if all([isinstance(n, numbers.Integral) for n in shape]):
563 | self.size = reduce(operator.mul, shape)
564 | else:
565 | self.size = None # can't compute size because some dim are None
566 |
567 | if 'activation' in params:
568 | act = params['activation']
569 | if act == None:
570 | act = 'linear'
571 | if not (callable(act) or act in Layer.ACTIVATION_FUNCTIONS):
572 | raise Exception('unknown activation function: %s' % (act,))
573 | self.activation = act
574 | if not isinstance(self.activation, str):
575 | self.activation = None
576 |
577 | def __repr__(self):
578 | return "" % (
579 | self.name, self.shape, self.activation)
580 |
581 | def print_summary(self, fp=sys.stdout):
582 | """
583 | Print a summary of the dense/input layer.
584 | """
585 | super().print_summary(fp)
586 | if self.activation:
587 | print(" * **Activation function**:", self.activation, file=fp)
588 | if self.dropout:
589 | print(" * **Dropout percent** :", self.dropout, file=fp)
590 | if self.dropout_dim > 0:
591 | print(" * **Dropout dimension** :", self.dropout_dim, file=fp)
592 | if self.bidirectional:
593 | print(" * **Bidirectional mode** :", self.bidirectional, file=fp)
594 |
595 | def make_keras_function(self):
596 | """
597 | For all Keras-based functions. Returns an instance of the Keras class.
598 | """
599 | return self.CLASS(self.size, **self.params)
600 |
601 | def make_keras_function_text(self):
602 | """
603 | For all Keras-based functions. Returns an instance of the Keras class.
604 | """
605 | return "keras.layers.%s(%s, **%s)" % (self.CLASS.__name__, self.size, self.params)
606 |
607 | class ImageLayer(Layer):
608 | """
609 | A class for images. WIP.
610 | """
611 | def __init__(self, name, dimensions, depth, **params):
612 | ## get value before processing
613 | keep_aspect_ratio = params.get("keep_aspect_ratio", True)
614 | super().__init__(name, dimensions, **params)
615 | self.config.update({
616 | "class": self.__class__.__name__,
617 | "name": name,
618 | "args": [dimensions, depth],
619 | "params": copy.copy(params),
620 | })
621 | if self.vshape is None:
622 | self.vshape = self.shape
623 | ## override defaults set in constructor:
624 | self.keep_aspect_ratio = keep_aspect_ratio
625 | self.dimensions = dimensions
626 | self.depth = depth
627 | if K.image_data_format() == "channels_last":
628 | self.shape = tuple(list(self.shape) + [depth])
629 | self.image_indexes = (0, 1)
630 | else:
631 | self.shape = tuple([depth] + list(self.shape))
632 | self.image_indexes = (1, 2)
633 |
634 | # class: ImageLayer
635 | def make_image(self, vector, colormap=None, config={}):
636 | """
637 | Given an output (activation) vector, make and
638 | return an image widget. Colormap is ignored.
639 | """
640 | ## see K.image_data_format() == 'channels_last': above
641 | ## We keep the dataset data in the right format.
642 | import PIL
643 | v = (vector * 255).astype("uint8")
644 | if self.depth == 1:
645 | v = v.squeeze() # get rid of nested lists (len of 1)
646 | else:
647 | v = v.reshape(self.dimensions[0],
648 | self.dimensions[1],
649 | self.depth)
650 | image = PIL.Image.fromarray(v)
651 | if config.get("svg_rotate", False):
652 | image = image.rotate(90, expand=1)
653 | return image
654 |
655 | class AddLayer(_BaseLayer):
656 | """
657 | A Layer for adding the output vectors of multiple layers together.
658 | """
659 | CLASS = keras.layers.Add
660 | def __init__(self, name, **params):
661 | self.layers = []
662 | super().__init__(name)
663 | self.config.update({
664 | "class": self.__class__.__name__,
665 | "name": name,
666 | "args": [],
667 | "params": copy.copy(params),
668 | })
669 | self.handle_merge = True
670 |
671 | def make_keras_functions(self):
672 | """
673 | This keras function just returns the Tensor.
674 | """
675 | return [lambda k: k]
676 |
677 | def make_keras_function(self):
678 | from keras.layers import Add
679 | layers = [(layer.k if layer.k is not None else layer.keras_layer) for layer in self.layers]
680 | return Add(**self.params)(layers)
681 |
682 | def on_connect(self, relation, other_layer):
683 | """
684 | relation is "to"/"from" indicating which layer self is.
685 | """
686 | if relation == "to":
687 | ## other_layer must be an Input layer
688 | self.layers.append(other_layer)
689 |
690 | class SubtractLayer(AddLayer):
691 | """
692 | A layer for subtracting the output vectors of layers.
693 | """
694 | CLASS = keras.layers.Subtract
695 | def make_keras_function(self):
696 | from keras.layers import Subtract
697 | layers = [(layer.k if layer.k is not None else layer.keras_layer) for layer in self.layers]
698 | return Subtract(**self.params)(layers)
699 |
700 | class MultiplyLayer(AddLayer):
701 | """
702 | A layer for multiplying the output vectors of layers
703 | together.
704 | """
705 | CLASS = keras.layers.Multiply
706 | def make_keras_function(self):
707 | from keras.layers import Multiply
708 | layers = [(layer.k if layer.k is not None else layer.keras_layer) for layer in self.layers]
709 | return Multiply(**self.params)(layers)
710 |
711 | class AverageLayer(AddLayer):
712 | """
713 | A layer for averaging the output vectors of layers
714 | together.
715 | """
716 | CLASS = keras.layers.Average
717 | def make_keras_function(self):
718 | from keras.layers import Average
719 | layers = [(layer.k if layer.k is not None else layer.keras_layer) for layer in self.layers]
720 | return Average(**self.params)(layers)
721 |
722 | class MaximumLayer(AddLayer):
723 | """
724 | A layer for finding the maximum values of layers.
725 | """
726 | CLASS = keras.layers.Maximum
727 | def make_keras_function(self):
728 | from keras.layers import Maximum
729 | layers = [(layer.k if layer.k is not None else layer.keras_layer) for layer in self.layers]
730 | return Maximum(**self.params)(layers)
731 |
732 | class ConcatenateLayer(AddLayer):
733 | """
734 | A layer for sticking layers together.
735 | """
736 | CLASS = keras.layers.Concatenate
737 | def make_keras_function(self):
738 | from keras.layers import Concatenate
739 | layers = [(layer.k if layer.k is not None else layer.keras_layer) for layer in self.layers]
740 | return Concatenate(**self.params)(layers)
741 |
742 | class DotLayer(AddLayer):
743 | """
744 | A layer for computing the dot product between layers.
745 | """
746 | CLASS = keras.layers.Dot
747 | def make_keras_function(self):
748 | from keras.layers import Dot
749 | layers = [(layer.k if layer.k is not None else layer.keras_layer) for layer in self.layers]
750 | return Dot(**self.params)(layers)
751 |
752 | class LambdaLayer(Layer):
753 | CLASS = keras.layers.Lambda
754 | def __init__(self, name, size, function, **params):
755 | super().__init__(name, size, **params)
756 | self.config.update({
757 | "class": self.__class__.__name__,
758 | "name": name,
759 | "args": [size, function],
760 | "params": copy.copy(params),
761 | })
762 |
763 | def make_keras_function(self):
764 | """
765 | For all Keras-based functions. Returns an instance of the Keras class.
766 | """
767 | return self.CLASS(**self.params)
768 |
769 | def make_keras_function_text(self):
770 | """
771 | For all Keras-based functions. Returns an instance of the Keras class.
772 | """
773 | return "keras.layers.%s(**%s)" % (self.CLASS.__name__, self.params)
774 |
775 | class EmbeddingLayer(Layer):
776 | """
777 | A class for embeddings. WIP.
778 | """
779 | def __init__(self, name, in_size, out_size, **params):
780 | super().__init__(name, in_size, **params)
781 | self.config.update({
782 | "class": self.__class__.__name__,
783 | "name": name,
784 | "args": [in_size, out_size],
785 | "params": copy.copy(params),
786 | })
787 | if self.vshape is None:
788 | self.vshape = self.shape
789 | self.in_size = in_size
790 | self.out_size = out_size
791 | self.sequence_size = None # get filled in on_connect
792 |
793 | def make_keras_function(self):
794 | from keras.layers import Embedding as KerasEmbedding
795 | return KerasEmbedding(self.in_size, self.out_size, input_length=self.sequence_size, **self.params)
796 |
797 | def on_connect(self, relation, other_layer):
798 | """
799 | relation is "to"/"from" indicating which layer self is.
800 | """
801 | if relation == "to":
802 | ## other_layer must be an Input layer
803 | self.sequence_size = other_layer.size # get the input_length
804 | self.shape = (self.sequence_size, self.out_size)
805 | if self.sequence_size:
806 | self.size = self.sequence_size * self.out_size
807 | else:
808 | self.size = None
809 | self.vshape = (self.sequence_size, self.out_size)
810 | other_layer.size = (None,) # poke in this otherwise invalid size
811 | other_layer.shape = (self.sequence_size,) # poke in this shape
812 | other_layer.params["dtype"] = "int32" # assume ints
813 | other_layer.make_dummy_vector = lambda v=0.0: np.zeros(self.sequence_size) * v
814 | other_layer.minmax = (0, self.in_size)
815 |
816 | def process_class_docstring(docstring):
817 | docstring = re.sub(r'\n # (.*)\n',
818 | r'\n __\1__\n\n',
819 | docstring)
820 | docstring = re.sub(r' ([^\s\\\(]+):(.*)\n',
821 | r' - __\1__:\2\n',
822 | docstring)
823 | docstring = docstring.replace('    ' * 5, '\t\t')
824 | docstring = docstring.replace('    ' * 3, '\t')
825 | docstring = docstring.replace('    ', '')
826 | return docstring
827 |
828 | ## Dynamically load all of the keras layers, making a conx layer:
829 | ## All of these will have _BaseLayer as their superclass:
830 | keras_module = sys.modules["keras.layers"]
831 | for (name, obj) in inspect.getmembers(keras_module):
832 | if name in ["Embedding", "Input", "Dense", "TimeDistributed",
833 | "Add", "Subtract", "Multiply", "Average",
834 | "Maximum", "Concatenate", "Dot", "Lambda"]:
835 | continue
836 | if type(obj) == type and issubclass(obj, (keras.engine.Layer, )):
837 | new_name = "%sLayer" % name
838 | docstring = obj.__doc__
839 | if pypandoc:
840 | try:
841 | docstring_md = ' **%s**\n\n' % (new_name,)
842 | docstring_md += obj.__doc__
843 | docstring = pypandoc.convert(process_class_docstring(docstring_md), "rst", "markdown_github")
844 | except:
845 | pass
846 | locals()[new_name] = type(new_name, (_BaseLayer,),
847 | {"CLASS": obj,
848 | "__doc__": docstring})
849 |
850 | # for consistency:
851 | DenseLayer = Layer
852 | InputLayer = Layer
853 | AdditionLayer = AddLayer
854 | SubtractionLayer = SubtractLayer
855 | MultiplicationLayer = MultiplyLayer
856 |
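Two things tie this module together: the loop above manufactures a `<Name>Layer` class for every remaining Keras layer, and `make_layer` (defined near the top of the file) can rebuild any layer from its saved `config` dict. A sketch of both, using the generated `Conv2DLayer` wrapper (argument values mirror the tests below; assumes a Keras install matching this code's vintage):

```
>>> from conx.layers import Conv2DLayer, make_layer
>>> layer = Conv2DLayer("conv1", 32, (3, 3), activation="relu")
>>> layer.config["class"]
'Conv2DLayer'
>>> clone = make_layer(layer.config)   # round-trips through the config dict
>>> clone.name
'conv1'
```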
--------------------------------------------------------------------------------
/conx/networks/__init__.py:
--------------------------------------------------------------------------------
1 |
2 | ## All functions here must be loadable networks
3 | ## All modules must be named differently from their functions!
4 | ## Otherwise, confuses tools like nose, inspect, etc.
5 |
6 | from ._keras import vgg16, vgg19, inceptionv3
7 |
--------------------------------------------------------------------------------
/conx/networks/_keras.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 |
4 | import keras.applications
5 | from keras.utils import get_file
6 | from keras.applications.imagenet_utils import preprocess_input, decode_predictions
7 |
8 | from ..utils import import_keras_model
9 |
10 | def vgg_preprocess(input):
11 | batch = np.array(input).reshape((1, 224, 224, 3))
12 | assert np.min(batch) >= 0 and np.max(batch) <= 1
13 | batch *= 255
14 | b = preprocess_input(batch)
15 | return b[0].tolist()
16 |
17 | def inceptionv3_preprocess(input):
18 | batch = np.array(input).reshape((1, 299, 299, 3))
19 | assert np.min(batch) >= 0 and np.max(batch) <= 1
20 | batch *= 255
21 | b = preprocess_input(batch, mode='tf')
22 | return b[0].tolist()
23 |
24 | def vgg_decode(probabilities, top=5):
25 | return decode_predictions(np.array(probabilities).reshape((1,1000)), top=top)[0]
26 |
27 | def vgg16(*args, **kwargs):
28 | WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1'
29 | WEIGHTS_NAME = 'vgg16_weights_tf_dim_ordering_tf_kernels.h5'
30 | if "weights" not in kwargs:
31 | kwargs["weights"] = None
32 | model = keras.applications.VGG16(**kwargs)
33 | network = import_keras_model(model, "VGG16", build_propagate_from_models=False)
34 | weights_path = get_file(
35 | WEIGHTS_NAME,
36 | os.path.join(WEIGHTS_PATH, WEIGHTS_NAME),
37 | cache_subdir='models',
38 | file_hash='64373286793e3c8b2b4e3219cbf3544b')
39 | network.load_weights(*weights_path.rsplit("/", 1))
40 | network.config["hspace"] = 200
41 | network.preprocess = vgg_preprocess
42 | network.postprocess = vgg_decode
43 | network.information = """
44 | This network architecture comes from the paper:
45 |
46 | Very Deep Convolutional Networks for Large-Scale Image Recognition
47 | by Karen Simonyan and Andrew Zisserman.
48 |
49 | Their network was trained on the ImageNet challenge dataset.
50 | The dataset contains 32,326 images broken down into 1,000 categories.
51 |
52 | The network was trained for 74 epochs on the training data. This typically
53 | took 3 to 4 weeks on a computer with 4 GPUs. This network's weights were
54 | converted from the original Caffe model into Keras.
55 |
56 | Sources:
57 | * https://arxiv.org/pdf/1409.1556.pdf
58 | * http://www.robots.ox.ac.uk/~vgg/research/very_deep/
59 | * http://www.image-net.org/challenges/LSVRC/
60 | * http://image-net.org/challenges/LSVRC/2014/
61 | * http://image-net.org/challenges/LSVRC/2014/browse-synsets
62 | """
63 | return network
64 |
65 | def vgg19(*args, **kwargs):
66 | WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1'
67 | WEIGHTS_NAME = 'vgg19_weights_tf_dim_ordering_tf_kernels.h5'
68 | if "weights" not in kwargs:
69 | kwargs["weights"] = None
70 | model = keras.applications.VGG19(**kwargs)
71 | network = import_keras_model(model, "VGG19", build_propagate_from_models=False)
72 | weights_path = get_file(
73 | WEIGHTS_NAME,
74 | os.path.join(WEIGHTS_PATH, WEIGHTS_NAME),
75 | cache_subdir='models',
76 | file_hash='253f8cb515780f3b799900260a226db6')
77 | network.load_weights(*weights_path.rsplit("/", 1))
78 | network.config["hspace"] = 200
79 | network.preprocess = vgg_preprocess
80 | network.postprocess = vgg_decode
81 | network.information = """
82 | This network architecture comes from the paper:
83 |
84 | Very Deep Convolutional Networks for Large-Scale Image Recognition
85 | by Karen Simonyan and Andrew Zisserman.
86 |
87 | Their network was trained on the ImageNet challenge dataset.
88 | The dataset contains 32,326 images broken down into 1,000 categories.
89 |
90 | The network was trained for 74 epochs on the training data. This typically
91 | took 3 to 4 weeks on a computer with 4 GPUs. This network's weights were
92 | converted from the original Caffe model into Keras.
93 |
94 | Sources:
95 | * https://arxiv.org/pdf/1409.1556.pdf
96 | * http://www.robots.ox.ac.uk/~vgg/research/very_deep/
97 | * http://www.image-net.org/challenges/LSVRC/
98 | * http://image-net.org/challenges/LSVRC/2014/
99 | * http://image-net.org/challenges/LSVRC/2014/browse-synsets
100 | """
101 | return network
102 |
103 | def inceptionv3(*args, **kwargs):
104 | WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.5'
105 | WEIGHTS_NAME = 'inception_v3_weights_tf_dim_ordering_tf_kernels.h5'
106 | if "weights" not in kwargs:
107 | kwargs["weights"] = None
108 | model = keras.applications.InceptionV3(**kwargs)
109 | network = import_keras_model(model, "InceptionV3", build_propagate_from_models=False)
110 | weights_path = get_file(
111 | WEIGHTS_NAME,
112 | os.path.join(WEIGHTS_PATH, WEIGHTS_NAME),
113 | cache_subdir='models',
114 | file_hash='9a0d58056eeedaa3f26cb7ebd46da564')
115 | network.load_weights(*weights_path.rsplit("/", 1))
116 | network.config["hspace"] = 200
117 | network.preprocess = inceptionv3_preprocess
118 | network.postprocess = vgg_decode
119 | network.information = """
120 | This network architecture comes from the paper:
121 |
122 | Rethinking the Inception Architecture for Computer Vision
123 |
124 | The default input size for this model is 299 x 299.
125 |
126 | These weights are released under the [Apache License](https://github.com/tensorflow/models/blob/master/LICENSE).
127 |
128 | Sources:
129 |
130 | * http://arxiv.org/abs/1512.00567
131 | """
132 | return network
133 |
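All three loaders return a wired-up conx network with `preprocess`/`postprocess` hooks attached, so classification is a propagate sandwiched between the two. A usage sketch (the random input is only a stand-in for a real 224x224 RGB image scaled to 0-1; the first call downloads the weights):

```
>>> from conx.networks import vgg16
>>> import numpy as np
>>> net = vgg16()
>>> img = np.random.rand(224, 224, 3).tolist()   # placeholder image
>>> preds = net.postprocess(net.propagate(net.preprocess(img)))
>>> len(preds)                                   # top-5 (id, name, score) tuples
5
```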
--------------------------------------------------------------------------------
/conx/tests/test_network.py:
--------------------------------------------------------------------------------
1 | from conx import *
2 |
3 | def test_network_constructor():
4 | """
5 | Network constructor.
6 | """
7 | net = Network("Constructor", 2, 5, 2)
8 | assert net is not None
9 |
10 | def test_xor1():
11 | """
12 | Standard XOR.
13 | """
14 | net = Network("XOR")
15 | net.add(Layer("input", 2))
16 | net.add(Layer("hidden", 5))
17 | net.add(Layer("output", 1))
18 | net.connect("input", "hidden")
19 | net.connect("hidden", "output")
20 | net.compile(error="binary_crossentropy", optimizer="adam")
21 | net.summary()
22 | net.model.summary()
23 | net.dataset.load([[[0, 0], [0]],
24 | [[0, 1], [1]],
25 | [[1, 0], [1]],
26 | [[1, 1], [0]]])
27 | net.train(epochs=2000, accuracy=1, report_rate=25, plot=False)
28 | net.evaluate(show=True)
29 | net.save_weights("/tmp")
30 | net.load_weights("/tmp")
31 | svg = net.to_svg()
32 | assert net is not None
33 |
34 | def test_xor2():
35 | """
36 | Two inputs, two outputs.
37 | """
38 | net = Network("XOR2")
39 | net.add(Layer("input1", shape=1))
40 | net.add(Layer("input2", shape=1))
41 | net.add(Layer("hidden1", shape=2, activation="sigmoid"))
42 | net.add(Layer("hidden2", shape=2, activation="sigmoid"))
43 | net.add(Layer("shared-hidden", shape=2, activation="sigmoid"))
44 | net.add(Layer("output1", shape=1, activation="sigmoid"))
45 | net.add(Layer("output2", shape=1, activation="sigmoid"))
46 | net.connect("input1", "hidden1")
47 | net.connect("input2", "hidden2")
48 | net.connect("hidden1", "shared-hidden")
49 | net.connect("hidden2", "shared-hidden")
50 | net.connect("shared-hidden", "output1")
51 | net.connect("shared-hidden", "output2")
52 | net.compile(error='mean_squared_error',
53 | optimizer=SGD(lr=0.3, momentum=0.9))
54 |
55 | net.dataset.load([
56 | ([[0],[0]], [[0],[0]]),
57 | ([[0],[1]], [[1],[1]]),
58 | ([[1],[0]], [[1],[1]]),
59 | ([[1],[1]], [[0],[0]])
60 | ])
61 | net.train(2000, report_rate=10, accuracy=1, plot=False)
62 | net.evaluate(show=True)
63 | net.propagate_to("shared-hidden", [[1], [1]])
64 | net.propagate_to("output1", [[1], [1]])
65 | net.propagate_to("output2", [[1], [1]])
66 | net.propagate_to("hidden1", [[1], [1]])
67 | net.propagate_to("hidden2", [[1], [1]])
68 | net.propagate_to("output1", [[1], [1]])
69 | net.propagate_to("output2", [[1], [1]])
70 | net.save_weights("/tmp")
71 | net.load_weights("/tmp")
72 | net.evaluate(show=True)
73 | svg = net.to_svg()
74 | assert net is not None
75 |
76 | def test_dataset():
77 | """
78 | Load Virtual MNIST dataset after network creation.
79 | """
80 | net = Network("MNIST")
81 | net.add(Layer("input", shape=784, vshape=(28, 28), colormap="hot", minmax=(0,1)))
82 | net.add(Layer("hidden1", shape=512, vshape=(16,32), activation='relu', dropout=0.2))
83 | net.add(Layer("hidden2", shape=512, vshape=(16,32), activation='relu', dropout=0.2))
84 | net.add(Layer("output", shape=10, activation='softmax'))
85 | net.connect('input', 'hidden1')
86 | net.connect('hidden1', 'hidden2')
87 | net.connect('hidden2', 'output')
88 | net.compile(optimizer="adam", error="binary_crossentropy")
89 | net.get_dataset("mnist")
90 | assert net is not None
91 | net.dataset.clear()
92 |
93 | def test_dataset2():
94 | """
95 | Load the dataset after building the network, then split and slice it.
96 | """
97 | net = Network("MNIST")
98 | net.add(Layer("input", shape=784, vshape=(28, 28), colormap="hot", minmax=(0,1)))
99 | net.add(Layer("hidden1", shape=512, vshape=(16,32), activation='relu', dropout=0.2))
100 | net.add(Layer("hidden2", shape=512, vshape=(16,32), activation='relu', dropout=0.2))
101 | net.add(Layer("output", shape=10, activation='softmax'))
102 | net.connect('input', 'hidden1')
103 | net.connect('hidden1', 'hidden2')
104 | net.connect('hidden2', 'output')
105 | net.compile(optimizer="adam", error="binary_crossentropy")
106 | net.get_dataset("mnist")
107 | net.dataset.split(100)
108 | net.dataset.slice(100)
109 | assert net is not None
110 | net.dataset.clear()
111 |
112 | def test_images():
113 | net = Network("MNIST")
114 | net.get_dataset("mnist")
115 | assert net.dataset.inputs.shape == [(28,28,1)]
116 | net.add(Layer("input", shape=(28, 28, 1), colormap="hot", minmax=(0,1)))
117 | net.add(FlattenLayer("flatten"))
118 | net.add(Layer("hidden1", shape=512, vshape=(16,32), activation='relu', dropout=0.2))
119 | net.add(Layer("hidden2", shape=512, vshape=(16,32), activation='relu', dropout=0.2))
120 | net.add(Layer("output", shape=10, activation='softmax'))
121 | net.connect('input', 'flatten')
122 | net.connect('flatten', 'hidden1')
123 | net.connect('hidden1', 'hidden2')
124 | net.connect('hidden2', 'output')
125 | net.compile(optimizer="adam", error="binary_crossentropy")
126 | svg = net.to_svg()
127 | assert svg is not None
128 | net.dataset.clear()
129 |
130 | def test_cifar10():
131 | """
132 | Test the cifar10 API and training.
133 | """
134 | from conx import Network, Layer, Conv2DLayer, MaxPool2DLayer, FlattenLayer
135 |
136 | batch_size = 32
137 | num_classes = 10
138 | epochs = 200
139 | data_augmentation = True
140 | num_predictions = 20
141 |
142 | net = Network("CIFAR10")
143 | net.add(Layer("input", (32, 32, 3)))
144 | net.add(Conv2DLayer("conv1", 32, (3, 3), padding='same', activation='relu'))
145 | net.add(Conv2DLayer("conv2", 32, (3, 3), activation='relu'))
146 | net.add(MaxPool2DLayer("pool1", pool_size=(2, 2), dropout=0.25))
147 | net.add(Conv2DLayer("conv3", 64, (3, 3), padding='same', activation='relu'))
148 | net.add(Conv2DLayer("conv4", 64, (3, 3), activation='relu'))
149 | net.add(MaxPool2DLayer("pool2", pool_size=(2, 2), dropout=0.25))
150 | net.add(FlattenLayer("flatten"))
151 | net.add(Layer("hidden1", 512, activation='relu', vshape=(16, 32), dropout=0.5))
152 | net.add(Layer("output", num_classes, activation='softmax'))
153 | net.connect()
154 |
155 | # initiate RMSprop optimizer
156 | opt = RMSprop(lr=0.0001, decay=1e-6)
157 | net.compile(error='categorical_crossentropy',
158 | optimizer=opt)
159 | net.get_dataset("cifar10")
160 | widget = net.dashboard()
161 | widget.goto("begin")
162 | widget.goto("next")
163 | widget.goto("end")
164 | widget.goto("prev")
165 | widget.prop_one()
166 | net.dataset.slice(10)
167 | net.dataset.shuffle()
168 | net.dataset.split(.5)
169 | net.train(plot=False)
170 | net.propagate(net.dataset.inputs[0])
171 | net.dataset.clear()
172 |
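173 | def test_propagate_sketch():
174 | """
175 | Minimal propagate() sketch (an illustrative addition, not from the
176 | original suite; it reuses only calls exercised in the tests above).
177 | """
178 | net = Network("AND")
179 | net.add(Layer("input", 2))
180 | net.add(Layer("output", 1, activation="sigmoid"))
181 | net.connect("input", "output")
182 | net.compile(error="mean_squared_error", optimizer="adam")
183 | output = net.propagate([1, 1])  ## one output bank -> one activation list
184 | assert len(output) == 1
185 |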
--------------------------------------------------------------------------------
/conx/widgets.py:
--------------------------------------------------------------------------------
1 | # conx - a neural network library
2 | #
3 | # Copyright (c) Douglas S. Blank
4 | #
5 | # This program is free software; you can redistribute it and/or modify
6 | # it under the terms of the GNU General Public License as published by
7 | # the Free Software Foundation; either version 3 of the License, or
8 | # (at your option) any later version.
9 | #
10 | # This program is distributed in the hope that it will be useful,
11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | # GNU General Public License for more details.
14 | #
15 | # You should have received a copy of the GNU General Public License
16 | # along with this program; if not, write to the Free Software
17 | # Foundation, Inc., 51 Franklin Street, Fifth Floor,
18 | # Boston, MA 02110-1301 USA
19 |
20 | import numpy as np
21 | import threading
22 | import random
23 | import time
24 |
25 | from IPython.display import Javascript, display
26 | import ipywidgets
27 | from ipywidgets import (HTML, Button, VBox, HBox, IntSlider, Select, Text,
28 | Layout, Label, FloatSlider, Checkbox, IntText,
29 | Box, Accordion, FloatText, Output, Widget, register,
30 | widget_serialization, DOMWidget)
31 | from traitlets import Bool, Dict, Int, Float, Unicode, List, Instance
32 |
33 | from .utils import (uri_to_image, AVAILABLE_COLORMAPS, get_colormap,
34 | dynamic_pictures_check)
35 |
36 | from ._version import __version__
37 |
38 | class _Player(threading.Thread):
39 | """
40 | Background thread for running a player.
41 | """
42 | def __init__(self, controller, time_wait=0.5):
43 | self.controller = controller
44 | threading.Thread.__init__(self)
45 | self.time_wait = time_wait
46 | self.can_run = threading.Event()
47 | self.can_run.clear() ## paused
48 | self.daemon = True  ## allows the program to exit without waiting for join
49 |
50 | def run(self):
51 | while True:
52 | self.can_run.wait()
53 | self.controller.goto("next")
54 | time.sleep(self.time_wait)
55 |
56 | def pause(self):
57 | self.can_run.clear()
58 |
59 | def resume(self):
60 | self.can_run.set()
61 |
62 | class SequenceViewer(VBox):
63 | """
64 | SequenceViewer
65 |
66 | Arguments:
67 | title (str) - Title of sequence
68 | function (callable) - takes an index 0 to length - 1. Function should
69 | return a displayable or a list of displayables
70 | length (int) - total number of frames in sequence
71 | play_rate (float) - seconds to wait between frames when auto-playing.
72 | Optional. Default is 0.5 seconds.
73 |
74 | >>> def function(index):
75 | ... return [None]
76 | >>> sv = SequenceViewer("Title", function, 10)
77 | >>> ## Do this manually for testing:
78 | >>> sv.initialize()
79 | None
80 | >>> ## Testing:
81 | >>> class Dummy:
82 | ... def update(self, result):
83 | ... return result
84 | >>> sv.displayers = [Dummy()]
85 | >>> print("Testing"); sv.goto("begin") # doctest: +ELLIPSIS
86 | Testing...
87 | >>> print("Testing"); sv.goto("end") # doctest: +ELLIPSIS
88 | Testing...
89 | >>> print("Testing"); sv.goto("prev") # doctest: +ELLIPSIS
90 | Testing...
91 | >>> print("Testing"); sv.goto("next") # doctest: +ELLIPSIS
92 | Testing...
93 |
94 | """
95 | def __init__(self, title, function, length, play_rate=0.5):
96 | self.player = _Player(self, play_rate)
97 | self.player.start()
98 | self.title = title
99 | self.function = function
100 | self.length = length
101 | self.output = Output()
102 | self.position_text = IntText(value=0, layout=Layout(width="100%"))
103 | self.total_text = Label(value="of %s" % self.length, layout=Layout(width="100px"))
104 | controls = self.make_controls()
105 | super().__init__([controls, self.output])
106 |
107 | def goto(self, position):
108 | #### Position it:
109 | if position == "begin":
110 | self.control_slider.value = 0
111 | elif position == "end":
112 | self.control_slider.value = self.length - 1
113 | elif position == "prev":
114 | if self.control_slider.value - 1 < 0:
115 | self.control_slider.value = self.length - 1 # wrap around
116 | else:
117 | self.control_slider.value = max(self.control_slider.value - 1, 0)
118 | elif position == "next":
119 | if self.control_slider.value + 1 > self.length - 1:
120 | self.control_slider.value = 0 # wrap around
121 | else:
122 | self.control_slider.value = min(self.control_slider.value + 1, self.length - 1)
123 | elif isinstance(position, int):
124 | self.control_slider.value = position
125 | self.position_text.value = self.control_slider.value
126 |
127 | def toggle_play(self, button):
128 | ## toggle
129 | if self.button_play.description == "Play":
130 | self.button_play.description = "Stop"
131 | self.button_play.icon = "pause"
132 | self.player.resume()
133 | else:
134 | self.button_play.description = "Play"
135 | self.button_play.icon = "play"
136 | self.player.pause()
137 |
138 | def make_controls(self):
139 | button_begin = Button(icon="fast-backward", layout=Layout(width='100%'))
140 | button_prev = Button(icon="backward", layout=Layout(width='100%'))
141 | button_next = Button(icon="forward", layout=Layout(width='100%'))
142 | button_end = Button(icon="fast-forward", layout=Layout(width='100%'))
143 | self.button_play = Button(icon="play", description="Play", layout=Layout(width="100%"))
144 | self.control_buttons = HBox([
145 | button_begin,
146 | button_prev,
147 | self.position_text,
148 | button_next,
149 | button_end,
150 | self.button_play,
151 | ], layout=Layout(width='100%', height="50px"))
152 | self.control_slider = IntSlider(description=self.title,
153 | continuous_update=False,
154 | min=0,
155 | max=max(self.length - 1, 0),
156 | value=0,
157 | style={"description_width": 'initial'},
158 | layout=Layout(width='100%'))
159 | ## Hook them up:
160 | button_begin.on_click(lambda button: self.goto("begin"))
161 | button_end.on_click(lambda button: self.goto("end"))
162 | button_next.on_click(lambda button: self.goto("next"))
163 | button_prev.on_click(lambda button: self.goto("prev"))
164 | self.button_play.on_click(self.toggle_play)
165 | self.control_slider.observe(self.update_slider_control, names='value')
166 | controls = VBox([HBox([self.control_slider, self.total_text], layout=Layout(height="40px")),
167 | self.control_buttons], layout=Layout(width='100%'))
168 | controls.on_displayed(lambda widget: self.initialize())
169 | return controls
170 |
171 | def initialize(self):
172 | results = self.function(self.control_slider.value)
173 | try:
174 | results = list(results)
175 | except:
176 | results = [results]
177 | self.displayers = [display(x, display_id=True) for x in results]
178 |
179 | def update_slider_control(self, change):
180 | if change["name"] == "value":
181 | self.position_text.value = self.control_slider.value
182 | self.output.clear_output(wait=True)
183 | results = self.function(self.control_slider.value)
184 | try:
185 | results = list(results)
186 | except:
187 | results = [results]
188 | for i in range(len(self.displayers)):
189 | self.displayers[i].update(results[i])
190 |
191 | class Dashboard(VBox):
192 | """
193 | Build the dashboard for Jupyter widgets. Requires running
194 | in a notebook/jupyterlab.
195 | """
196 | def __init__(self, net, width="95%", height="550px", play_rate=0.5):
197 | self._ignore_layer_updates = False
198 | self.player = _Player(self, play_rate)
199 | self.player.start()
200 | self.net = net
201 | r = random.randint(1, 1000000)
202 | self.class_id = "picture-dashboard-%s-%s" % (self.net.name, r)
203 | self._width = width
204 | self._height = height
205 | ## Global widgets:
206 | style = {"description_width": "initial"}
207 | self.feature_columns = IntText(description="Detail columns:",
208 | value=self.net.config["dashboard.features.columns"],
209 | min=0,
210 | max=1024,
211 | style=style)
212 | self.feature_scale = FloatText(description="Detail scale:",
213 | value=self.net.config["dashboard.features.scale"],
214 | min=0.1,
215 | max=10,
216 | style=style)
217 | self.feature_columns.observe(self.regenerate, names='value')
218 | self.feature_scale.observe(self.regenerate, names='value')
219 | ## Hack to center SVG as justify-content is broken:
220 | self.net_svg = HTML(value="""<p style="text-align:center">%s</p>""" % ("",), layout=Layout(
221 | width=self._width, overflow_x='auto', overflow_y="auto",
222 | justify_content="center"))
223 | # Make controls first:
224 | self.output = Output()
225 | controls = self.make_controls()
226 | config = self.make_config()
227 | super().__init__([config, controls, self.net_svg, self.output])
228 |
229 | def propagate(self, inputs):
230 | """
231 | Propagate inputs through the dashboard view of the network.
232 | """
233 | if dynamic_pictures_check():
234 | return self.net.propagate(inputs, class_id=self.class_id, update_pictures=True)
235 | else:
236 | self.regenerate(inputs=inputs)
237 |
238 | def goto(self, position):
239 | if len(self.net.dataset.inputs) == 0 or len(self.net.dataset.targets) == 0:
240 | return
241 | if self.control_select.value == "Train":
242 | length = len(self.net.dataset.train_inputs)
243 | elif self.control_select.value == "Test":
244 | length = len(self.net.dataset.test_inputs)
245 | #### Position it:
246 | if position == "begin":
247 | self.control_slider.value = 0
248 | elif position == "end":
249 | self.control_slider.value = length - 1
250 | elif position == "prev":
251 | if self.control_slider.value - 1 < 0:
252 | self.control_slider.value = length - 1 # wrap around
253 | else:
254 | self.control_slider.value = max(self.control_slider.value - 1, 0)
255 | elif position == "next":
256 | if self.control_slider.value + 1 > length - 1:
257 | self.control_slider.value = 0 # wrap around
258 | else:
259 | self.control_slider.value = min(self.control_slider.value + 1, length - 1)
260 | self.position_text.value = self.control_slider.value
261 |
262 |
263 | def change_select(self, change=None):
264 | """
265 | """
266 | self.update_control_slider(change)
267 | self.regenerate()
268 |
269 | def update_control_slider(self, change=None):
270 | self.net.config["dashboard.dataset"] = self.control_select.value
271 | if len(self.net.dataset.inputs) == 0 or len(self.net.dataset.targets) == 0:
272 | self.total_text.value = "of 0"
273 | self.control_slider.value = 0
274 | self.position_text.value = 0
275 | self.control_slider.disabled = True
276 | self.position_text.disabled = True
277 | for child in self.control_buttons.children:
278 | if not hasattr(child, "icon") or child.icon != "refresh":
279 | child.disabled = True
280 | return
281 | if self.control_select.value == "Test":
282 | self.total_text.value = "of %s" % len(self.net.dataset.test_inputs)
283 | minmax = (0, max(len(self.net.dataset.test_inputs) - 1, 0))
284 | if minmax[0] <= self.control_slider.value <= minmax[1]:
285 | pass # ok
286 | else:
287 | self.control_slider.value = 0
288 | self.control_slider.min = minmax[0]
289 | self.control_slider.max = minmax[1]
290 | if len(self.net.dataset.test_inputs) == 0:
291 | disabled = True
292 | else:
293 | disabled = False
294 | elif self.control_select.value == "Train":
295 | self.total_text.value = "of %s" % len(self.net.dataset.train_inputs)
296 | minmax = (0, max(len(self.net.dataset.train_inputs) - 1, 0))
297 | if minmax[0] <= self.control_slider.value <= minmax[1]:
298 | pass # ok
299 | else:
300 | self.control_slider.value = 0
301 | self.control_slider.min = minmax[0]
302 | self.control_slider.max = minmax[1]
303 | if len(self.net.dataset.train_inputs) == 0:
304 | disabled = True
305 | else:
306 | disabled = False
307 | self.control_slider.disabled = disabled
308 | self.position_text.disabled = disabled
309 | self.position_text.value = self.control_slider.value
310 | for child in self.control_buttons.children:
311 | if not hasattr(child, "icon") or child.icon != "refresh":
312 | child.disabled = disabled
313 |
314 | def update_zoom_slider(self, change):
315 | if change["name"] == "value":
316 | self.net.config["svg_scale"] = self.zoom_slider.value
317 | self.regenerate()
318 |
319 | def update_position_text(self, change):
320 | # {'name': 'value', 'old': 2, 'new': 3, 'owner': IntText(value=3, layout=Layout(width='100%')), 'type': 'change'}
321 | self.control_slider.value = change["new"]
322 |
323 | def get_current_input(self):
324 | if self.control_select.value == "Train" and len(self.net.dataset.train_targets) > 0:
325 | return self.net.dataset.train_inputs[self.control_slider.value]
326 | elif self.control_select.value == "Test" and len(self.net.dataset.test_targets) > 0:
327 | return self.net.dataset.test_inputs[self.control_slider.value]
328 |
329 | def get_current_targets(self):
330 | if self.control_select.value == "Train" and len(self.net.dataset.train_targets) > 0:
331 | return self.net.dataset.train_targets[self.control_slider.value]
332 | elif self.control_select.value == "Test" and len(self.net.dataset.test_targets) > 0:
333 | return self.net.dataset.test_targets[self.control_slider.value]
334 |
335 | def update_slider_control(self, change):
336 | if len(self.net.dataset.inputs) == 0 or len(self.net.dataset.targets) == 0:
337 | self.total_text.value = "of 0"
338 | return
339 | if change["name"] == "value":
340 | self.position_text.value = self.control_slider.value
341 | if self.control_select.value == "Train" and len(self.net.dataset.train_targets) > 0:
342 | self.total_text.value = "of %s" % len(self.net.dataset.train_inputs)
343 | if self.net.model is None:
344 | return
345 | if not dynamic_pictures_check():
346 | self.regenerate(inputs=self.net.dataset.train_inputs[self.control_slider.value],
347 | targets=self.net.dataset.train_targets[self.control_slider.value])
348 | return
349 | output = self.net.propagate(self.net.dataset.train_inputs[self.control_slider.value],
350 | class_id=self.class_id, update_pictures=True)
351 | if self.feature_bank.value in self.net.layer_dict.keys():
352 | self.net.propagate_to_features(self.feature_bank.value, self.net.dataset.train_inputs[self.control_slider.value],
353 | cols=self.feature_columns.value, scale=self.feature_scale.value, html=False)
354 | if self.net.config["show_targets"]:
355 | if len(self.net.output_bank_order) == 1: ## FIXME: use minmax of output bank
356 | self.net.display_component([self.net.dataset.train_targets[self.control_slider.value]],
357 | "targets",
358 | class_id=self.class_id,
359 | minmax=(-1, 1))
360 | else:
361 | self.net.display_component(self.net.dataset.train_targets[self.control_slider.value],
362 | "targets",
363 | class_id=self.class_id,
364 | minmax=(-1, 1))
365 | if self.net.config["show_errors"]: ## minmax is error
366 | if len(self.net.output_bank_order) == 1:
367 | errors = np.array(output) - np.array(self.net.dataset.train_targets[self.control_slider.value])
368 | self.net.display_component([errors.tolist()],
369 | "errors",
370 | class_id=self.class_id,
371 | minmax=(-1, 1))
372 | else:
373 | errors = []
374 | for bank in range(len(self.net.output_bank_order)):
375 | errors.append( np.array(output[bank]) - np.array(self.net.dataset.train_targets[self.control_slider.value][bank]))
376 | self.net.display_component(errors, "errors", class_id=self.class_id, minmax=(-1, 1))
377 | elif self.control_select.value == "Test" and len(self.net.dataset.test_targets) > 0:
378 | self.total_text.value = "of %s" % len(self.net.dataset.test_inputs)
379 | if self.net.model is None:
380 | return
381 | if not dynamic_pictures_check():
382 | self.regenerate(inputs=self.net.dataset.test_inputs[self.control_slider.value],
383 | targets=self.net.dataset.test_targets[self.control_slider.value])
384 | return
385 | output = self.net.propagate(self.net.dataset.test_inputs[self.control_slider.value],
386 | class_id=self.class_id, update_pictures=True)
387 | if self.feature_bank.value in self.net.layer_dict.keys():
388 | self.net.propagate_to_features(self.feature_bank.value, self.net.dataset.test_inputs[self.control_slider.value],
389 | cols=self.feature_columns.value, scale=self.feature_scale.value, html=False)
390 | if self.net.config["show_targets"]: ## FIXME: use minmax of output bank
391 | self.net.display_component([self.net.dataset.test_targets[self.control_slider.value]],
392 | "targets",
393 | class_id=self.class_id,
394 | minmax=(-1, 1))
395 | if self.net.config["show_errors"]: ## minmax is error
396 | if len(self.net.output_bank_order) == 1:
397 | errors = np.array(output) - np.array(self.net.dataset.test_targets[self.control_slider.value])
398 | self.net.display_component([errors.tolist()],
399 | "errors",
400 | class_id=self.class_id,
401 | minmax=(-1, 1))
402 | else:
403 | errors = []
404 | for bank in range(len(self.net.output_bank_order)):
405 | errors.append( np.array(output[bank]) - np.array(self.net.dataset.test_targets[self.control_slider.value][bank]))
406 | self.net.display_component(errors, "errors", class_id=self.class_id, minmax=(-1, 1))
407 |
408 | def toggle_play(self, button):
409 | ## toggle
410 | if self.button_play.description == "Play":
411 | self.button_play.description = "Stop"
412 | self.button_play.icon = "pause"
413 | self.player.resume()
414 | else:
415 | self.button_play.description = "Play"
416 | self.button_play.icon = "play"
417 | self.player.pause()
418 |
419 | def prop_one(self, button=None):
420 | self.update_slider_control({"name": "value"})
421 |
422 | def regenerate(self, button=None, inputs=None, targets=None):
423 | ## Protection when deleting object on shutdown:
424 | if isinstance(button, dict) and 'new' in button and button['new'] is None:
425 | return
426 | ## Update the config:
427 | self.net.config["dashboard.features.bank"] = self.feature_bank.value
428 | self.net.config["dashboard.features.columns"] = self.feature_columns.value
429 | self.net.config["dashboard.features.scale"] = self.feature_scale.value
430 | inputs = inputs if inputs is not None else self.get_current_input()
431 | targets = targets if targets is not None else self.get_current_targets()
432 | features = None
433 | if self.feature_bank.value in self.net.layer_dict.keys() and inputs is not None:
434 | if self.net.model is not None:
435 | features = self.net.propagate_to_features(self.feature_bank.value, inputs,
436 | cols=self.feature_columns.value,
437 | scale=self.feature_scale.value, display=False)
438 | svg = """<p style="text-align:center">%s</p>""" % (self.net.to_svg(
439 | inputs=inputs,
440 | targets=targets,
441 | class_id=self.class_id,
442 | highlights={self.feature_bank.value: {
443 | "border_color": "orange",
444 | "border_width": 30,
445 | }}))
446 | if inputs is not None and features is not None:
447 | html_horizontal = """<table>
448 | <tr>
449 | <td>%s</td>
450 | <td>%s %s</td>
451 | </tr>
452 | </table>
453 | """
454 | html_vertical = """<table>
455 | <tr>
456 | <td>%s</td>
457 | </tr>
458 | <tr>
459 | <td>%s %s</td>
460 | </tr>
461 | </table>
462 | """
463 | self.net_svg.value = (html_vertical if self.net.config["svg_rotate"] else html_horizontal) % (
464 | svg, "%s details" % self.feature_bank.value, features)
465 | else:
466 | self.net_svg.value = svg
467 |
468 | def make_colormap_image(self, colormap_name):
469 | from .layers import Layer
470 | if not colormap_name:
471 | colormap_name = get_colormap()
472 | layer = Layer("Colormap", 100)
473 | minmax = layer.get_act_minmax()
474 | image = layer.make_image(np.arange(minmax[0], minmax[1], .01),
475 | colormap_name,
476 | {"pixels_per_unit": 1,
477 | "svg_rotate": self.net.config["svg_rotate"]}).resize((300, 25))
478 | return image
479 |
480 | def set_attr(self, obj, attr, value):
481 | if value not in [{}, None]: ## value is None when shutting down
482 | if isinstance(value, dict):
483 | value = value["value"]
484 | if isinstance(obj, dict):
485 | obj[attr] = value
486 | else:
487 | setattr(obj, attr, value)
488 | ## was crashing on Widgets.__del__, if get_ipython() no longer existed
489 | self.regenerate()
490 |
491 | def make_controls(self):
492 | layout = Layout(width='100%', height="100%")
493 | button_begin = Button(icon="fast-backward", layout=layout)
494 | button_prev = Button(icon="backward", layout=layout)
495 | button_next = Button(icon="forward", layout=layout)
496 | button_end = Button(icon="fast-forward", layout=layout)
497 | #button_prop = Button(description="Propagate", layout=Layout(width='100%'))
498 | #button_train = Button(description="Train", layout=Layout(width='100%'))
499 | self.button_play = Button(icon="play", description="Play", layout=layout)
500 | step_down = Button(icon="sort-down", layout=Layout(width="95%", height="100%"))
501 | step_up = Button(icon="sort-up", layout=Layout(width="95%", height="100%"))
502 | up_down = HBox([step_down, step_up], layout=Layout(width="100%", height="100%"))
503 | refresh_button = Button(icon="refresh", layout=Layout(width="25%", height="100%"))
504 |
505 | self.position_text = IntText(value=0, layout=layout)
506 |
507 | self.control_buttons = HBox([
508 | button_begin,
509 | button_prev,
510 | #button_train,
511 | self.position_text,
512 | button_next,
513 | button_end,
514 | self.button_play,
515 | up_down,
516 | refresh_button
517 | ], layout=Layout(width='100%', height="100%"))
518 | length = (len(self.net.dataset.train_inputs) - 1) if len(self.net.dataset.train_inputs) > 0 else 0
519 | self.control_slider = IntSlider(description="Dataset index",
520 | continuous_update=False,
521 | min=0,
522 | max=max(length, 0),
523 | value=0,
524 | layout=Layout(width='100%'))
525 | if self.net.config["dashboard.dataset"] == "Train":
526 | length = len(self.net.dataset.train_inputs)
527 | else:
528 | length = len(self.net.dataset.test_inputs)
529 | self.total_text = Label(value="of %s" % length, layout=Layout(width="100px"))
530 | self.zoom_slider = FloatSlider(description="Zoom",
531 | continuous_update=False,
532 | min=0, max=1.0,
533 | style={"description_width": 'initial'},
534 | layout=Layout(width="65%"),
535 | value=self.net.config["svg_scale"] if self.net.config["svg_scale"] is not None else 0.5)
536 |
537 | ## Hook them up:
538 | button_begin.on_click(lambda button: self.goto("begin"))
539 | button_end.on_click(lambda button: self.goto("end"))
540 | button_next.on_click(lambda button: self.goto("next"))
541 | button_prev.on_click(lambda button: self.goto("prev"))
542 | self.button_play.on_click(self.toggle_play)
543 | self.control_slider.observe(self.update_slider_control, names='value')
544 | refresh_button.on_click(lambda widget: (self.update_control_slider(),
545 | self.output.clear_output(),
546 | self.regenerate()))
547 | step_down.on_click(lambda widget: self.move_step("down"))
548 | step_up.on_click(lambda widget: self.move_step("up"))
549 | self.zoom_slider.observe(self.update_zoom_slider, names='value')
550 | self.position_text.observe(self.update_position_text, names='value')
551 | # Put them together:
552 | controls = VBox([HBox([self.control_slider, self.total_text], layout=Layout(height="40px")),
553 | self.control_buttons], layout=Layout(width='100%'))
554 |
555 | #net_page = VBox([control, self.net_svg], layout=Layout(width='95%'))
556 | controls.on_displayed(lambda widget: self.regenerate())
557 | return controls
558 |
559 | def move_step(self, direction):
560 | """
561 | Move the layer stepper up/down through network
562 | """
563 | options = [""] + [layer.name for layer in self.net.layers]
564 | index = options.index(self.feature_bank.value)
565 | if direction == "up":
566 | new_index = (index + 1) % len(options)
567 | else: ## down
568 | new_index = (index - 1) % len(options)
569 | self.feature_bank.value = options[new_index]
570 | self.regenerate()
571 |
572 | def make_config(self):
573 | layout = Layout()
574 | style = {"description_width": "initial"}
575 | checkbox1 = Checkbox(description="Show Targets", value=self.net.config["show_targets"],
576 | layout=layout, style=style)
577 | checkbox1.observe(lambda change: self.set_attr(self.net.config, "show_targets", change["new"]), names='value')
578 | checkbox2 = Checkbox(description="Errors", value=self.net.config["show_errors"],
579 | layout=layout, style=style)
580 | checkbox2.observe(lambda change: self.set_attr(self.net.config, "show_errors", change["new"]), names='value')
581 |
582 | hspace = IntText(value=self.net.config["hspace"], description="Horizontal space between banks:",
583 | style=style, layout=layout)
584 | hspace.observe(lambda change: self.set_attr(self.net.config, "hspace", change["new"]), names='value')
585 | vspace = IntText(value=self.net.config["vspace"], description="Vertical space between layers:",
586 | style=style, layout=layout)
587 | vspace.observe(lambda change: self.set_attr(self.net.config, "vspace", change["new"]), names='value')
588 | self.feature_bank = Select(description="Details:", value=self.net.config["dashboard.features.bank"],
589 | options=[""] + [layer.name for layer in self.net.layers],
590 | rows=1)
591 | self.feature_bank.observe(self.regenerate, names='value')
592 | self.control_select = Select(
593 | options=['Test', 'Train'],
594 | value=self.net.config["dashboard.dataset"],
595 | description='Dataset:',
596 | rows=1
597 | )
598 | self.control_select.observe(self.change_select, names='value')
599 | column1 = [self.control_select,
600 | self.zoom_slider,
601 | hspace,
602 | vspace,
603 | HBox([checkbox1, checkbox2]),
604 | self.feature_bank,
605 | self.feature_columns,
606 | self.feature_scale
607 | ]
608 | ## Make layer selectable, and update-able:
609 | column2 = []
610 | layer = self.net.layers[-1]
611 | self.layer_select = Select(description="Layer:", value=layer.name,
612 | options=[layer.name for layer in
613 | self.net.layers],
614 | rows=1)
615 | self.layer_select.observe(self.update_layer_selection, names='value')
616 | column2.append(self.layer_select)
617 | self.layer_visible_checkbox = Checkbox(description="Visible", value=layer.visible, layout=layout)
618 | self.layer_visible_checkbox.observe(self.update_layer, names='value')
619 | column2.append(self.layer_visible_checkbox)
620 | self.layer_colormap = Select(description="Colormap:",
621 | options=[""] + AVAILABLE_COLORMAPS,
622 | value=layer.colormap if layer.colormap is not None else "", layout=layout, rows=1)
623 | self.layer_colormap_image = HTML(value="""<img src="%s"/>""" % self.net._image_to_uri(self.make_colormap_image(layer.colormap)))
624 | self.layer_colormap.observe(self.update_layer, names='value')
625 | column2.append(self.layer_colormap)
626 | column2.append(self.layer_colormap_image)
627 | ## get dynamic minmax; if you change it it will set it in layer as override:
628 | minmax = layer.get_act_minmax()
629 | self.layer_mindim = FloatText(description="Leftmost color maps to:", value=minmax[0], style=style)
630 | self.layer_maxdim = FloatText(description="Rightmost color maps to:", value=minmax[1], style=style)
631 | self.layer_mindim.observe(self.update_layer, names='value')
632 | self.layer_maxdim.observe(self.update_layer, names='value')
633 | column2.append(self.layer_mindim)
634 | column2.append(self.layer_maxdim)
635 | output_shape = layer.get_output_shape()
636 | self.layer_feature = IntText(value=layer.feature, description="Feature to show:", style=style)
637 |
638 | self.layer_feature.observe(self.update_layer, names='value')
639 | column2.append(self.layer_feature)
640 | self.svg_rotate = Checkbox(description="Rotate network",
641 | value=self.net.config["svg_rotate"],
642 | style={"description_width": 'initial'},
643 | layout=Layout(width="52%"))
644 | self.svg_rotate.observe(lambda change: self.set_attr(self.net.config, "svg_rotate", change["new"]), names='value')
645 | self.save_config_button = Button(icon="save", layout=Layout(width="10%"))
646 | self.save_config_button.on_click(self.save_config)
647 | column2.append(HBox([self.svg_rotate, self.save_config_button]))
648 | config_children = HBox([VBox(column1, layout=Layout(width="100%")),
649 | VBox(column2, layout=Layout(width="100%"))])
650 | accordion = Accordion(children=[config_children])
651 | accordion.set_title(0, self.net.name)
652 | accordion.selected_index = None
653 | return accordion
654 |
655 | def save_config(self, widget=None):
656 | self.net.save_config()
657 |
658 | def update_layer(self, change):
659 | """
660 | Update the layer object, and redisplay.
661 | """
662 | if self._ignore_layer_updates:
663 | return
664 | ## The rest indicates a change to a display variable.
665 | ## We need to save the value in the layer, and regenerate
666 | ## the display.
667 | # Get the layer:
668 | layer = self.net[self.layer_select.value]
669 | # Save the changed value in the layer:
670 | layer.feature = self.layer_feature.value
671 | layer.visible = self.layer_visible_checkbox.value
672 | ## These three, dealing with colors of activations,
673 | ## can be done with a prop_one():
674 | if "color" in change["owner"].description.lower():
675 | ## Matches: Colormap, leftmost color, rightmost color
676 | ## overriding dynamic minmax!
677 | layer.minmax = (self.layer_mindim.value, self.layer_maxdim.value)
678 |
679 | layer.colormap = self.layer_colormap.value if self.layer_colormap.value else None
680 | self.layer_colormap_image.value = """<img src="%s"/>""" % self.net._image_to_uri(self.make_colormap_image(layer.colormap))
681 | self.prop_one()
682 | else:
683 | self.regenerate()
684 |
685 | def update_layer_selection(self, change):
686 | """
687 | Just update the widgets; don't redraw anything.
688 | """
689 | ## No need to redisplay anything
690 | self._ignore_layer_updates = True
691 | ## First, get the new layer selected:
692 | layer = self.net[self.layer_select.value]
693 | ## Now, let's update all of the values without updating:
694 | self.layer_visible_checkbox.value = layer.visible
695 | self.layer_colormap.value = layer.colormap if layer.colormap is not None else ""
696 | self.layer_colormap_image.value = """<img src="%s"/>""" % self.net._image_to_uri(self.make_colormap_image(layer.colormap))
697 | minmax = layer.get_act_minmax()
698 | self.layer_mindim.value = minmax[0]
699 | self.layer_maxdim.value = minmax[1]
700 | self.layer_feature.value = layer.feature
701 | self._ignore_layer_updates = False
702 |
703 | @register("CameraWidget")
704 | class CameraWidget(DOMWidget):
705 | """
706 | Represents a media source.
707 |
708 | >>> cam = CameraWidget()
709 |
710 | """
711 | _view_module = Unicode('camera').tag(sync=True)
712 | _view_name = Unicode('CameraView').tag(sync=True)
713 | _model_module = Unicode('camera').tag(sync=True)
714 | _model_name = Unicode('CameraModel').tag(sync=True)
715 | _view_module_version = Unicode(__version__).tag(sync=True)
716 | # Specify audio constraint and video constraint as a boolean or dict.
717 | audio = Bool(False).tag(sync=True)
718 | video = Bool(True).tag(sync=True)
719 | image = Unicode('').tag(sync=True)
720 | image_count = Int(0).tag(sync=True)
721 |
722 | def __init__(self, *args, **kwargs):
723 | display(Javascript(get_camera_javascript()))
724 | super().__init__(*args, **kwargs)
725 |
726 | def get_image(self):
727 | if self.image:
728 | image = uri_to_image(self.image)
729 | image = image.convert("RGB")
730 | return image
731 |
732 | def get_data(self):
733 | if self.image:
734 | image = uri_to_image(self.image)
735 | ## trim from 4 to 3 dimensions: (remove alpha)
736 | # remove index 3 along dimension 2 (the A of RGBA)
737 | image = np.delete(image, np.s_[3], 2)
738 | return (np.array(image).astype("float32") / 255.0)
739 |
740 | def get_camera_javascript(width=320, height=240):
741 | if ipywidgets._version.version_info < (7,):
742 | jupyter_widgets = "jupyter-js-widgets"
743 | else:
744 | jupyter_widgets = "@jupyter-widgets/base"
745 | camera_javascript = """
746 | require.undef('camera');
747 |
748 | define('camera', ["%(jupyter_widgets)s"], function(widgets) {
749 | var CameraView = widgets.DOMWidgetView.extend({
750 | defaults: _.extend({}, widgets.DOMWidgetView.prototype.defaults, {
751 | _view_name: 'CameraView',
752 | _view_module: 'camera',
753 | audio: false,
754 | video: true,
755 | }),
756 |
757 | initialize: function() {
758 |
759 | var div = document.createElement('div');
760 | var el = document.createElement('video');
761 | el.setAttribute('id', "video_widget");
762 | el.setAttribute('width', %(width)s);
763 | el.setAttribute('height', %(height)s);
764 | div.appendChild(el);
765 | var canvas = document.createElement('canvas');
766 | canvas.setAttribute('id', "video_canvas");
767 | canvas.setAttribute('width', %(width)s);
768 | canvas.setAttribute('height', %(height)s);
769 | div.appendChild(canvas);
770 | div.appendChild(document.createElement('br'));
771 | var button = document.createElement('button');
772 | button.innerHTML = "Take a Picture";
773 | var that = this;
774 | button.onclick = function(b) {
775 | var video = document.querySelector("#video_widget");
776 | var canvas = document.querySelector("#video_canvas");
777 | if (video) {
778 | canvas.getContext('2d').drawImage(video, 0, 0, canvas.width, canvas.height);
779 | var url = canvas.toDataURL('image/png');
780 | if (that.model) {
781 | that.model.set('image', url);
782 | that.model.save_changes();
783 | }
784 | }
785 | };
786 | div.appendChild(button);
787 | this.setElement(div);
788 | CameraView.__super__.initialize.apply(this, arguments);
789 | },
790 |
791 | render: function() {
792 | var that = this;
793 | that.model.stream.then(function(stream) {
794 | that.el.children[0].src = window.URL.createObjectURL(stream);
795 | that.el.children[0].play();
796 | });
797 | }
798 | });
799 |
800 | var CameraModel = widgets.DOMWidgetModel.extend({
801 | defaults: _.extend({}, widgets.DOMWidgetModel.prototype.defaults, {
802 | _model_name: 'CameraModel',
803 | _model_module: 'camera',
804 | audio: false,
805 | video: true
806 | }),
807 |
808 | initialize: function() {
809 | CameraModel.__super__.initialize.apply(this, arguments);
810 | // Get the camera permissions
811 | this.stream = navigator.mediaDevices.getUserMedia({audio: false, video: true});
812 | }
813 | });
814 | return {
815 | CameraModel: CameraModel,
816 | CameraView: CameraView
817 | }
818 | });
819 | """ % {"width": width, "height": height, "jupyter_widgets": jupyter_widgets}
820 | return camera_javascript
821 |
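822 | ## Illustrative sketch (not part of the original module): how CameraWidget
823 | ## is meant to be used from a notebook. _camera_example is a hypothetical
824 | ## helper; the (240, 320, 3) shape assumes the default video size above.
825 | def _camera_example():
826 | cam = CameraWidget()
827 | display(cam)            ## click "Take a Picture" in the notebook
828 | return cam.get_data()   ## float32 array in [0, 1], shape (240, 320, 3)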
--------------------------------------------------------------------------------
/data/cmu_faces_full_size.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ArtificialIntelligenceToolkit/conx/365f4c3e8712ea455c96eba072058f9d86f40af3/data/cmu_faces_full_size.npz
--------------------------------------------------------------------------------
/data/cmu_faces_half_size.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ArtificialIntelligenceToolkit/conx/365f4c3e8712ea455c96eba072058f9d86f40af3/data/cmu_faces_half_size.npz
--------------------------------------------------------------------------------
/data/cmu_faces_quarter_size.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ArtificialIntelligenceToolkit/conx/365f4c3e8712ea455c96eba072058f9d86f40af3/data/cmu_faces_quarter_size.npz
--------------------------------------------------------------------------------
/data/figure_ground_a.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ArtificialIntelligenceToolkit/conx/365f4c3e8712ea455c96eba072058f9d86f40af3/data/figure_ground_a.npy
--------------------------------------------------------------------------------
/data/fingers.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ArtificialIntelligenceToolkit/conx/365f4c3e8712ea455c96eba072058f9d86f40af3/data/fingers.npz
--------------------------------------------------------------------------------
/data/grid.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ArtificialIntelligenceToolkit/conx/365f4c3e8712ea455c96eba072058f9d86f40af3/data/grid.png
--------------------------------------------------------------------------------
/data/gridfonts.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ArtificialIntelligenceToolkit/conx/365f4c3e8712ea455c96eba072058f9d86f40af3/data/gridfonts.npy
--------------------------------------------------------------------------------
/data/gridfonts.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | def make_figure_ground_a():
4 | ## Letters are 17 rows x 9 cols
5 | ## leaves off top 4 and bottom 4
6 | ## from regular gridfont letters
7 | data = []
8 | letter = []
9 | # First, read file
10 | ## Original data set was
11 | ## source -> target
12 | ## letter, brim, body -> letter, brim, body
13 | with open("figure_ground_a.dat") as fp:
14 | line = fp.readline()
15 | while line:
16 | line = line.strip()
17 | if line:
18 | L = [round(float(v)) for v in line.split(" ")]
19 | letter.append(L)
20 | else:
21 | data.append(letter)
22 | letter = []
23 | line = fp.readline()
24 | if letter:
25 | data.append(letter)
26 | letter = []
27 | ## we just want the letter, brim, body
28 | dict = make_dict(data)
29 | ds = make_dataset(dict, data)
30 | np.save("figure_ground_a.npy", ds)
31 |
32 | def make_dict(data):
33 | dict = {}
34 | for i in range(len(data)):
35 | vector = tuple(data[i][0])
36 | if vector not in dict:
37 | dict[vector] = i
38 | return dict
39 |
40 | def make_dataset(dict, data):
41 | dataset = []
42 | for v in dict:
43 | index = dict[v]
44 | figure = data[index][1]
45 | ground = data[index][2]
46 | dataset.append([v, [figure, ground]])
47 | return dataset
48 |
49 | def get_bits(byte):
50 | lookup = {
51 | "0": 0,
52 | "1": 1,
53 | "2": 2,
54 | "3": 3,
55 | "4": 4,
56 | "5": 5,
57 | "6": 6,
58 | "7": 7,
59 | "8": 8,
60 | "9": 9,
61 | "A": 10,
62 | "B": 11,
63 | "C": 12,
64 | "D": 13,
65 | "E": 14,
66 | "F": 15,
67 | }
68 | return ("0000" + (bin(lookup[byte.upper()])[2:]))[-4:]
69 |
70 | def bit2pos(bit):
71 | """
72 | Given bit code, return list of (x,y) for
73 | each pixel to set.
74 |
75 | 0 1 2 3 4 5 6 7 8
76 | 0 xx 00 00 00 xx 11 11 11 xx
77 | 1 14 44 32 15 45 33 16
78 | 2 14 xx 15 xx 16
79 | 3 14 32 44 15 33 45 16
80 | 4 xx 02 02 02 xx 03 03 03 xx
81 | 5 17 46 34 18 47 35 19
82 | 6 17 xx 18 xx 19
83 | 7 17 34 46 18 35 47 19
84 | 8 xx 04 04 04 xx 05 05 05 xx
85 | 9 20 48 36 21 49 37 22
86 | 10 20 xx 21 xx 22
87 | 11 20 36 48 21 37 49 22
88 | 12 xx 06 06 06 xx 07 07 07 xx
89 | 13 23 50 38 24 51 39 25
90 | 14 23 xx 24 xx 25
91 | 15 23 38 50 24 39 51 25
92 | 16 xx 08 08 08 xx 09 09 09 xx
93 | 17 26 52 40 27 53 41 28
94 | 18 26 xx 27 xx 28
95 | 19 26 40 52 27 41 53 28
96 | 20 xx 10 10 10 xx 11 11 11 xx
97 | 21 29 54 42 30 55 43 31
98 | 22 29 xx 30 xx 31
99 | 23 29 42 54 30 43 55 31
100 | 24 xx 12 12 12 xx 13 13 13 xx
101 | """
102 | if bit == 0:
103 | return [(0,0), (1,0), (2,0), (3,0), (4,0)]
104 | elif bit == 1:
105 | return [(4,0), (5,0), (6,0), (7,0), (8,0)]
106 | elif bit == 2:
107 | return [(0,4), (1,4), (2,4), (3,4), (4,4)]
108 | elif bit == 3:
109 | return [(4,4), (5,4), (6,4), (7,4), (8,4)]
110 | elif bit == 4:
111 | return [(0,8), (1,8), (2,8), (3,8), (4,8)]
112 | elif bit == 5:
113 | return [(4,8), (5,8), (6,8), (7,8), (8,8)]
114 | elif bit == 6:
115 | return [(0,12), (1,12), (2,12), (3,12), (4,12)]
116 | elif bit == 7:
117 | return [(4,12), (5,12), (6,12), (7,12), (8,12)]
118 | elif bit == 8:
119 | return [(0,16), (1,16), (2,16), (3,16), (4,16)]
120 | elif bit == 9:
121 | return [(4,16), (5,16), (6,16), (7,16), (8,16)]
122 | elif bit == 10:
123 | return [(0,20), (1,20), (2,20), (3,20), (4,20)]
124 | elif bit == 11:
125 | return [(4,20), (5,20), (6,20), (7,20), (8,20)]
126 | elif bit == 12:
127 | return [(0,24), (1,24), (2,24), (3,24), (4,24)]
128 | elif bit == 13:
129 | return [(4,24), (5,24), (6,24), (7,24), (8,24)]
130 | elif bit == 14:
131 | return [(0,0), (0,1), (0,2), (0,3), (0,4)]
132 | elif bit == 15:
133 | return [(4,0), (4,1), (4,2), (4,3), (4,4)]
134 | elif bit == 16:
135 | return [(8,0), (8,1), (8,2), (8,3), (8,4)]
136 | elif bit == 17:
137 | return [(0,4), (0,5), (0,6), (0,7), (0,8)]
138 | elif bit == 18:
139 | return [(4,4), (4,5), (4,6), (4,7), (4,8)]
140 | elif bit == 19:
141 | return [(8,4), (8,5), (8,6), (8,7), (8,8)]
142 | elif bit == 20:
143 | return [(0,8), (0,9), (0,10), (0,11), (0,12)]
144 | elif bit == 21:
145 | return [(4,8), (4,9), (4,10), (4,11), (4,12)]
146 | elif bit == 22:
147 | return [(8,8), (8,9), (8,10), (8,11), (8,12)]
148 | elif bit == 23:
149 | return [(0,12), (0,13), (0,14), (0,15), (0,16)]
150 | elif bit == 24:
151 | return [(4,12), (4,13), (4,14), (4,15), (4,16)]
152 | elif bit == 25:
153 | return [(8,12), (8,13), (8,14), (8,15), (8,16)]
154 | elif bit == 26:
155 | return [(0,16), (0,17), (0,18), (0,19), (0,20)]
156 | elif bit == 27:
157 | return [(4,16), (4,17), (4,18), (4,19), (4,20)]
158 | elif bit == 28:
159 | return [(8,16), (8,17), (8,18), (8,19), (8,20)]
160 | elif bit == 29:
161 | return [(0,20), (0,21), (0,22), (0,23), (0,24)]
162 | elif bit == 30:
163 | return [(4,20), (4,21), (4,22), (4,23), (4,24)]
164 | elif bit == 31:
165 | return [(8,20), (8,21), (8,22), (8,23), (8,24)]
166 | ## lower-left, to upper right
167 | elif bit == 32:
168 | return [(0,4), (1,3), (2,2), (3,1), (4,0)]
169 | elif bit == 33:
170 | return [(4,4), (5,3), (6,2), (7,1), (8,0)]
171 | elif bit == 34:
172 | return [(0,8), (1,7), (2,6), (3,5), (4,4)]
173 | elif bit == 35:
174 | return [(4,8), (5,7), (6,6), (7,5), (8,4)]
175 | elif bit == 36:
176 | return [(0,12), (1,11), (2,10), (3,9), (4,8)]
177 | elif bit == 37:
178 | return [(4,12), (5,11), (6,10), (7,9), (8,8)]
179 | elif bit == 38:
180 | return [(0,16), (1,15), (2,14), (3,13), (4,12)]
181 | elif bit == 39:
182 | return [(4,16), (5,15), (6,14), (7,13), (8,12)]
183 | elif bit == 40:
184 | return [(0,20), (1,19), (2,18), (3,17), (4,16)]
185 | elif bit == 41:
186 | return [(4,20), (5,19), (6,18), (7,17), (8,16)]
187 | elif bit == 42:
188 | return [(0,24), (1,23), (2,22), (3,21), (4,20)]
189 | elif bit == 43:
190 | return [(4,24), (5,23), (6,22), (7,21), (8,20)]
191 | ## upper-left to lower-right:
192 | elif bit == 44:
193 | return [(0,0), (1,1), (2,2), (3,3), (4,4)]
194 | elif bit == 45:
195 | return [(4,0), (5,1), (6,2), (7,3), (8,4)]
196 | elif bit == 46:
197 | return [(0,4), (1,5), (2,6), (3,7), (4,8)]
198 | elif bit == 47:
199 | return [(4,4), (5,5), (6,6), (7,7), (8,8)]
200 | elif bit == 48:
201 | return [(0,8), (1,9), (2,10), (3,11), (4,12)]
202 | elif bit == 49:
203 | return [(4,8), (5,9), (6,10), (7,11), (8,12)]
204 | elif bit == 50:
205 | return [(0,12), (1,13), (2,14), (3,15), (4,16)]
206 | elif bit == 51:
207 | return [(4,12), (5,13), (6,14), (7,15), (8,16)]
208 | elif bit == 52:
209 | return [(0,16), (1,17), (2,18), (3,19), (4,20)]
210 | elif bit == 53:
211 | return [(4,16), (5,17), (6,18), (7,19), (8,20)]
212 | elif bit == 54:
213 | return [(0,20), (1,21), (2,22), (3,23), (4,24)]
214 | elif bit == 55:
215 | return [(4,20), (5,21), (6,22), (7,23), (8,24)]
216 | else:
217 | raise Exception("no such bit number")
218 |
219 | def make_letter(bits):
220 | array = [[0.0 for row in range(25)] for col in range(9)]
221 | for index in range(len(bits)):
222 | if bits[index] == "1":
223 | positions = bit2pos(index)
224 | for (x,y) in positions:
225 | array[x][y] = 1.0
226 | letter = np.array(array)
227 | letter = letter.swapaxes(0, 1)
228 | return letter.tolist()
229 | #return array
230 |
231 | def read_gridfonts():
232 | data = []
233 | labels = []
234 | with open("gridfonts.dat") as fp:
235 | line = fp.readline()
236 | while line:
237 | if " : " in line or line == "\n":
238 | line = fp.readline()
239 | continue
240 | line = line.strip()
241 | letter, code = line.split(" ")
242 | #print(letter, code)
243 | bits = "".join([get_bits(byte) for byte in code])
244 | #print(bits)
245 | data.append(make_letter(bits))
246 | labels.append(letter)
247 | line = fp.readline()
248 | return data, labels
249 |
250 | def display_letter(letter):
251 | for row in range(25):
252 | for col in range(9):
253 | print( " X"[int(letter[row][col])], end="")
254 | print()
255 | print()
256 |
257 | def make_gridfonts():
258 | data, labels = read_gridfonts()
259 | np.save("gridfonts.npy", [data, labels])
260 |
261 | make_figure_ground_a()
262 | make_gridfonts()
263 |
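264 | ## Illustrative sketch (not part of the original script): reading the saved
265 | ## arrays back. load_gridfonts is a hypothetical helper; newer NumPy needs
266 | ## allow_pickle=True for these object arrays.
267 | def load_gridfonts():
268 | data, labels = np.load("gridfonts.npy", allow_pickle=True)
269 | return data, labels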
--------------------------------------------------------------------------------
/data/mnist.h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ArtificialIntelligenceToolkit/conx/365f4c3e8712ea455c96eba072058f9d86f40af3/data/mnist.h5
--------------------------------------------------------------------------------
/data/mnist.py:
--------------------------------------------------------------------------------
1 | import h5py
2 | import numpy as np
3 | from keras.datasets import mnist
4 | from keras.utils import to_categorical
5 |
6 | # input image dimensions
7 | img_rows, img_cols = 28, 28
8 | # the data, shuffled and split between train and test sets
9 | (x_train, y_train), (x_test, y_test) = mnist.load_data()
10 | x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
11 | x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
12 | input_shape = (img_rows, img_cols, 1)
13 | x_train = x_train.astype('float16')
14 | x_test = x_test.astype('float16')
15 | inputs = np.concatenate((x_train,x_test)) / 255
16 | labels = np.concatenate((y_train,y_test)) # ints, 0 to 9
17 | ###########################################
18 | # fix mis-labeled image(s) in Keras dataset
19 | labels[10994] = 9
20 | ###########################################
21 | targets = to_categorical(labels).astype("uint8")
22 | string = h5py.special_dtype(vlen=str)
23 | labels = np.array([str(label) for label in labels], dtype=string)
24 |
25 | print("creating h5...")
26 | with h5py.File("mnist.h5", "w") as h5:
27 | dset = h5.create_dataset('inputs', data=[inputs], compression='gzip', compression_opts=9)
28 | dset = h5.create_dataset('targets', data=[targets], compression='gzip', compression_opts=9)
29 | dset = h5.create_dataset('labels', data=[labels], compression='gzip', compression_opts=9)
30 | print("done!")
31 |
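32 | ## Illustrative sketch (not part of the original script): reading the arrays
33 | ## back out of mnist.h5; the data=[...] wrapping above adds a leading axis
34 | ## of length 1. load_mnist_h5 is a hypothetical helper.
35 | def load_mnist_h5():
36 | with h5py.File("mnist.h5", "r") as h5:
37 | inputs = h5["inputs"][0]    # float16, shape (70000, 28, 28, 1)
38 | targets = h5["targets"][0]  # uint8, shape (70000, 10)
39 | return inputs, targets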
--------------------------------------------------------------------------------
/data/mnist_images.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ArtificialIntelligenceToolkit/conx/365f4c3e8712ea455c96eba072058f9d86f40af3/data/mnist_images.png
--------------------------------------------------------------------------------
/docker/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM jupyter/tensorflow-notebook
2 |
3 | LABEL maintainer="Douglas Blank "
4 |
5 | RUN pip install conx==3.6.0 --upgrade --no-cache-dir
6 | RUN pip install jyro --upgrade --no-cache-dir
7 | RUN pip install jupyter notebook --upgrade --no-cache-dir
8 |
9 | RUN sudo apt install --yes ffmpeg || true
10 | RUN sudo apt install --yes libffi-dev libffi6 || true
11 |
12 | RUN python -c "import conx as cx; cx.Dataset.get('mnist')"
13 | RUN python -c "import conx as cx; cx.Dataset.get('cifar10')"
14 | RUN python -c "import conx as cx; cx.Dataset.get('cmu_faces_full_size')"
15 | RUN python -c "import conx as cx; cx.Dataset.get('cmu_faces_half_size')"
16 | RUN python -c "import conx as cx; cx.Dataset.get('cmu_faces_quarter_size')"
17 |
--------------------------------------------------------------------------------
/docker/Makefile:
--------------------------------------------------------------------------------
1 | build:
2 | sudo docker image build -t "calysto/conx" .
3 |
--------------------------------------------------------------------------------
/docker/README.md:
--------------------------------------------------------------------------------
1 |
2 | 1. Install Docker
3 |
4 | 2. Run the image
5 |
6 | ```
7 | sudo docker run --rm -v "$PWD":/home/jovyan/work -p 8888:8888 calysto/conx
8 | ```
9 |
10 | 3. Open the URL shown at the bottom of the message from the above command.
11 |
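12 | 4. Alternatively, build the image locally before running it (the same command the `Makefile` in this directory uses; run it from `docker/`):
13 |
14 | ```
15 | sudo docker image build -t "calysto/conx" .
16 | ```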
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = sphinx-build
7 | SPHINXPROJ = conx
8 | SOURCEDIR = source
9 | BUILDDIR = build
10 | export READTHEDOCS=True
11 |
12 | # Put it first so that "make" without argument is like "make help".
13 | help:
14 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
15 |
16 | .PHONY: help Makefile
17 |
18 | # Catch-all target: route all unknown targets to Sphinx using the new
19 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
20 |
21 | clean: Makefile
22 | rm -f source/*.ipynb
23 | rm -f source/*.jpg
24 | rm -f source/*.png
25 | rm -f source/_static/*.mp4
26 | rm -f source/*.gif
27 | rm -f source/*.md
28 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
29 |
30 | %: Makefile
31 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
32 |
--------------------------------------------------------------------------------
/docs/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy
2 | keras
3 | matplotlib
4 | ipywidgets>=7.0
5 | Pillow
6 | IPython
7 | tensorflow
8 | theano
9 | pypandoc
10 | sphinx_rtd_theme
11 | nbsphinx
12 | jupyter_sphinx
13 | sphinxcontrib-napoleon
14 | recommonmark
15 | sklearn
16 | tqdm
17 | requests
18 | pydot
19 | cairosvg
20 |
--------------------------------------------------------------------------------
/docs/source/_static/css/custom.css:
--------------------------------------------------------------------------------
1 |
2 | .wy-nav-content {
3 | max-width: 100%;
4 | }
5 |
6 |
--------------------------------------------------------------------------------
/docs/source/conf.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | #
4 | # conx documentation build configuration file
5 | #
6 |
7 | import io
8 | import os
9 | import sys
10 | import glob
11 | import shutil
12 |
13 | sys.path.insert(0, os.path.abspath('../..'))
14 |
15 | blacklist = ["00_Index.ipynb"]
16 |
17 | ## dependencies: get ../../notebooks/*.ipynb files
18 |
19 | print("Copying updated ../../notebooks/ files ...")
20 |
21 | def copyfiles(filenames, destination):
22 | for filename in filenames:
23 | path, dst = os.path.split(filename)
24 | if dst in blacklist:
25 | continue
26 | dst = os.path.join(destination, dst)
27 | if os.path.isfile(dst): # dst exists here
28 | dst_time = os.path.getmtime(dst)
29 | src_time = os.path.getmtime(filename)
30 | if src_time > dst_time: # the src time > dst time
31 | copy_it = True # it is updated
32 | else:
33 | copy_it = False # not updated
34 | else:
35 | copy_it = True # doesn't exist
36 | if copy_it:
37 | shutil.copyfile(filename, dst)
38 | print(" ", dst)
39 | if filename.endswith(".ipynb"):
40 | os.system("""sed -i "s/video src='\\(.*\\)\\.mp4'/video src='\\_static\\/\\1\\.mp4'/g" "%s" """ % dst)
41 |
42 | copyfiles(glob.glob("../../notebooks/*.ipynb") +
43 | glob.glob("../../*.md") +
44 | glob.glob("../../notebooks/*.gif") +
45 | glob.glob("../../notebooks/*.jpg") +
46 | glob.glob("../../notebooks/*.png"), "./")
47 |
48 | try:
49 | os.system("mkdir _static")
50 | except:
51 | pass
52 |
53 | copyfiles(glob.glob("../../notebooks/*.mp4"), "./_static/")
54 |
55 | # -- General configuration ------------------------------------------------
56 |
57 | # If your documentation needs a minimal Sphinx version, state it here.
58 | #
59 | # needs_sphinx = '1.0'
60 |
61 | # Add any Sphinx extension module names here, as strings. They can be
62 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
63 | # ones.
64 | extensions = [
65 | 'sphinx.ext.autodoc',
66 | 'sphinx.ext.doctest',
67 | 'sphinx.ext.intersphinx',
68 | 'sphinx.ext.todo',
69 | 'sphinx.ext.coverage',
70 | 'sphinx.ext.viewcode',
71 | 'sphinx.ext.githubpages',
72 | 'nbsphinx',
73 | 'jupyter_sphinx.embed_widgets',
74 | 'sphinxcontrib.napoleon',
75 | ]
76 |
77 | # Add any paths that contain templates here, relative to this directory.
78 | templates_path = ['_templates']
79 |
80 | source_parsers = {
81 | '.md': 'recommonmark.parser.CommonMarkParser',
82 | }
83 |
84 | # The suffix(es) of source filenames.
85 | # You can specify multiple suffix as a list of string:
86 | #
87 | source_suffix = ['.rst', '.md']
88 | # source_suffix = '.rst'
89 |
90 | # The master toctree document.
91 | master_doc = 'index'
92 |
93 | # General information about the project.
94 | project = 'conx'
95 | copyright = '2017, Douglas Blank'
96 | author = 'Douglas Blank'
97 |
98 | # The version info for the project you're documenting, acts as replacement for
99 | # |version| and |release|, also used in various other places throughout the
100 | # built documents.
101 | #
102 | # The short X.Y version (a fallback; overwritten from conx/_version.py below).
103 | version = '3.0'
104 | # The full version, including alpha/beta/rc tags (also overwritten below).
105 | release = '3.0.1'
106 |
107 | with io.open('../../conx/_version.py', encoding='utf-8') as fid:
108 | for line in fid:
109 | if line.startswith('__version__'):
110 | release = line.strip().split()[-1][1:-1]
111 | break
112 | version = ".".join(release.split(".")[:2])
113 |
114 | # The language for content autogenerated by Sphinx. Refer to documentation
115 | # for a list of supported languages.
116 | #
117 | # This is also used if you do content translation via gettext catalogs.
118 | # Usually you set "language" from the command line for these cases.
119 | language = None
120 |
121 | # List of patterns, relative to source directory, that match files and
122 | # directories to ignore when looking for source files.
123 | # This patterns also effect to html_static_path and html_extra_path
124 | exclude_patterns = []
125 |
126 | # The name of the Pygments (syntax highlighting) style to use.
127 | pygments_style = 'sphinx'
128 |
129 | # If true, `todo` and `todoList` produce output, else they produce nothing.
130 | todo_include_todos = True
131 |
132 |
133 | # -- Options for HTML output ----------------------------------------------
134 |
135 | # The theme to use for HTML and HTML Help pages. See the documentation for
136 | # a list of builtin themes.
137 | #
138 | html_theme = 'sphinx_rtd_theme'
139 | html_logo = 'img/logo.gif'
140 |
141 | # Theme options are theme-specific and customize the look and feel of a theme
142 | # further. For a list of options available for each theme, see the
143 | # documentation.
144 | #
145 | # html_theme_options = {}
146 |
147 | # Add any paths that contain custom static files (such as style sheets) here,
148 | # relative to this directory. They are copied after the builtin static files,
149 | # so a file named "default.css" will overwrite the builtin "default.css".
150 |
151 | html_static_path = ['_static']
152 | # html_theme_options = {}
153 |
154 | def setup(app):
155 | app.add_stylesheet('css/custom.css')
156 |
157 | # -- Options for HTMLHelp output ------------------------------------------
158 |
159 | # Output file base name for HTML help builder.
160 | htmlhelp_basename = 'conxdoc'
161 |
162 |
163 | # -- Options for LaTeX output ---------------------------------------------
164 |
165 | latex_elements = {
166 | # The paper size ('letterpaper' or 'a4paper').
167 | #
168 | # 'papersize': 'letterpaper',
169 |
170 | # The font size ('10pt', '11pt' or '12pt').
171 | #
172 | # 'pointsize': '10pt',
173 |
174 | # Additional stuff for the LaTeX preamble.
175 | #
176 | # 'preamble': '',
177 |
178 | # Latex figure (float) alignment
179 | #
180 | # 'figure_align': 'htbp',
181 | }
182 |
183 | # Grouping the document tree into LaTeX files. List of tuples
184 | # (source start file, target name, title,
185 | # author, documentclass [howto, manual, or own class]).
186 | latex_documents = [
187 | (master_doc, 'conx.tex', 'conx Documentation',
188 | 'Douglas Blank', 'manual'),
189 | ]
190 |
191 |
192 | # -- Options for manual page output ---------------------------------------
193 |
194 | # One entry per manual page. List of tuples
195 | # (source start file, name, description, authors, manual section).
196 | man_pages = [
197 | (master_doc, 'conx', 'conx Documentation',
198 | [author], 1)
199 | ]
200 |
201 |
202 | # -- Options for Texinfo output -------------------------------------------
203 |
204 | # Grouping the document tree into Texinfo files. List of tuples
205 | # (source start file, target name, title, author,
206 | # dir menu entry, description, category)
207 | texinfo_documents = [
208 | (master_doc, 'conx', 'conx Documentation',
209 |      author, 'conx', 'On-Ramp to Deep Learning, built on Keras.',
210 | 'Miscellaneous'),
211 | ]
212 |
213 |
214 |
215 |
216 | # Example configuration for intersphinx: refer to the Python standard library.
217 | intersphinx_mapping = {'https://docs.python.org/3.6': None}
218 |
--------------------------------------------------------------------------------
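A note on the conf.py above: its copy step reduces to a single rule, copy a
notebook only when the destination is missing or older than the source. Below
is a minimal standalone sketch of that rule using pathlib; the helper name
`copy_if_newer` and the paths are ours, mirroring the script, not part of conx.

    import shutil
    from pathlib import Path

    BLACKLIST = {"00_Index.ipynb"}

    def copy_if_newer(src, dest_dir):
        """Copy src into dest_dir when missing or stale; return True if copied."""
        if src.name in BLACKLIST:
            return False
        dst = dest_dir / src.name
        # skip the copy when an up-to-date destination already exists
        if dst.is_file() and src.stat().st_mtime <= dst.stat().st_mtime:
            return False
        shutil.copyfile(src, dst)
        return True

    # usage mirroring conf.py: pull updated notebooks next to the doc sources
    for nb in Path("../../notebooks").glob("*.ipynb"):
        if copy_if_newer(nb, Path(".")):
            print("  ", nb)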
/docs/source/conx.rst:
--------------------------------------------------------------------------------
1 | conx package
2 | ============
3 |
4 | Submodules
5 | ----------
6 |
7 | conx\.network module
8 | --------------------
9 |
10 | .. automodule:: conx.network
11 | :members:
12 | :undoc-members:
13 | :show-inheritance:
14 |
15 | conx\.dataset module
16 | --------------------
17 |
18 | .. automodule:: conx.dataset
19 | :members:
20 | :undoc-members:
21 | :show-inheritance:
22 |
23 | conx\.layers module
24 | -------------------
25 |
26 | .. automodule:: conx.layers
27 | :members:
28 | :undoc-members:
29 | :show-inheritance:
30 |
31 | conx\.utils module
32 | ------------------
33 |
34 | .. automodule:: conx.utils
35 | :members:
36 | :undoc-members:
37 | :show-inheritance:
38 |
39 | conx\.widgets module
40 | --------------------
41 |
42 | .. automodule:: conx.widgets
43 | :members:
44 | :undoc-members:
45 | :show-inheritance:
46 |
47 |
48 | conx\.activations module
49 | ------------------------
50 |
51 | .. automodule:: conx.activations
52 | :members:
53 | :undoc-members:
54 | :show-inheritance:
55 |
56 |
57 | Module contents
58 | ---------------
59 |
60 | .. automodule:: conx
61 | :members:
62 | :undoc-members:
63 | :show-inheritance:
64 |
--------------------------------------------------------------------------------
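The automodule directives above pull everything from docstrings, and the
`sphinxcontrib.napoleon` extension enabled in conf.py lets those docstrings be
written in Google or NumPy style rather than raw reST. A hedged sketch of the
kind of docstring this pipeline renders; the function is hypothetical, not
taken from conx:

    def scale(values, factor=1.0):
        """Multiply each value by a constant factor.

        Napoleon converts the Google-style sections below into the
        reST fields that autodoc renders.

        Args:
            values (list of float): The numbers to scale.
            factor (float): The multiplier applied to each value.

        Returns:
            list of float: A new list with every element scaled.
        """
        return [v * factor for v in values]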
/docs/source/examples.rst:
--------------------------------------------------------------------------------
1 | Examples
2 | ========
3 |
4 | .. toctree::
5 | :maxdepth: 1
6 |
7 | Learning
8 | XOR
9 | MNIST
10 | Face recognition
11 | CIFAR10_CNN
12 | Auto-encoder with Conv2D
13 | Datasets
14 | VirtualDatasets
15 | Plotting
16 | Plot3D
17 | Autoencoder
18 | AliceInWonderland
19 | Predicting and Generating Texts
20 | RecommendingMovies
21 | LSTM
22 | ActivationFunctions
23 | Gridfonts
24 | Camera
25 | Robot Simulation
26 | Extrapolation1
27 | VisionQuest
28 | Experiments
29 | MakingMovies
30 | PCA
31 | Utilities
32 | Two-Spirals
33 | VGG16 and ImageNet
34 |
--------------------------------------------------------------------------------
/docs/source/img/logo.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ArtificialIntelligenceToolkit/conx/365f4c3e8712ea455c96eba072058f9d86f40af3/docs/source/img/logo.gif
--------------------------------------------------------------------------------
/docs/source/index.rst:
--------------------------------------------------------------------------------
1 | .. conx documentation master file, created by
2 | sphinx-quickstart on Sun Aug 6 11:46:27 2017.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 | Conx documentation
7 | ==================
8 |
9 | .. toctree::
10 | :maxdepth: 2
11 | :caption: Contents:
12 | :numbered:
13 |
14 | README
15 | Getting Started with conx
16 | examples
17 | modules
18 |
19 |
20 | Index and Search
21 | ================
22 |
23 | * :ref:`genindex`
24 | * :ref:`search`
25 |
--------------------------------------------------------------------------------
/docs/source/modules.rst:
--------------------------------------------------------------------------------
1 | conx
2 | ====
3 |
4 | .. toctree::
5 | :maxdepth: 4
6 |
7 | conx
8 |
--------------------------------------------------------------------------------
/readthedocs.yaml:
--------------------------------------------------------------------------------
1 | # Configuration for readthedocs
2 |
3 | # Important: we need to disable all unneeded formats.
4 | # Note that HTML and JSON are always built:
5 | # https://docs.readthedocs.io/en/latest/yaml-config.html#formats
6 | # In particular, the 'htmlzip' format takes a LOT of memory and can
7 | # cause the build to fail - see, for example, aiida_core issue #1472:
8 | # https://github.com/aiidateam/aiida_core/issues/1472
9 | formats: []
10 |
11 | ## For now we don't specify any other parameters; those are
12 | ## currently configured in the Read the Docs web interface.
13 | ## For other parameters see
14 | ## https://docs.readthedocs.io/en/latest/yaml-config.html
15 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | IPython
2 | Pillow
3 | cairosvg
4 | h5py
5 | ipywidgets>=7.0
6 | keras>=2.1.3
7 | matplotlib
8 | numpy
9 | pydot
10 | requests
11 | scikit-learn
12 | svgwrite
13 | tqdm
14 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [metadata]
2 | license_file = LICENSE
3 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | import io
2 | import sys
3 | try:
4 |     import pypandoc
5 | except ImportError:
6 |     pypandoc = None
7 |
8 | from setuptools import find_packages, setup
9 |
10 | with io.open('conx/_version.py', encoding='utf-8') as fid:
11 | for line in fid:
12 | if line.startswith('__version__'):
13 | version = line.strip().split()[-1][1:-1]
14 | break
15 |
16 | with io.open('README.md', encoding='utf-8') as fp:
17 | long_desc = fp.read()
18 | if pypandoc is not None:
19 | try:
20 | long_desc = pypandoc.convert(long_desc, "rst", "markdown_github")
21 |     except Exception:
22 |         pass  # fall back to the raw Markdown README
23 |
24 |
25 | setup(name='conx',
26 | version=version,
27 | description='On-Ramp to Deep Learning. Built on Keras',
28 | long_description=long_desc,
29 | author='Douglas S. Blank',
30 | author_email='doug.blank@gmail.com',
31 | url='https://github.com/Calysto/conx',
32 | install_requires=['numpy', 'keras>=2.1.3', 'matplotlib',
33 | 'ipywidgets>=7.0', 'Pillow', 'IPython',
34 |                        'h5py', "svgwrite", "scikit-learn",
35 | "tqdm", "requests", "pydot", "cairosvg"],
36 | packages=find_packages(include=['conx', 'conx.*']),
37 |       include_package_data = True,
38 | test_suite = 'nose.collector',
39 | classifiers=[
40 | 'Framework :: IPython',
41 | ('License :: OSI Approved :: ' +
42 | 'GNU Affero General Public License v3 or later (AGPLv3+)'),
43 | 'Programming Language :: Python :: 3',
44 | ]
45 | )
46 |
--------------------------------------------------------------------------------
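Both setup.py and docs/source/conf.py extract the release string from
conx/_version.py with the same split-and-slice idiom, which assumes a
single-line assignment such as `__version__ = "3.0.1"`. Here is the same
parse sketched as a reusable function; `ast.literal_eval` replaces the
`[1:-1]` slice so either quote style works, but the single-line,
no-trailing-comment assumption still holds. The function name is ours,
for illustration only.

    import ast

    def read_version(path="conx/_version.py"):
        """Return the version string from a `__version__ = "x.y.z"` line."""
        with open(path, encoding="utf-8") as fid:
            for line in fid:
                if line.startswith("__version__"):
                    # evaluate the quoted literal on the right-hand side
                    return ast.literal_eval(line.split("=", 1)[1].strip())
        raise RuntimeError("no __version__ assignment found in %s" % path)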